/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
/* macro to check if Hypervisor is enabled in the HCR_EL2 register */
#define HYP_ENABLE_FLAG		0x286001U

/* length of Trusty's input parameters (in bytes) */
#define TRUSTY_PARAMS_LEN_BYTES	(4096U * 2)
31 uint8_t space
[PLATFORM_STACK_SIZE
] __aligned(16);
35 struct trusty_cpu_ctx
{
36 cpu_context_t cpu_ctx
;
38 uint32_t saved_security_state
;
39 int32_t fiq_handler_active
;
40 uint64_t fiq_handler_pc
;
41 uint64_t fiq_handler_cpsr
;
42 uint64_t fiq_handler_sp
;
47 struct trusty_stack secure_stack
;
61 static struct trusty_cpu_ctx trusty_cpu_ctx
[PLATFORM_CORE_COUNT
];
63 struct smc_args
trusty_init_context_stack(void **sp
, void *new_stack
);
64 struct smc_args
trusty_context_switch_helper(void **sp
, void *smc_params
);
66 static uint32_t current_vmid
;
68 static struct trusty_cpu_ctx
*get_trusty_ctx(void)
70 return &trusty_cpu_ctx
[plat_my_core_pos()];
73 static bool is_hypervisor_mode(void)
75 uint64_t hcr
= read_hcr();
77 return ((hcr
& HYP_ENABLE_FLAG
) != 0U) ? true : false;
80 static struct smc_args
trusty_context_switch(uint32_t security_state
, uint64_t r0
,
81 uint64_t r1
, uint64_t r2
, uint64_t r3
)
83 struct smc_args args
, ret_args
;
84 struct trusty_cpu_ctx
*ctx
= get_trusty_ctx();
85 struct trusty_cpu_ctx
*ctx_smc
;
87 assert(ctx
->saved_security_state
!= security_state
);
90 if (is_hypervisor_mode()) {
91 /* According to the ARM DEN0028A spec, VMID is stored in x7 */
92 ctx_smc
= cm_get_context(NON_SECURE
);
93 assert(ctx_smc
!= NULL
);
94 args
.r7
= SMC_GET_GP(ctx_smc
, CTX_GPREG_X7
);
96 /* r4, r5, r6 reserved for future use. */
106 * To avoid the additional overhead in PSCI flow, skip FP context
107 * saving/restoring in case of CPU suspend and resume, assuming that
108 * when it's needed the PSCI caller has preserved FP context before
111 if (r0
!= SMC_FC_CPU_SUSPEND
&& r0
!= SMC_FC_CPU_RESUME
)
112 fpregs_context_save(get_fpregs_ctx(cm_get_context(security_state
)));
113 cm_el1_sysregs_context_save(security_state
);
115 ctx
->saved_security_state
= security_state
;
116 ret_args
= trusty_context_switch_helper(&ctx
->saved_sp
, &args
);
118 assert(ctx
->saved_security_state
== ((security_state
== 0U) ? 1U : 0U));
120 cm_el1_sysregs_context_restore(security_state
);
121 if (r0
!= SMC_FC_CPU_SUSPEND
&& r0
!= SMC_FC_CPU_RESUME
)
122 fpregs_context_restore(get_fpregs_ctx(cm_get_context(security_state
)));
124 cm_set_next_eret_context(security_state
);
129 static uint64_t trusty_fiq_handler(uint32_t id
,
135 struct trusty_cpu_ctx
*ctx
= get_trusty_ctx();
137 assert(!is_caller_secure(flags
));
139 ret
= trusty_context_switch(NON_SECURE
, SMC_FC_FIQ_ENTER
, 0, 0, 0);
144 if (ctx
->fiq_handler_active
!= 0) {
145 INFO("%s: fiq handler already active\n", __func__
);
149 ctx
->fiq_handler_active
= 1;
150 (void)memcpy(&ctx
->fiq_gpregs
, get_gpregs_ctx(handle
), sizeof(ctx
->fiq_gpregs
));
151 ctx
->fiq_pc
= SMC_GET_EL3(handle
, CTX_ELR_EL3
);
152 ctx
->fiq_cpsr
= SMC_GET_EL3(handle
, CTX_SPSR_EL3
);
153 ctx
->fiq_sp_el1
= read_ctx_reg(get_sysregs_ctx(handle
), CTX_SP_EL1
);
155 write_ctx_reg(get_sysregs_ctx(handle
), CTX_SP_EL1
, ctx
->fiq_handler_sp
);
156 cm_set_elr_spsr_el3(NON_SECURE
, ctx
->fiq_handler_pc
, (uint32_t)ctx
->fiq_handler_cpsr
);
161 static uint64_t trusty_set_fiq_handler(void *handle
, uint64_t cpu
,
162 uint64_t handler
, uint64_t stack
)
164 struct trusty_cpu_ctx
*ctx
;
166 if (cpu
>= (uint64_t)PLATFORM_CORE_COUNT
) {
167 ERROR("%s: cpu %lld >= %d\n", __func__
, cpu
, PLATFORM_CORE_COUNT
);
168 return (uint64_t)SM_ERR_INVALID_PARAMETERS
;
171 ctx
= &trusty_cpu_ctx
[cpu
];
172 ctx
->fiq_handler_pc
= handler
;
173 ctx
->fiq_handler_cpsr
= SMC_GET_EL3(handle
, CTX_SPSR_EL3
);
174 ctx
->fiq_handler_sp
= stack
;
179 static uint64_t trusty_get_fiq_regs(void *handle
)
181 struct trusty_cpu_ctx
*ctx
= get_trusty_ctx();
182 uint64_t sp_el0
= read_ctx_reg(&ctx
->fiq_gpregs
, CTX_GPREG_SP_EL0
);
184 SMC_RET4(handle
, ctx
->fiq_pc
, ctx
->fiq_cpsr
, sp_el0
, ctx
->fiq_sp_el1
);
187 static uint64_t trusty_fiq_exit(void *handle
, uint64_t x1
, uint64_t x2
, uint64_t x3
)
190 struct trusty_cpu_ctx
*ctx
= get_trusty_ctx();
192 if (ctx
->fiq_handler_active
== 0) {
193 NOTICE("%s: fiq handler not active\n", __func__
);
194 SMC_RET1(handle
, (uint64_t)SM_ERR_INVALID_PARAMETERS
);
197 ret
= trusty_context_switch(NON_SECURE
, SMC_FC_FIQ_EXIT
, 0, 0, 0);
199 INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %lld\n",
200 __func__
, handle
, ret
.r0
);
204 * Restore register state to state recorded on fiq entry.
206 * x0, sp_el1, pc and cpsr need to be restored because el1 cannot
209 * x1-x4 and x8-x17 need to be restored here because smc_handler64
210 * corrupts them (el1 code also restored them).
212 (void)memcpy(get_gpregs_ctx(handle
), &ctx
->fiq_gpregs
, sizeof(ctx
->fiq_gpregs
));
213 ctx
->fiq_handler_active
= 0;
214 write_ctx_reg(get_sysregs_ctx(handle
), CTX_SP_EL1
, ctx
->fiq_sp_el1
);
215 cm_set_elr_spsr_el3(NON_SECURE
, ctx
->fiq_pc
, (uint32_t)ctx
->fiq_cpsr
);
220 static uintptr_t trusty_smc_handler(uint32_t smc_fid
,
231 entry_point_info_t
*ep_info
= bl31_plat_get_next_image_ep_info(SECURE
);
234 * Return success for SET_ROT_PARAMS if Trusty is not present, as
235 * Verified Boot is not even supported and returning success here
236 * would not compromise the boot process.
238 if ((ep_info
== NULL
) && (smc_fid
== SMC_YC_SET_ROT_PARAMS
)) {
240 } else if (ep_info
== NULL
) {
241 SMC_RET1(handle
, SMC_UNK
);
246 if (is_caller_secure(flags
)) {
247 if (smc_fid
== SMC_YC_NS_RETURN
) {
248 ret
= trusty_context_switch(SECURE
, x1
, 0, 0, 0);
249 SMC_RET8(handle
, ret
.r0
, ret
.r1
, ret
.r2
, ret
.r3
,
250 ret
.r4
, ret
.r5
, ret
.r6
, ret
.r7
);
252 INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) \
253 cpu %d, unknown smc\n",
254 __func__
, smc_fid
, x1
, x2
, x3
, x4
, cookie
, handle
, flags
,
256 SMC_RET1(handle
, SMC_UNK
);
259 case SMC_FC64_SET_FIQ_HANDLER
:
260 return trusty_set_fiq_handler(handle
, x1
, x2
, x3
);
261 case SMC_FC64_GET_FIQ_REGS
:
262 return trusty_get_fiq_regs(handle
);
263 case SMC_FC_FIQ_EXIT
:
264 return trusty_fiq_exit(handle
, x1
, x2
, x3
);
266 if (is_hypervisor_mode())
267 vmid
= SMC_GET_GP(handle
, CTX_GPREG_X7
);
269 if ((current_vmid
!= 0) && (current_vmid
!= vmid
)) {
270 /* This message will cause SMC mechanism
271 * abnormal in multi-guest environment.
272 * Change it to WARN in case you need it.
274 VERBOSE("Previous SMC not finished.\n");
275 SMC_RET1(handle
, SM_ERR_BUSY
);
278 ret
= trusty_context_switch(NON_SECURE
, smc_fid
, x1
,
281 SMC_RET1(handle
, ret
.r0
);
286 static int32_t trusty_init(void)
288 entry_point_info_t
*ep_info
;
289 struct smc_args zero_args
= {0};
290 struct trusty_cpu_ctx
*ctx
= get_trusty_ctx();
291 uint32_t cpu
= plat_my_core_pos();
292 uint64_t reg_width
= GET_RW(read_ctx_reg(get_el3state_ctx(&ctx
->cpu_ctx
),
296 * Get information about the Trusty image. Its absence is a critical
299 ep_info
= bl31_plat_get_next_image_ep_info(SECURE
);
300 assert(ep_info
!= NULL
);
302 fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE
)));
303 cm_el1_sysregs_context_save(NON_SECURE
);
305 cm_set_context(&ctx
->cpu_ctx
, SECURE
);
306 cm_init_my_context(ep_info
);
309 * Adjust secondary cpu entry point for 32 bit images to the
310 * end of exception vectors
312 if ((cpu
!= 0U) && (reg_width
== MODE_RW_32
)) {
313 INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
314 cpu
, ep_info
->pc
+ (1U << 5));
315 cm_set_elr_el3(SECURE
, ep_info
->pc
+ (1U << 5));
318 cm_el1_sysregs_context_restore(SECURE
);
319 fpregs_context_restore(get_fpregs_ctx(cm_get_context(SECURE
)));
320 cm_set_next_eret_context(SECURE
);
322 ctx
->saved_security_state
= ~0U; /* initial saved state is invalid */
323 (void)trusty_init_context_stack(&ctx
->saved_sp
, &ctx
->secure_stack
.end
);
325 (void)trusty_context_switch_helper(&ctx
->saved_sp
, &zero_args
);
327 cm_el1_sysregs_context_restore(NON_SECURE
);
328 fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE
)));
329 cm_set_next_eret_context(NON_SECURE
);
334 static void trusty_cpu_suspend(uint32_t off
)
338 ret
= trusty_context_switch(NON_SECURE
, SMC_FC_CPU_SUSPEND
, off
, 0, 0);
340 INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %lld\n",
341 __func__
, plat_my_core_pos(), ret
.r0
);
345 static void trusty_cpu_resume(uint32_t on
)
349 ret
= trusty_context_switch(NON_SECURE
, SMC_FC_CPU_RESUME
, on
, 0, 0);
351 INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %lld\n",
352 __func__
, plat_my_core_pos(), ret
.r0
);
356 static int32_t trusty_cpu_off_handler(u_register_t max_off_lvl
)
358 trusty_cpu_suspend(max_off_lvl
);
363 static void trusty_cpu_on_finish_handler(u_register_t max_off_lvl
)
365 struct trusty_cpu_ctx
*ctx
= get_trusty_ctx();
367 if (ctx
->saved_sp
== NULL
) {
370 trusty_cpu_resume(max_off_lvl
);
374 static void trusty_cpu_suspend_handler(u_register_t max_off_lvl
)
376 trusty_cpu_suspend(max_off_lvl
);
379 static void trusty_cpu_suspend_finish_handler(u_register_t max_off_lvl
)
381 trusty_cpu_resume(max_off_lvl
);
384 static const spd_pm_ops_t trusty_pm
= {
385 .svc_off
= trusty_cpu_off_handler
,
386 .svc_suspend
= trusty_cpu_suspend_handler
,
387 .svc_on_finish
= trusty_cpu_on_finish_handler
,
388 .svc_suspend_finish
= trusty_cpu_suspend_finish_handler
,
391 void plat_trusty_set_boot_args(aapcs64_params_t
*args
);
393 #ifdef TSP_SEC_MEM_SIZE
394 #pragma weak plat_trusty_set_boot_args
395 void plat_trusty_set_boot_args(aapcs64_params_t
*args
)
397 args
->arg0
= TSP_SEC_MEM_SIZE
;
401 static int32_t trusty_setup(void)
403 entry_point_info_t
*ep_info
;
407 bool aarch32
= false;
409 /* Get trusty's entry point info */
410 ep_info
= bl31_plat_get_next_image_ep_info(SECURE
);
411 if (ep_info
== NULL
) {
412 INFO("Trusty image missing.\n");
416 /* memmap first page of trusty's code memory before peeking */
417 ret
= mmap_add_dynamic_region(ep_info
->pc
, /* PA */
418 ep_info
->pc
, /* VA */
419 PAGE_SIZE
, /* size */
420 MT_SECURE
| MT_RW_DATA
); /* attrs */
423 /* peek into trusty's code to see if we have a 32-bit or 64-bit image */
424 instr
= *(uint32_t *)ep_info
->pc
;
426 if (instr
>> 24 == 0xeaU
) {
427 INFO("trusty: Found 32 bit image\n");
429 } else if (instr
>> 8 == 0xd53810U
|| instr
>> 16 == 0x9400U
) {
430 INFO("trusty: Found 64 bit image\n");
432 ERROR("trusty: Found unknown image, 0x%x\n", instr
);
436 /* unmap trusty's memory page */
437 (void)mmap_remove_dynamic_region(ep_info
->pc
, PAGE_SIZE
);
439 SET_PARAM_HEAD(ep_info
, PARAM_EP
, VERSION_1
, SECURE
| EP_ST_ENABLE
);
441 ep_info
->spsr
= SPSR_64(MODE_EL1
, MODE_SP_ELX
,
442 DISABLE_ALL_EXCEPTIONS
);
444 ep_info
->spsr
= SPSR_MODE32(MODE32_svc
, SPSR_T_ARM
,
449 (void)memset(&ep_info
->args
, 0, sizeof(ep_info
->args
));
450 plat_trusty_set_boot_args(&ep_info
->args
);
452 /* register init handler */
453 bl31_register_bl32_init(trusty_init
);
455 /* register power management hooks */
456 psci_register_spd_pm_hook(&trusty_pm
);
458 /* register interrupt handler */
460 set_interrupt_rm_flag(flags
, NON_SECURE
);
461 ret
= register_interrupt_type_handler(INTR_TYPE_S_EL1
,
465 ERROR("trusty: failed to register fiq handler, ret = %d\n", ret
);
469 entry_point_info_t
*ns_ep_info
;
472 ns_ep_info
= bl31_plat_get_next_image_ep_info(NON_SECURE
);
473 if (ns_ep_info
== NULL
) {
474 NOTICE("Trusty: non-secure image missing.\n");
477 spsr
= ns_ep_info
->spsr
;
478 if (GET_RW(spsr
) == MODE_RW_64
&& GET_EL(spsr
) == MODE_EL2
) {
479 spsr
&= ~(MODE_EL_MASK
<< MODE_EL_SHIFT
);
480 spsr
|= MODE_EL1
<< MODE_EL_SHIFT
;
482 if (GET_RW(spsr
) == MODE_RW_32
&& GET_M32(spsr
) == MODE32_hyp
) {
483 spsr
&= ~(MODE32_MASK
<< MODE32_SHIFT
);
484 spsr
|= MODE32_svc
<< MODE32_SHIFT
;
486 if (spsr
!= ns_ep_info
->spsr
) {
487 NOTICE("Trusty: Switch bl33 from EL2 to EL1 (spsr 0x%x -> 0x%x)\n",
488 ns_ep_info
->spsr
, spsr
);
489 ns_ep_info
->spsr
= spsr
;
496 /* Define a SPD runtime service descriptor for fast SMC calls */
501 SMC_ENTITY_SECURE_MONITOR
,
507 /* Define a SPD runtime service descriptor for yielding SMC calls */
512 SMC_ENTITY_SECURE_MONITOR
,