/*
 * Copyright (c) 2016-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <plat/common/platform.h>
23 /* macro to check if Hypervisor is enabled in the HCR_EL2 register */
24 #define HYP_ENABLE_FLAG 0x286001U
26 /* length of Trusty's input parameters (in bytes) */
27 #define TRUSTY_PARAMS_LEN_BYTES (4096U * 2)
30 uint8_t space
[PLATFORM_STACK_SIZE
] __aligned(16);
34 struct trusty_cpu_ctx
{
35 cpu_context_t cpu_ctx
;
37 uint32_t saved_security_state
;
38 int32_t fiq_handler_active
;
39 uint64_t fiq_handler_pc
;
40 uint64_t fiq_handler_cpsr
;
41 uint64_t fiq_handler_sp
;
46 struct trusty_stack secure_stack
;
60 static struct trusty_cpu_ctx trusty_cpu_ctx
[PLATFORM_CORE_COUNT
];
62 struct smc_args
trusty_init_context_stack(void **sp
, void *new_stack
);
63 struct smc_args
trusty_context_switch_helper(void **sp
, void *smc_params
);
65 static uint32_t current_vmid
;
67 static struct trusty_cpu_ctx
*get_trusty_ctx(void)
69 return &trusty_cpu_ctx
[plat_my_core_pos()];
72 static bool is_hypervisor_mode(void)
74 uint64_t hcr
= read_hcr();
76 return ((hcr
& HYP_ENABLE_FLAG
) != 0U) ? true : false;
79 static struct smc_args
trusty_context_switch(uint32_t security_state
, uint64_t r0
,
80 uint64_t r1
, uint64_t r2
, uint64_t r3
)
82 struct smc_args args
, ret_args
;
83 struct trusty_cpu_ctx
*ctx
= get_trusty_ctx();
84 struct trusty_cpu_ctx
*ctx_smc
;
86 assert(ctx
->saved_security_state
!= security_state
);
89 if (is_hypervisor_mode()) {
90 /* According to the ARM DEN0028A spec, VMID is stored in x7 */
91 ctx_smc
= cm_get_context(NON_SECURE
);
92 assert(ctx_smc
!= NULL
);
93 args
.r7
= SMC_GET_GP(ctx_smc
, CTX_GPREG_X7
);
95 /* r4, r5, r6 reserved for future use. */
105 * To avoid the additional overhead in PSCI flow, skip FP context
106 * saving/restoring in case of CPU suspend and resume, assuming that
107 * when it's needed the PSCI caller has preserved FP context before
110 if (r0
!= SMC_FC_CPU_SUSPEND
&& r0
!= SMC_FC_CPU_RESUME
)
111 fpregs_context_save(get_fpregs_ctx(cm_get_context(security_state
)));
112 cm_el1_sysregs_context_save(security_state
);
114 ctx
->saved_security_state
= security_state
;
115 ret_args
= trusty_context_switch_helper(&ctx
->saved_sp
, &args
);
117 assert(ctx
->saved_security_state
== ((security_state
== 0U) ? 1U : 0U));
119 cm_el1_sysregs_context_restore(security_state
);
120 if (r0
!= SMC_FC_CPU_SUSPEND
&& r0
!= SMC_FC_CPU_RESUME
)
121 fpregs_context_restore(get_fpregs_ctx(cm_get_context(security_state
)));
123 cm_set_next_eret_context(security_state
);
128 static uint64_t trusty_fiq_handler(uint32_t id
,
134 struct trusty_cpu_ctx
*ctx
= get_trusty_ctx();
136 assert(!is_caller_secure(flags
));
138 ret
= trusty_context_switch(NON_SECURE
, SMC_FC_FIQ_ENTER
, 0, 0, 0);
143 if (ctx
->fiq_handler_active
!= 0) {
144 INFO("%s: fiq handler already active\n", __func__
);
148 ctx
->fiq_handler_active
= 1;
149 (void)memcpy(&ctx
->fiq_gpregs
, get_gpregs_ctx(handle
), sizeof(ctx
->fiq_gpregs
));
150 ctx
->fiq_pc
= SMC_GET_EL3(handle
, CTX_ELR_EL3
);
151 ctx
->fiq_cpsr
= SMC_GET_EL3(handle
, CTX_SPSR_EL3
);
152 ctx
->fiq_sp_el1
= read_ctx_reg(get_sysregs_ctx(handle
), CTX_SP_EL1
);
154 write_ctx_reg(get_sysregs_ctx(handle
), CTX_SP_EL1
, ctx
->fiq_handler_sp
);
155 cm_set_elr_spsr_el3(NON_SECURE
, ctx
->fiq_handler_pc
, (uint32_t)ctx
->fiq_handler_cpsr
);
160 static uint64_t trusty_set_fiq_handler(void *handle
, uint64_t cpu
,
161 uint64_t handler
, uint64_t stack
)
163 struct trusty_cpu_ctx
*ctx
;
165 if (cpu
>= (uint64_t)PLATFORM_CORE_COUNT
) {
166 ERROR("%s: cpu %lld >= %d\n", __func__
, cpu
, PLATFORM_CORE_COUNT
);
167 return (uint64_t)SM_ERR_INVALID_PARAMETERS
;
170 ctx
= &trusty_cpu_ctx
[cpu
];
171 ctx
->fiq_handler_pc
= handler
;
172 ctx
->fiq_handler_cpsr
= SMC_GET_EL3(handle
, CTX_SPSR_EL3
);
173 ctx
->fiq_handler_sp
= stack
;
178 static uint64_t trusty_get_fiq_regs(void *handle
)
180 struct trusty_cpu_ctx
*ctx
= get_trusty_ctx();
181 uint64_t sp_el0
= read_ctx_reg(&ctx
->fiq_gpregs
, CTX_GPREG_SP_EL0
);
183 SMC_RET4(handle
, ctx
->fiq_pc
, ctx
->fiq_cpsr
, sp_el0
, ctx
->fiq_sp_el1
);
186 static uint64_t trusty_fiq_exit(void *handle
, uint64_t x1
, uint64_t x2
, uint64_t x3
)
189 struct trusty_cpu_ctx
*ctx
= get_trusty_ctx();
191 if (ctx
->fiq_handler_active
== 0) {
192 NOTICE("%s: fiq handler not active\n", __func__
);
193 SMC_RET1(handle
, (uint64_t)SM_ERR_INVALID_PARAMETERS
);
196 ret
= trusty_context_switch(NON_SECURE
, SMC_FC_FIQ_EXIT
, 0, 0, 0);
198 INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %lld\n",
199 __func__
, handle
, ret
.r0
);
203 * Restore register state to state recorded on fiq entry.
205 * x0, sp_el1, pc and cpsr need to be restored because el1 cannot
208 * x1-x4 and x8-x17 need to be restored here because smc_handler64
209 * corrupts them (el1 code also restored them).
211 (void)memcpy(get_gpregs_ctx(handle
), &ctx
->fiq_gpregs
, sizeof(ctx
->fiq_gpregs
));
212 ctx
->fiq_handler_active
= 0;
213 write_ctx_reg(get_sysregs_ctx(handle
), CTX_SP_EL1
, ctx
->fiq_sp_el1
);
214 cm_set_elr_spsr_el3(NON_SECURE
, ctx
->fiq_pc
, (uint32_t)ctx
->fiq_cpsr
);
219 static uintptr_t trusty_smc_handler(uint32_t smc_fid
,
230 entry_point_info_t
*ep_info
= bl31_plat_get_next_image_ep_info(SECURE
);
233 * Return success for SET_ROT_PARAMS if Trusty is not present, as
234 * Verified Boot is not even supported and returning success here
235 * would not compromise the boot process.
237 if ((ep_info
== NULL
) && (smc_fid
== SMC_YC_SET_ROT_PARAMS
)) {
239 } else if (ep_info
== NULL
) {
240 SMC_RET1(handle
, SMC_UNK
);
245 if (is_caller_secure(flags
)) {
246 if (smc_fid
== SMC_YC_NS_RETURN
) {
247 ret
= trusty_context_switch(SECURE
, x1
, 0, 0, 0);
248 SMC_RET8(handle
, ret
.r0
, ret
.r1
, ret
.r2
, ret
.r3
,
249 ret
.r4
, ret
.r5
, ret
.r6
, ret
.r7
);
251 INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) \
252 cpu %d, unknown smc\n",
253 __func__
, smc_fid
, x1
, x2
, x3
, x4
, cookie
, handle
, flags
,
255 SMC_RET1(handle
, SMC_UNK
);
258 case SMC_FC64_SET_FIQ_HANDLER
:
259 return trusty_set_fiq_handler(handle
, x1
, x2
, x3
);
260 case SMC_FC64_GET_FIQ_REGS
:
261 return trusty_get_fiq_regs(handle
);
262 case SMC_FC_FIQ_EXIT
:
263 return trusty_fiq_exit(handle
, x1
, x2
, x3
);
265 if (is_hypervisor_mode())
266 vmid
= SMC_GET_GP(handle
, CTX_GPREG_X7
);
268 if ((current_vmid
!= 0) && (current_vmid
!= vmid
)) {
269 /* This message will cause SMC mechanism
270 * abnormal in multi-guest environment.
271 * Change it to WARN in case you need it.
273 VERBOSE("Previous SMC not finished.\n");
274 SMC_RET1(handle
, SM_ERR_BUSY
);
277 ret
= trusty_context_switch(NON_SECURE
, smc_fid
, x1
,
280 SMC_RET1(handle
, ret
.r0
);
285 static int32_t trusty_init(void)
287 entry_point_info_t
*ep_info
;
288 struct smc_args zero_args
= {0};
289 struct trusty_cpu_ctx
*ctx
= get_trusty_ctx();
290 uint32_t cpu
= plat_my_core_pos();
291 uint64_t reg_width
= GET_RW(read_ctx_reg(get_el3state_ctx(&ctx
->cpu_ctx
),
295 * Get information about the Trusty image. Its absence is a critical
298 ep_info
= bl31_plat_get_next_image_ep_info(SECURE
);
299 assert(ep_info
!= NULL
);
301 fpregs_context_save(get_fpregs_ctx(cm_get_context(NON_SECURE
)));
302 cm_el1_sysregs_context_save(NON_SECURE
);
304 cm_set_context(&ctx
->cpu_ctx
, SECURE
);
305 cm_init_my_context(ep_info
);
308 * Adjust secondary cpu entry point for 32 bit images to the
309 * end of exception vectors
311 if ((cpu
!= 0U) && (reg_width
== MODE_RW_32
)) {
312 INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
313 cpu
, ep_info
->pc
+ (1U << 5));
314 cm_set_elr_el3(SECURE
, ep_info
->pc
+ (1U << 5));
317 cm_el1_sysregs_context_restore(SECURE
);
318 fpregs_context_restore(get_fpregs_ctx(cm_get_context(SECURE
)));
319 cm_set_next_eret_context(SECURE
);
321 ctx
->saved_security_state
= ~0U; /* initial saved state is invalid */
322 (void)trusty_init_context_stack(&ctx
->saved_sp
, &ctx
->secure_stack
.end
);
324 (void)trusty_context_switch_helper(&ctx
->saved_sp
, &zero_args
);
326 cm_el1_sysregs_context_restore(NON_SECURE
);
327 fpregs_context_restore(get_fpregs_ctx(cm_get_context(NON_SECURE
)));
328 cm_set_next_eret_context(NON_SECURE
);
333 static void trusty_cpu_suspend(uint32_t off
)
337 ret
= trusty_context_switch(NON_SECURE
, SMC_FC_CPU_SUSPEND
, off
, 0, 0);
339 INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %lld\n",
340 __func__
, plat_my_core_pos(), ret
.r0
);
344 static void trusty_cpu_resume(uint32_t on
)
348 ret
= trusty_context_switch(NON_SECURE
, SMC_FC_CPU_RESUME
, on
, 0, 0);
350 INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %lld\n",
351 __func__
, plat_my_core_pos(), ret
.r0
);
355 static int32_t trusty_cpu_off_handler(u_register_t max_off_lvl
)
357 trusty_cpu_suspend(max_off_lvl
);
362 static void trusty_cpu_on_finish_handler(u_register_t max_off_lvl
)
364 struct trusty_cpu_ctx
*ctx
= get_trusty_ctx();
366 if (ctx
->saved_sp
== NULL
) {
369 trusty_cpu_resume(max_off_lvl
);
373 static void trusty_cpu_suspend_handler(u_register_t max_off_lvl
)
375 trusty_cpu_suspend(max_off_lvl
);
378 static void trusty_cpu_suspend_finish_handler(u_register_t max_off_lvl
)
380 trusty_cpu_resume(max_off_lvl
);
383 static const spd_pm_ops_t trusty_pm
= {
384 .svc_off
= trusty_cpu_off_handler
,
385 .svc_suspend
= trusty_cpu_suspend_handler
,
386 .svc_on_finish
= trusty_cpu_on_finish_handler
,
387 .svc_suspend_finish
= trusty_cpu_suspend_finish_handler
,
390 void plat_trusty_set_boot_args(aapcs64_params_t
*args
);
392 #ifdef TSP_SEC_MEM_SIZE
393 #pragma weak plat_trusty_set_boot_args
394 void plat_trusty_set_boot_args(aapcs64_params_t
*args
)
396 args
->arg0
= TSP_SEC_MEM_SIZE
;
400 static int32_t trusty_setup(void)
402 entry_point_info_t
*ep_info
;
406 bool aarch32
= false;
408 /* Get trusty's entry point info */
409 ep_info
= bl31_plat_get_next_image_ep_info(SECURE
);
410 if (ep_info
== NULL
) {
411 INFO("Trusty image missing.\n");
415 instr
= *(uint32_t *)ep_info
->pc
;
417 if (instr
>> 24 == 0xeaU
) {
418 INFO("trusty: Found 32 bit image\n");
420 } else if (instr
>> 8 == 0xd53810U
|| instr
>> 16 == 0x9400U
) {
421 INFO("trusty: Found 64 bit image\n");
423 ERROR("trusty: Found unknown image, 0x%x\n", instr
);
427 SET_PARAM_HEAD(ep_info
, PARAM_EP
, VERSION_1
, SECURE
| EP_ST_ENABLE
);
429 ep_info
->spsr
= SPSR_64(MODE_EL1
, MODE_SP_ELX
,
430 DISABLE_ALL_EXCEPTIONS
);
432 ep_info
->spsr
= SPSR_MODE32(MODE32_svc
, SPSR_T_ARM
,
437 (void)memset(&ep_info
->args
, 0, sizeof(ep_info
->args
));
438 plat_trusty_set_boot_args(&ep_info
->args
);
440 /* register init handler */
441 bl31_register_bl32_init(trusty_init
);
443 /* register power management hooks */
444 psci_register_spd_pm_hook(&trusty_pm
);
446 /* register interrupt handler */
448 set_interrupt_rm_flag(flags
, NON_SECURE
);
449 ret
= register_interrupt_type_handler(INTR_TYPE_S_EL1
,
453 ERROR("trusty: failed to register fiq handler, ret = %d\n", ret
);
457 entry_point_info_t
*ns_ep_info
;
460 ns_ep_info
= bl31_plat_get_next_image_ep_info(NON_SECURE
);
461 if (ns_ep_info
== NULL
) {
462 NOTICE("Trusty: non-secure image missing.\n");
465 spsr
= ns_ep_info
->spsr
;
466 if (GET_RW(spsr
) == MODE_RW_64
&& GET_EL(spsr
) == MODE_EL2
) {
467 spsr
&= ~(MODE_EL_MASK
<< MODE_EL_SHIFT
);
468 spsr
|= MODE_EL1
<< MODE_EL_SHIFT
;
470 if (GET_RW(spsr
) == MODE_RW_32
&& GET_M32(spsr
) == MODE32_hyp
) {
471 spsr
&= ~(MODE32_MASK
<< MODE32_SHIFT
);
472 spsr
|= MODE32_svc
<< MODE32_SHIFT
;
474 if (spsr
!= ns_ep_info
->spsr
) {
475 NOTICE("Trusty: Switch bl33 from EL2 to EL1 (spsr 0x%x -> 0x%x)\n",
476 ns_ep_info
->spsr
, spsr
);
477 ns_ep_info
->spsr
= spsr
;
/* Define a SPD runtime service descriptor for fast SMC calls */
/* NOTE(review): both DECLARE_RT_SVC invocations were truncated to
 * fragments in the source; reconstructed with the conventional
 * fast/yield pairing for a Trusted-OS dispatcher — verify the OEN
 * ranges against the SMC Calling Convention (ARM DEN0028). */
DECLARE_RT_SVC(
	trusty_fast,

	OEN_TOS_START,
	SMC_ENTITY_SECURE_MONITOR,
	SMC_TYPE_FAST,
	trusty_setup,
	trusty_smc_handler
);

/* Define a SPD runtime service descriptor for yielding SMC calls */
DECLARE_RT_SVC(
	trusty_std,

	OEN_TAP_START,
	SMC_ENTITY_SECURE_MONITOR,
	SMC_TYPE_YIELD,
	NULL,
	trusty_smc_handler
);