/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <bl31/interrupt_mgmt.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/smccc.h>

	.globl	runtime_exceptions

	.globl	sync_exception_sp_el0
	.globl	irq_sp_el0
	.globl	fiq_sp_el0
	.globl	serror_sp_el0

	.globl	sync_exception_sp_elx
	.globl	irq_sp_elx
	.globl	fiq_sp_elx
	.globl	serror_sp_elx

	.globl	sync_exception_aarch64
	.globl	irq_aarch64
	.globl	fiq_aarch64
	.globl	serror_aarch64

	.globl	sync_exception_aarch32
	.globl	irq_aarch32
	.globl	fiq_aarch32
	.globl	serror_aarch32

	/*
	 * Macro that prepares entry to EL3 upon taking an exception.
	 *
	 * With RAS_EXTENSION, this macro synchronizes pending errors with an
	 * ESB instruction. When an error is thus synchronized, the handling is
	 * delegated to the platform EA handler.
	 *
	 * Without RAS_EXTENSION, this macro just saves x30 and unmasks
	 * Asynchronous External Aborts.
	 */
	.macro	check_and_unmask_ea
#if RAS_EXTENSION
	/* Synchronize pending External Aborts */
	esb

	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching.
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

	/* Check for SErrors synchronized by the ESB instruction */
	mrs	x30, disr_el1
	tbz	x30, #DISR_A_BIT, 1f
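	/*
	 * With the RAS extension, an ESB executed while SError is masked
	 * defers a pending SError and records its syndrome in DISR_EL1,
	 * setting DISR_EL1.A; that is the bit tested above.
	 */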
	/* Save GP registers and restore them afterwards */
	bl	save_gp_registers

	/*
	 * If the Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable all event
	 * counters and the cycle counter.
	 */
	bl	save_pmcr_disable_pmu

	bl	handle_lower_el_ea_esb
	bl	restore_gp_registers

1:
#else
	/* Unmask the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
#endif
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles Synchronous exceptions.
	 * Only SMC exceptions are supported.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_sync_exception
#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Read the timestamp value and store it in per-cpu data. The value
	 * will be extracted from per-cpu data by the C level SMC handler and
	 * saved to the PMF timestamp region.
	 */
	mrs	x30, cntpct_el0
	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
	mrs	x29, tpidr_el3
	str	x30, [x29, #CPU_DATA_PMF_TS0_OFFSET]
	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
#endif

	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
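	/*
	 * x30 now holds the exception class from ESR_EL3. The classes checked
	 * below use the architectural encodings 0x13 (SMC executed in AArch32
	 * state) and 0x17 (SMC executed in AArch64 state).
	 */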
	/* Handle SMC exceptions separately from other synchronous exceptions */
	cmp	x30, #EC_AARCH32_SMC
	b.eq	smc_handler32

	cmp	x30, #EC_AARCH64_SMC
	b.eq	smc_handler64

	/* Synchronous exceptions other than the above are assumed to be EA */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	b	enter_lower_el_sync_ea
	.endm

	/* ---------------------------------------------------------------------
	 * This macro handles FIQ or IRQ interrupts i.e. EL3, S-EL1 and NS
	 * interrupts.
	 * ---------------------------------------------------------------------
	 */
	.macro	handle_interrupt_exception label

	/* Save general purpose registers */
	bl	save_gp_registers

	/*
	 * If the Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable all event
	 * counters and the cycle counter.
	 */
	bl	save_pmcr_disable_pmu

	/* Save ARMv8.3-PAuth registers and load the firmware key */
#if CTX_INCLUDE_PAUTH_REGS
	bl	pauth_context_save
#endif
#if ENABLE_PAUTH
	bl	pauth_load_bl_apiakey
#endif

	/* Save the EL3 system registers needed to return from this exception */
	mrs	x0, spsr_el3
	mrs	x1, elr_el3
	stp	x0, x1, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/* Switch to the runtime stack i.e. SP_EL0 */
	ldr	x2, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	mov	x20, sp
	msr	spsel, #MODE_SP_EL0
	mov	sp, x2

	/*
	 * Find out whether this is a valid interrupt type.
	 * If the interrupt controller reports a spurious interrupt then return
	 * to where we came from.
	 */
	bl	plat_ic_get_pending_interrupt_type
	cmp	x0, #INTR_TYPE_INVAL
	b.eq	interrupt_exit_\label
	/*
	 * Get the registered handler for this interrupt type.
	 * A NULL return value could result from the following conditions:
	 *
	 * a. An interrupt of a type was routed correctly but a handler for its
	 *    type was not registered.
	 *
	 * b. An interrupt of a type was not routed correctly so a handler for
	 *    its type was not registered.
	 *
	 * c. An interrupt of a type was routed correctly to EL3, but was
	 *    deasserted before its pending state could be read. Another
	 *    interrupt of a different type pended at the same time and its
	 *    type was reported as pending instead. However, a handler for this
	 *    type was not registered.
	 *
	 * a. and b. can only happen due to a programming error. The
	 * occurrence of c. could be beyond the control of Trusted Firmware.
	 * It makes sense to return from this exception instead of reporting an
	 * error.
	 */
	bl	get_interrupt_type_handler
	cbz	x0, interrupt_exit_\label
	mov	x21, x0

	mov	x0, #INTR_ID_UNAVAILABLE

	/* Set the current security state in the 'flags' parameter */
	mrs	x2, scr_el3
	ubfx	x1, x2, #0, #1

	/* Restore the reference to the 'handle' i.e. SP_EL3 */
	mov	x2, x20

	/* x3 will point to a cookie (not used now) */
	mov	x3, xzr
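	/*
	 * The x0-x3 values set up above follow TF-A's interrupt type handler
	 * prototype, which is roughly:
	 * uint64_t handler(uint32_t id, uint32_t flags, void *handle,
	 *                  void *cookie);
	 */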
	/* Call the interrupt type handler */
	blr	x21

interrupt_exit_\label:
	/* Return from exception, possibly in a different security state */
	b	el3_exit
	.endm

vector_base runtime_exceptions

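	/*
	 * The table below has the architectural AArch64 layout: 16 entries of
	 * 0x80 bytes each, in four groups of four (Synchronous, IRQ, FIQ,
	 * SError), selected by the state the exception was taken from. The
	 * address ranges in the group headers are offsets from the table base.
	 */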
	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_el0
	/* We don't expect any synchronous exceptions from EL3 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_el0

vector_entry irq_sp_el0
	/*
	 * EL3 code is non-reentrant. Any asynchronous exception is a serious
	 * error. Loop infinitely.
	 */
	b	report_unhandled_interrupt
end_vector_entry irq_sp_el0

vector_entry fiq_sp_el0
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_el0

vector_entry serror_sp_el0
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_sp_elx
	/*
	 * This exception will trigger if anything went wrong during a previous
	 * exception entry or exit or while handling an earlier unexpected
	 * synchronous exception. There is a high probability that SP_EL3 is
	 * corrupted.
	 */
	b	report_unhandled_exception
end_vector_entry sync_exception_sp_elx

vector_entry irq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry irq_sp_elx

vector_entry fiq_sp_elx
	b	report_unhandled_interrupt
end_vector_entry fiq_sp_elx

vector_entry serror_sp_elx
	no_ret	plat_handle_el3_ea
end_vector_entry serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch64
	/*
	 * This exception vector will most commonly be the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

vector_entry irq_aarch64
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch64
end_vector_entry irq_aarch64

vector_entry fiq_aarch64
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch64
end_vector_entry fiq_aarch64

vector_entry serror_aarch64
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
end_vector_entry serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry sync_exception_aarch32
	/*
	 * This exception vector will most commonly be the entry point for SMCs
	 * and for traps that are unhandled at lower ELs. SP_EL3 should point
	 * to a valid cpu context where the general purpose and system register
	 * state can be saved.
	 */
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch32

vector_entry irq_aarch32
	check_and_unmask_ea
	handle_interrupt_exception irq_aarch32
end_vector_entry irq_aarch32

vector_entry fiq_aarch32
	check_and_unmask_ea
	handle_interrupt_exception fiq_aarch32
end_vector_entry fiq_aarch32

vector_entry serror_aarch32
	msr	daifclr, #DAIF_ABT_BIT
	b	enter_lower_el_async_ea
end_vector_entry serror_aarch32

	/* ---------------------------------------------------------------------
	 * The following code handles secure monitor calls.
	 * Depending upon the execution state from which the SMC has been
	 * invoked, it frees some general purpose registers to perform the
	 * remaining tasks. They involve finding the runtime service handler
	 * that is the target of the SMC and switching to the runtime stack
	 * (SP_EL0) before calling the handler.
	 *
	 * Note that x30 has been explicitly saved and can be used here.
	 * ---------------------------------------------------------------------
	 */
func smc_handler
smc_handler32:
	/* Check whether aarch32 issued an SMC64 */
	tbnz	x0, #FUNCID_CC_SHIFT, smc_prohibited

smc_handler64:
	/* NOTE: The code below must preserve x0-x4 */
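	/*
	 * x0 holds the SMC function identifier and x1-x4 hold the SMC
	 * arguments. They are forwarded unmodified to the runtime service
	 * handler, whose prototype in TF-A is roughly:
	 * handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags).
	 */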
	/* Save general purpose registers */
	bl	save_gp_registers

	/*
	 * If the Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable all event
	 * counters and the cycle counter.
	 */
	bl	save_pmcr_disable_pmu

	/* Save ARMv8.3-PAuth registers and load the firmware key */
#if CTX_INCLUDE_PAUTH_REGS
	bl	pauth_context_save
#endif
#if ENABLE_PAUTH
	bl	pauth_load_bl_apiakey
#endif

	/*
	 * Populate the parameters for the SMC handler.
	 * We already have x0-x4 in place. x5 will point to a cookie (not used
	 * now). x6 will point to the context structure (SP_EL3) and x7 will
	 * contain flags we need to pass to the handler.
	 */
	mov	x5, xzr
	mov	x6, sp

	/*
	 * Restore the saved C runtime stack value which will become the new
	 * SP_EL0 i.e. EL3 runtime stack. It was saved in the 'cpu_context'
	 * structure prior to the last ERET from EL3.
	 */
	ldr	x12, [x6, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]

	/* Switch to SP_EL0 */
	msr	spsel, #MODE_SP_EL0

	/*
	 * Save the SPSR_EL3, ELR_EL3 and SCR_EL3 in case there is a world
	 * switch during SMC handling.
	 * TODO: Revisit if all system registers can be saved later.
	 */
	mrs	x16, spsr_el3
	mrs	x17, elr_el3
	mrs	x18, scr_el3
	stp	x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	str	x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/* Copy SCR_EL3.NS bit to the flag to indicate the caller's security */
	bfi	x7, x18, #0, #1

	mov	sp, x12

	/* Get the unique owning entity number */
	ubfx	x16, x0, #FUNCID_OEN_SHIFT, #FUNCID_OEN_WIDTH
	ubfx	x15, x0, #FUNCID_TYPE_SHIFT, #FUNCID_TYPE_WIDTH
	orr	x16, x16, x15, lsl #FUNCID_OEN_WIDTH
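	/*
	 * x16 is now (call type << FUNCID_OEN_WIDTH) | OEN. For example,
	 * assuming FUNCID_OEN_WIDTH is 6, a fast call (type 1) to OEN 4
	 * (Standard Secure Service calls, e.g. PSCI) yields the index
	 * (1 << 6) | 4 = 68.
	 */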
	/* Load descriptor index from array of indices */
	adr	x14, rt_svc_descs_indices
	ldrb	w15, [x14, x16]

	/* Any index greater than 127 is invalid. Check bit 7. */
	tbnz	w15, 7, smc_unknown
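	/*
	 * Note: rt_svc_descs_indices is populated at runtime service init
	 * time; entries for (OEN, call type) pairs with no registered service
	 * are expected to hold -1 (0xFF), hence the bit 7 check above.
	 */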
	/*
	 * Get the descriptor using the index
	 * x11 = (base + off), w15 = index
	 *
	 * handler = (base + off) + (index << log2(size))
	 */
	adr	x11, (__RT_SVC_DESCS_START__ + RT_SVC_DESC_HANDLE)
	lsl	w10, w15, #RT_SVC_SIZE_LOG2
	ldr	x15, [x11, w10, uxtw]
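	/*
	 * A rough C equivalent of the lookup above, assuming the rt_svc_desc_t
	 * descriptors are laid out contiguously from __RT_SVC_DESCS_START__:
	 *
	 *	rt_svc_desc_t *descs = (rt_svc_desc_t *)__RT_SVC_DESCS_START__;
	 *	handler = descs[index].handle;
	 */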
	/*
	 * Call the Secure Monitor Call handler and then drop directly into
	 * el3_exit(), which will program any remaining architectural state
	 * prior to issuing the ERET to the desired lower EL.
	 */
#if DEBUG
	cbz	x15, rt_svc_fw_critical_error
#endif
	blr	x15

	b	el3_exit

smc_unknown:
	/*
	 * Unknown SMC call. Populate the return value with SMC_UNK and call
	 * el3_exit(), which will restore the remaining architectural state,
	 * i.e. the SYS, GP and PAuth registers (if any), prior to issuing the
	 * ERET to the desired lower EL.
	 */
	mov	x0, #SMC_UNK
	str	x0, [x6, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	b	el3_exit

smc_prohibited:
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	mov	x0, #SMC_UNK
	eret

rt_svc_fw_critical_error:
	/* Switch to SP_ELx */
	msr	spsel, #MODE_SP_ELX
	no_ret	report_unhandled_exception
endfunc smc_handler