/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <arch.h>
#include <asm_macros.S>
#include <tsp.h>
#include <xlat_tables_defs.h>
#include "../tsp_private.h"
15 .globl tsp_vector_table
19 /* ---------------------------------------------
20 * Populate the params in x0-x7 from the pointer
21 * to the smc args structure in x0.
22 * ---------------------------------------------
24 .macro restore_args_call_smc
25 ldp x6, x7, [x0, #TSP_ARG6]
26 ldp x4, x5, [x0, #TSP_ARG4]
27 ldp x2, x3, [x0, #TSP_ARG2]
28 ldp x0, x1, [x0, #TSP_ARG0]
32 .macro save_eret_context reg1 reg2
35 stp \reg1, \reg2, [sp, #-0x10]!
36 stp x30, x18, [sp, #-0x10]!
39 .macro restore_eret_context reg1 reg2
40 ldp x30, x18, [sp], #0x10
41 ldp \reg1, \reg2, [sp], #0x10
51 /* ---------------------------------------------
52 * Set the exception vector to something sane.
53 * ---------------------------------------------
55 adr x0, tsp_exceptions
59 /* ---------------------------------------------
60 * Enable the SError interrupt now that the
61 * exception vectors have been setup.
62 * ---------------------------------------------
64 msr daifclr, #DAIF_ABT_BIT
66 /* ---------------------------------------------
67 * Enable the instruction cache, stack pointer
68 * and data access alignment checks
69 * ---------------------------------------------
71 mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
77 /* ---------------------------------------------
78 * Invalidate the RW memory used by the BL32
79 * image. This includes the data and NOBITS
80 * sections. This is done to safeguard against
81 * possible corruption of this memory by dirty
82 * cache lines in a system cache as a result of
83 * use by an earlier boot loader stage.
84 * ---------------------------------------------
91 /* ---------------------------------------------
92 * Zero out NOBITS sections. There are 2 of them:
94 * - the coherent memory section.
95 * ---------------------------------------------
97 ldr x0, =__BSS_START__
102 ldr x0, =__COHERENT_RAM_START__
103 ldr x1, =__COHERENT_RAM_UNALIGNED_SIZE__
107 /* --------------------------------------------
108 * Allocate a stack whose memory will be marked
109 * as Normal-IS-WBWA when the MMU is enabled.
110 * There is no risk of reading stale stack
111 * memory after enabling the MMU as only the
112 * primary cpu is running at the moment.
113 * --------------------------------------------
117 /* ---------------------------------------------
118 * Initialize the stack protector canary before
119 * any C code is called.
120 * ---------------------------------------------
122 #if STACK_PROTECTOR_ENABLED
123 bl update_stack_protector_canary
126 /* ---------------------------------------------
127 * Perform early platform setup & platform
128 * specific early arch. setup e.g. mmu setup
129 * ---------------------------------------------
131 bl tsp_early_platform_setup
132 bl tsp_plat_arch_setup
134 /* ---------------------------------------------
135 * Jump to main function.
136 * ---------------------------------------------
140 /* ---------------------------------------------
141 * Tell TSPD that we are done initialising
142 * ---------------------------------------------
145 mov x0, #TSP_ENTRY_DONE
148 tsp_entrypoint_panic:
149 b tsp_entrypoint_panic
150 endfunc tsp_entrypoint
153 /* -------------------------------------------
154 * Table of entrypoint vectors provided to the
155 * TSPD for the various entrypoints
156 * -------------------------------------------
158 func tsp_vector_table
159 b tsp_yield_smc_entry
163 b tsp_cpu_resume_entry
164 b tsp_cpu_suspend_entry
165 b tsp_sel1_intr_entry
166 b tsp_system_off_entry
167 b tsp_system_reset_entry
168 b tsp_abort_yield_smc_entry
169 endfunc tsp_vector_table
171 /*---------------------------------------------
172 * This entrypoint is used by the TSPD when this
173 * cpu is to be turned off through a CPU_OFF
174 * psci call to ask the TSP to perform any
175 * bookeeping necessary. In the current
176 * implementation, the TSPD expects the TSP to
177 * re-initialise its state so nothing is done
178 * here except for acknowledging the request.
179 * ---------------------------------------------
181 func tsp_cpu_off_entry
183 restore_args_call_smc
184 endfunc tsp_cpu_off_entry
186 /*---------------------------------------------
187 * This entrypoint is used by the TSPD when the
188 * system is about to be switched off (through
189 * a SYSTEM_OFF psci call) to ask the TSP to
190 * perform any necessary bookkeeping.
191 * ---------------------------------------------
193 func tsp_system_off_entry
194 bl tsp_system_off_main
195 restore_args_call_smc
196 endfunc tsp_system_off_entry
198 /*---------------------------------------------
199 * This entrypoint is used by the TSPD when the
200 * system is about to be reset (through a
201 * SYSTEM_RESET psci call) to ask the TSP to
202 * perform any necessary bookkeeping.
203 * ---------------------------------------------
205 func tsp_system_reset_entry
206 bl tsp_system_reset_main
207 restore_args_call_smc
208 endfunc tsp_system_reset_entry
210 /*---------------------------------------------
211 * This entrypoint is used by the TSPD when this
212 * cpu is turned on using a CPU_ON psci call to
213 * ask the TSP to initialise itself i.e. setup
214 * the mmu, stacks etc. Minimal architectural
215 * state will be initialised by the TSPD when
216 * this function is entered i.e. Caches and MMU
217 * will be turned off, the execution state
218 * will be aarch64 and exceptions masked.
219 * ---------------------------------------------
221 func tsp_cpu_on_entry
222 /* ---------------------------------------------
223 * Set the exception vector to something sane.
224 * ---------------------------------------------
226 adr x0, tsp_exceptions
230 /* Enable the SError interrupt */
231 msr daifclr, #DAIF_ABT_BIT
233 /* ---------------------------------------------
234 * Enable the instruction cache, stack pointer
235 * and data access alignment checks
236 * ---------------------------------------------
238 mov x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
244 /* --------------------------------------------
245 * Give ourselves a stack whose memory will be
246 * marked as Normal-IS-WBWA when the MMU is
248 * --------------------------------------------
252 /* --------------------------------------------
253 * Enable the MMU with the DCache disabled. It
254 * is safe to use stacks allocated in normal
255 * memory as a result. All memory accesses are
256 * marked nGnRnE when the MMU is disabled. So
257 * all the stack writes will make it to memory.
258 * All memory accesses are marked Non-cacheable
259 * when the MMU is enabled but D$ is disabled.
260 * So used stack memory is guaranteed to be
261 * visible immediately after the MMU is enabled
262 * Enabling the DCache at the same time as the
263 * MMU can lead to speculatively fetched and
264 * possibly stale stack memory being read from
265 * other caches. This can lead to coherency
267 * --------------------------------------------
269 mov x0, #DISABLE_DCACHE
270 bl bl32_plat_enable_mmu
272 /* ---------------------------------------------
273 * Enable the Data cache now that the MMU has
274 * been enabled. The stack has been unwound. It
275 * will be written first before being read. This
276 * will invalidate any stale cache lines resi-
277 * -dent in other caches. We assume that
278 * interconnect coherency has been enabled for
279 * this cluster by EL3 firmware.
280 * ---------------------------------------------
283 orr x0, x0, #SCTLR_C_BIT
287 /* ---------------------------------------------
288 * Enter C runtime to perform any remaining
290 * ---------------------------------------------
293 restore_args_call_smc
295 /* Should never reach here */
296 tsp_cpu_on_entry_panic:
297 b tsp_cpu_on_entry_panic
298 endfunc tsp_cpu_on_entry
300 /*---------------------------------------------
301 * This entrypoint is used by the TSPD when this
302 * cpu is to be suspended through a CPU_SUSPEND
303 * psci call to ask the TSP to perform any
304 * bookeeping necessary. In the current
305 * implementation, the TSPD saves and restores
307 * ---------------------------------------------
309 func tsp_cpu_suspend_entry
310 bl tsp_cpu_suspend_main
311 restore_args_call_smc
312 endfunc tsp_cpu_suspend_entry
314 /*-------------------------------------------------
315 * This entrypoint is used by the TSPD to pass
316 * control for `synchronously` handling a S-EL1
317 * Interrupt which was triggered while executing
318 * in normal world. 'x0' contains a magic number
319 * which indicates this. TSPD expects control to
320 * be handed back at the end of interrupt
321 * processing. This is done through an SMC.
322 * The handover agreement is:
324 * 1. PSTATE.DAIF are set upon entry. 'x1' has
325 * the ELR_EL3 from the non-secure state.
326 * 2. TSP has to preserve the callee saved
327 * general purpose registers, SP_EL1/EL0 and
329 * 3. TSP has to preserve the system and vfp
330 * registers (if applicable).
331 * 4. TSP can use 'x0-x18' to enable its C
333 * 5. TSP returns to TSPD using an SMC with
334 * 'x0' = TSP_HANDLED_S_EL1_INTR
335 * ------------------------------------------------
337 func tsp_sel1_intr_entry
339 mov_imm x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
341 b.ne tsp_sel1_int_entry_panic
343 /*-------------------------------------------------
344 * Save any previous context needed to perform
345 * an exception return from S-EL1 e.g. context
346 * from a previous Non secure Interrupt.
347 * Update statistics and handle the S-EL1
348 * interrupt before returning to the TSPD.
349 * IRQ/FIQs are not enabled since that will
350 * complicate the implementation. Execution
351 * will be transferred back to the normal world
352 * in any case. The handler can return 0
353 * if the interrupt was handled or TSP_PREEMPTED
354 * if the expected interrupt was preempted
355 * by an interrupt that should be handled in EL3
356 * e.g. Group 0 interrupt in GICv3. In both
357 * the cases switch to EL3 using SMC with id
358 * TSP_HANDLED_S_EL1_INTR. Any other return value
359 * from the handler will result in panic.
360 * ------------------------------------------------
362 save_eret_context x2 x3
363 bl tsp_update_sync_sel1_intr_stats
364 bl tsp_common_int_handler
365 /* Check if the S-EL1 interrupt has been handled */
366 cbnz x0, tsp_sel1_intr_check_preemption
367 b tsp_sel1_intr_return
368 tsp_sel1_intr_check_preemption:
369 /* Check if the S-EL1 interrupt has been preempted */
370 mov_imm x1, TSP_PREEMPTED
372 b.ne tsp_sel1_int_entry_panic
373 tsp_sel1_intr_return:
374 mov_imm x0, TSP_HANDLED_S_EL1_INTR
375 restore_eret_context x2 x3
378 /* Should never reach here */
379 tsp_sel1_int_entry_panic:
380 no_ret plat_panic_handler
381 endfunc tsp_sel1_intr_entry
383 /*---------------------------------------------
384 * This entrypoint is used by the TSPD when this
385 * cpu resumes execution after an earlier
386 * CPU_SUSPEND psci call to ask the TSP to
387 * restore its saved context. In the current
388 * implementation, the TSPD saves and restores
389 * EL1 state so nothing is done here apart from
390 * acknowledging the request.
391 * ---------------------------------------------
393 func tsp_cpu_resume_entry
394 bl tsp_cpu_resume_main
395 restore_args_call_smc
397 /* Should never reach here */
398 no_ret plat_panic_handler
399 endfunc tsp_cpu_resume_entry
401 /*---------------------------------------------
402 * This entrypoint is used by the TSPD to ask
403 * the TSP to service a fast smc request.
404 * ---------------------------------------------
406 func tsp_fast_smc_entry
408 restore_args_call_smc
410 /* Should never reach here */
411 no_ret plat_panic_handler
412 endfunc tsp_fast_smc_entry
414 /*---------------------------------------------
415 * This entrypoint is used by the TSPD to ask
416 * the TSP to service a Yielding SMC request.
417 * We will enable preemption during execution
418 * of tsp_smc_handler.
419 * ---------------------------------------------
421 func tsp_yield_smc_entry
422 msr daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
424 msr daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
425 restore_args_call_smc
427 /* Should never reach here */
428 no_ret plat_panic_handler
429 endfunc tsp_yield_smc_entry
431 /*---------------------------------------------------------------------
432 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
433 * SMC. It could be on behalf of non-secure world or because a CPU
434 * suspend/CPU off request needs to abort the preempted SMC.
435 * --------------------------------------------------------------------
437 func tsp_abort_yield_smc_entry
440 * Exceptions masking is already done by the TSPD when entering this
441 * hook so there is no need to do it here.
444 /* Reset the stack used by the pre-empted SMC */
448 * Allow some cleanup such as releasing locks.
450 bl tsp_abort_smc_handler
452 restore_args_call_smc
454 /* Should never reach here */
455 bl plat_panic_handler
456 endfunc tsp_abort_yield_smc_entry