/*
 * bl32/tsp/aarch64/tsp_entrypoint.S
 * (Imported via dp-arm/dp/spdx SPDX-license-header change, PR #925.)
 */
/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <tsp.h>
#include <xlat_tables_defs.h>
#include "../tsp_private.h"
12
13
	/* Symbols exported to the TSPD / linker script */
	.globl	tsp_entrypoint
	.globl	tsp_vector_table
16
17
18
	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0, then return
	 * to the TSPD via SMC.
	 * x0 is the base pointer, so it is deliberately
	 * overwritten last (TSP_ARG0 load).
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]
	smc	#0
	.endm
31
	/*
	 * Save the state needed for a later exception return from S-EL1:
	 * ELR_EL1/SPSR_EL1 (read through \reg1/\reg2, which are clobbered)
	 * plus x30 (LR) and x18, pushed onto the stack with pre-decrement.
	 * Must be exactly mirrored by restore_eret_context.
	 * NOTE(review): x18 is the AAPCS64 platform register — presumably
	 * saved because intervening C code may clobber it; confirm.
	 */
	.macro save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm
38
	/*
	 * Inverse of save_eret_context: pop x30/x18, then pop and write
	 * back ELR_EL1/SPSR_EL1 (via \reg1/\reg2, which are clobbered).
	 * Pop order must stay the exact reverse of the push order above.
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
45
	/* Code section, 8-byte (2^3) aligned */
	.section	.text, "ax"
	.align 3
48
/* ---------------------------------------------------------------------
 * Cold-boot entry point of the TSP (BL32), entered from the TSPD on the
 * primary CPU with caches/MMU off. Sets up EL1 exception vectors, SCTLR,
 * invalidates/zeroes RW memory, allocates a stack, runs platform setup
 * and tsp_main, then reports TSP_ENTRY_DONE to the TSPD via SMC.
 * Clobbers: x0, x1 and whatever the called C routines clobber.
 * ---------------------------------------------------------------------
 */
func tsp_entrypoint

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * (SCTLR_EL1.I / .SA / .A).
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * (x0 = base, x1 = size for inv_dcache_range.)
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section (if enabled).
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform early platform setup & platform
	 * specific early arch. setup e.g. mmu setup
	 * ---------------------------------------------
	 */
	bl	tsp_early_platform_setup
	bl	tsp_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising.
	 * x1 carries tsp_main's return value (x0).
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

	/* The SMC above should never return */
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint
151
152
	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints.
	 * Each entry is a single branch instruction;
	 * the order here is part of the TSP<->TSPD
	 * contract and must not be changed.
	 * -------------------------------------------
	 */
func tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry
endfunc tsp_vector_table
170
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request
	 * (via the SMC issued by restore_args_call_smc).
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc
endfunc tsp_cpu_off_entry
185
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping, then
	 * returns to the TSPD via SMC.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc
endfunc tsp_system_off_entry
197
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping, then
	 * returns to the TSPD via SMC.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry
209
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. setup
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * The ordering below (vectors -> SCTLR -> stack
	 * -> MMU with D$ off -> D$ on) is deliberate;
	 * see the per-step comments.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * (SCTLR_EL1.I / .SA / .A).
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled. It
	 * is safe to use stacks allocated in normal
	 * memory as a result. All memory accesses are
	 * marked nGnRnE when the MMU is disabled. So
	 * all the stack writes will make it to memory.
	 * All memory accesses are marked Non-cacheable
	 * when the MMU is enabled but D$ is disabled.
	 * So used stack memory is guaranteed to be
	 * visible immediately after the MMU is enabled
	 * Enabling the DCache at the same time as the
	 * MMU can lead to speculatively fetched and
	 * possibly stale stack memory being read from
	 * other caches. This can lead to coherency
	 * issues.
	 * --------------------------------------------
	 */
	mov	x0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

	/* ---------------------------------------------
	 * Enable the Data cache now that the MMU has
	 * been enabled. The stack has been unwound. It
	 * will be written first before being read. This
	 * will invalidate any stale cache lines resi-
	 * -dent in other caches. We assume that
	 * interconnect coherency has been enabled for
	 * this cluster by EL3 firmware.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping, then return to the TSPD by SMC.
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry
299
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state. Returns to the TSPD via SMC.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry
313
	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling a S-EL1
	 * Interrupt which was triggered while executing
	 * in normal world. 'x0' contains a magic number
	 * which indicates this. TSPD expects control to
	 * be handed back at the end of interrupt
	 * processing. This is done through an SMC.
	 * The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func tsp_sel1_intr_entry
#if DEBUG
	/* Sanity-check the magic number passed in x0 (debug builds only) */
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous Non secure Interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. Group 0 interrupt in GICv3. In both
	 * the cases switch to EL3 using SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled (handler returned 0) */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Non-zero return is only legal if it is TSP_PREEMPTED */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	/* Hand back to the TSPD; restore order must mirror save_eret_context */
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry
382
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request via SMC.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry
400
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request, with
	 * interrupts left masked for the duration.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry
413
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler by unmasking IRQ/FIQ
	 * around the call, re-masking them before
	 * returning to the TSPD.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry
430
431 /*---------------------------------------------------------------------
432 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
433 * SMC. It could be on behalf of non-secure world or because a CPU
434 * suspend/CPU off request needs to abort the preempted SMC.
435 * --------------------------------------------------------------------
436 */
437 func tsp_abort_yield_smc_entry
438
439 /*
440 * Exceptions masking is already done by the TSPD when entering this
441 * hook so there is no need to do it here.
442 */
443
444 /* Reset the stack used by the pre-empted SMC */
445 bl plat_set_my_stack
446
447 /*
448 * Allow some cleanup such as releasing locks.
449 */
450 bl tsp_abort_smc_handler
451
452 restore_args_call_smc
453
454 /* Should never reach here */
455 bl plat_panic_handler
456 endfunc tsp_abort_yield_smc_entry