project/bcm63xx/atf.git: lib/cpus/aarch64/cpu_helpers.S
/*
 * Copyright (c) 2014-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <common/debug.h>
#include <cpu_macros.S>
#include <lib/cpus/errata_report.h>
#include <lib/el3_runtime/cpu_data.h>

/* The reset handler is only needed in BL images that run from the reset vector */
#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3)
/*
 * The reset handler common to all platforms. After a matching
 * cpu_ops structure entry is found, the corresponding reset_handler
 * in the cpu_ops is invoked.
 * Clobbers: x0 - x19, x30
 */
	.globl	reset_handler
func reset_handler
	mov	x19, x30

	/* The plat_reset_handler can clobber x0 - x18, x30 */
	bl	plat_reset_handler

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the cpu_ops reset handler */
	ldr	x2, [x0, #CPU_RESET_FUNC]
	mov	x30, x19
	cbz	x2, 1f

	/* The cpu_ops reset handler can clobber x0 - x19, x30 */
	br	x2
1:
	ret
endfunc reset_handler

#endif
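
/*
 * Illustrative only (not assembled): the reset flow above is roughly
 * equivalent to the following C, assuming the cpu_ops entry carries a
 * reset handler pointer at offset CPU_RESET_FUNC:
 *
 *	plat_reset_handler();
 *	cpu_ops_t *ops = get_cpu_ops_ptr();
 *	assert(ops != NULL);
 *	if (ops->reset_handler != NULL)
 *		ops->reset_handler();
 *
 * Note that the handler is entered with a tail branch (br x2), so it
 * returns directly to the caller of reset_handler.
 */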

#ifdef IMAGE_BL31 /* Core and cluster power down handling is only needed in BL31 */
/*
 * void prepare_cpu_pwr_dwn(unsigned int power_level)
 *
 * Prepare CPU power down function for all platforms. The function takes
 * a domain level to be powered down as its parameter. After the cpu_ops
 * pointer is retrieved from cpu_data, the handler for the requested power
 * level is called.
 */
	.globl	prepare_cpu_pwr_dwn
func prepare_cpu_pwr_dwn
	/*
	 * If the given power level exceeds CPU_MAX_PWR_DWN_OPS, we call the
	 * power down handler for the last power level.
	 */
	mov_imm	x2, (CPU_MAX_PWR_DWN_OPS - 1)
	cmp	x0, x2
	csel	x2, x2, x0, hi

	mrs	x1, tpidr_el3
	ldr	x0, [x1, #CPU_DATA_CPU_OPS_PTR]
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif

	/* Get the appropriate power down handler */
	mov	x1, #CPU_PWR_DWN_OPS
	add	x1, x1, x2, lsl #3
	ldr	x1, [x0, x1]
	br	x1
endfunc prepare_cpu_pwr_dwn
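
/*
 * Illustrative only (not assembled): the indexing above selects one of the
 * per-level power down handlers stored as an array of 64-bit function
 * pointers at offset CPU_PWR_DWN_OPS inside the cpu_ops entry, roughly:
 *
 *	unsigned int lvl = (power_level > CPU_MAX_PWR_DWN_OPS - 1) ?
 *				(CPU_MAX_PWR_DWN_OPS - 1) : power_level;
 *	ops->pwr_dwn_ops[lvl]();	// hypothetical field name for the array
 *
 * The 'lsl #3' scales the level by 8, the size of each function pointer.
 */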

/*
 * Initializes the cpu_ops_ptr if not already initialized
 * in cpu_data. This can be called without a runtime stack, but may
 * only be called after the MMU is enabled.
 * Clobbers: x0 - x6, x10
 */
	.globl	init_cpu_ops
func init_cpu_ops
	mrs	x6, tpidr_el3
	ldr	x0, [x6, #CPU_DATA_CPU_OPS_PTR]
	cbnz	x0, 1f
	mov	x10, x30
	bl	get_cpu_ops_ptr
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	str	x0, [x6, #CPU_DATA_CPU_OPS_PTR]!
	mov	x30, x10
1:
	ret
endfunc init_cpu_ops
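
/*
 * Note: init_cpu_ops deliberately avoids the stack (the return address is
 * parked in x10), so it can run before a runtime stack is available. The
 * result is cached in per-CPU data via tpidr_el3, so the MIDR scan in
 * get_cpu_ops_ptr only happens once per CPU.
 */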
#endif /* IMAGE_BL31 */

#if defined(IMAGE_BL31) && CRASH_REPORTING
/*
 * The cpu specific registers which need to be reported in a crash
 * are reported via the cpu_ops cpu_reg_dump function. After a matching
 * cpu_ops structure entry is found, the corresponding cpu_reg_dump
 * in the cpu_ops is invoked.
 */
	.globl	do_cpu_reg_dump
func do_cpu_reg_dump
	mov	x16, x30

	/* Get the matching cpu_ops pointer */
	bl	get_cpu_ops_ptr
	cbz	x0, 1f

	/* Get the cpu_ops cpu_reg_dump */
	ldr	x2, [x0, #CPU_REG_DUMP]
	cbz	x2, 1f
	blr	x2
1:
	mov	x30, x16
	ret
endfunc do_cpu_reg_dump
#endif

/*
 * Returns the cpu_ops structure matching the MIDR of the core. It reads
 * MIDR_EL1 and finds the matching entry among the cpu_ops entries. Only the
 * implementer and part number fields are used to match the entries.
 * Return:
 *	x0 - The matching cpu_ops pointer on success
 *	x0 - 0 on failure.
 * Clobbers: x0 - x5
 */
	.globl	get_cpu_ops_ptr
func get_cpu_ops_ptr
	/* Get the cpu_ops start and end locations */
	adr	x4, (__CPU_OPS_START__ + CPU_MIDR)
	adr	x5, (__CPU_OPS_END__ + CPU_MIDR)

	/* Initialize the return parameter */
	mov	x0, #0

	/* Read the MIDR_EL1 */
	mrs	x2, midr_el1
	mov_imm	x3, CPU_IMPL_PN_MASK

	/* Retain only the implementer and part number using the mask */
	and	w2, w2, w3
1:
	/* Check if we have reached the end of the list */
	cmp	x4, x5
	b.eq	error_exit

	/* Load the midr from the cpu_ops entry */
	ldr	x1, [x4], #CPU_OPS_SIZE
	and	w1, w1, w3

	/* Check if the midr matches the midr of this core */
	cmp	w1, w2
	b.ne	1b

	/* Subtract the increment and offset to get the cpu_ops pointer */
	sub	x0, x4, #(CPU_OPS_SIZE + CPU_MIDR)
error_exit:
	ret
endfunc get_cpu_ops_ptr
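
/*
 * Illustrative only (not assembled): the lookup above walks the cpu_ops
 * records gathered by the linker between __CPU_OPS_START__ and
 * __CPU_OPS_END__, roughly:
 *
 *	for (cpu_ops_t *ops = start; ops < end; ops++) {
 *		if ((ops->midr & CPU_IMPL_PN_MASK) ==
 *		    (read_midr_el1() & CPU_IMPL_PN_MASK))
 *			return ops;
 *	}
 *	return NULL;
 *
 * read_midr_el1() stands in for the mrs above; the field and helper names
 * are assumptions made for this sketch.
 */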

/*
 * Extract CPU revision and variant, and combine them into a single numeric for
 * easier comparison.
 */
	.globl	cpu_get_rev_var
func cpu_get_rev_var
	mrs	x1, midr_el1

	/*
	 * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them
	 * as variant[7:4] and revision[3:0] of x0.
	 *
	 * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then
	 * extract x1[3:0] into x0[3:0] retaining other bits.
	 */
	ubfx	x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS)
	bfxil	x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS
	ret
endfunc cpu_get_rev_var
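
/*
 * Illustrative only: for a core at revision r2p1, MIDR reports variant = 2
 * and revision = 1, so cpu_get_rev_var returns 0x21 (variant in bits [7:4],
 * revision in bits [3:0]).
 */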

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is less than or equal to the
 * given value, the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_ls
func cpu_rev_var_ls
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, ls
	ret
endfunc cpu_rev_var_ls
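
/*
 * Illustrative only (not assembled): a per-CPU errata check typically feeds
 * the packed revision-variant from cpu_get_rev_var into one of these
 * comparators, e.g. for an erratum affecting revisions up to and including
 * r1p0 (the erratum number and label below are hypothetical):
 *
 *	func check_errata_xxxxxx
 *		mov	x1, #0x10
 *		b	cpu_rev_var_ls
 *	endfunc check_errata_xxxxxx
 */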

/*
 * Compare the CPU's revision-variant (x0) with a given value (x1), for errata
 * application purposes. If the revision-variant is higher than or equal to the
 * given value, the errata applies; otherwise it does not.
 *
 * Shall clobber: x0-x3
 */
	.globl	cpu_rev_var_hs
func cpu_rev_var_hs
	mov	x2, #ERRATA_APPLIES
	mov	x3, #ERRATA_NOT_APPLIES
	cmp	x0, x1
	csel	x0, x2, x3, hs
	ret
endfunc cpu_rev_var_hs

#if REPORT_ERRATA
/*
 * void print_errata_status(void);
 *
 * Function to print errata status for CPUs of its class. Must be called only:
 *
 *   - with the MMU and data caches enabled;
 *   - after cpu_ops have been initialized in per-CPU data.
 */
	.globl	print_errata_status
func print_errata_status
#ifdef IMAGE_BL1
	/*
	 * BL1 doesn't have per-CPU data. So retrieve the CPU operations
	 * directly.
	 */
	stp	xzr, x30, [sp, #-16]!
	bl	get_cpu_ops_ptr
	ldp	xzr, x30, [sp], #16
	ldr	x1, [x0, #CPU_ERRATA_FUNC]
	cbnz	x1, .Lprint
#else
	/*
	 * Retrieve pointer to cpu_ops from per-CPU data, and further, the
	 * errata printing function. If it's non-NULL, jump to the function in
	 * turn.
	 */
	mrs	x0, tpidr_el3
	ldr	x1, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x1, #CPU_ERRATA_FUNC]
	cbz	x0, .Lnoprint

	/*
	 * Printing errata status requires atomically testing the printed flag.
	 */
	stp	x19, x30, [sp, #-16]!
	mov	x19, x0

	/*
	 * Load pointers to the errata lock and printed flag. Call
	 * errata_needs_reporting to check whether this CPU needs to report
	 * errata status pertaining to its class.
	 */
	ldr	x0, [x1, #CPU_ERRATA_LOCK]
	ldr	x1, [x1, #CPU_ERRATA_PRINTED]
	bl	errata_needs_reporting
	mov	x1, x19
	ldp	x19, x30, [sp], #16
	cbnz	x0, .Lprint
#endif
.Lnoprint:
	ret
.Lprint:
	/* Jump to errata reporting function for this CPU */
	br	x1
endfunc print_errata_status
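
/*
 * Note: errata_needs_reporting (see lib/cpus/errata_report.c) is expected to
 * take the per-class lock and printed-flag pointers loaded above and return
 * non-zero only for the first CPU of a class to reach it, so the errata
 * report for a class is printed at most once.
 */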
#endif

/*
 * int check_wa_cve_2017_5715(void);
 *
 * This function returns:
 *  - ERRATA_APPLIES when firmware mitigation is required.
 *  - ERRATA_NOT_APPLIES when firmware mitigation is _not_ required.
 *  - ERRATA_MISSING when firmware mitigation would be required but
 *    is not compiled in.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	check_wa_cve_2017_5715
func check_wa_cve_2017_5715
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x0, #CPU_EXTRA1_FUNC]
	/*
	 * If the reserved function pointer is NULL, this CPU
	 * is unaffected by CVE-2017-5715 so bail out.
	 */
	cmp	x0, #0
	b.eq	1f
	br	x0
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
endfunc check_wa_cve_2017_5715
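
/*
 * Note: CPU_EXTRA1_FUNC holds a per-CPU hook (populated by the CPU library
 * files, e.g. via the declare_cpu_ops_wa macro in cpu_macros.S) that reports
 * whether the CVE-2017-5715 mitigation applies; cores that never install the
 * hook fall through to ERRATA_NOT_APPLIES here.
 */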

/*
 * void *wa_cve_2018_3639_get_disable_ptr(void);
 *
 * Returns a function pointer which is used to disable mitigation
 * for CVE-2018-3639.
 * The function pointer is only returned on cores that employ
 * dynamic mitigation. If the core uses static mitigation or is
 * unaffected by CVE-2018-3639 this function returns NULL.
 *
 * NOTE: Must be called only after cpu_ops have been initialized
 *       in per-CPU data.
 */
	.globl	wa_cve_2018_3639_get_disable_ptr
func wa_cve_2018_3639_get_disable_ptr
	mrs	x0, tpidr_el3
#if ENABLE_ASSERTIONS
	cmp	x0, #0
	ASM_ASSERT(ne)
#endif
	ldr	x0, [x0, #CPU_DATA_CPU_OPS_PTR]
	ldr	x0, [x0, #CPU_EXTRA2_FUNC]
	ret
endfunc wa_cve_2018_3639_get_disable_ptr