82b951db6d6e22cd10c7ad030e498c0d203f4368
[project/bcm63xx/atf.git] / lib / psci / psci_private.h
1 /*
2 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #ifndef PSCI_PRIVATE_H
8 #define PSCI_PRIVATE_H
9
10 #include <arch.h>
11 #include <arch_helpers.h>
12 #include <bakery_lock.h>
13 #include <bl_common.h>
14 #include <cpu_data.h>
15 #include <psci.h>
16 #include <spinlock.h>
17 #include <stdbool.h>
18
19 /*
20 * The PSCI capability which are provided by the generic code but does not
21 * depend on the platform or spd capabilities.
22 */
23 #define PSCI_GENERIC_CAP \
24 (define_psci_cap(PSCI_VERSION) | \
25 define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) | \
26 define_psci_cap(PSCI_FEATURES))
27
28 /*
29 * The PSCI capabilities mask for 64 bit functions.
30 */
31 #define PSCI_CAP_64BIT_MASK \
32 (define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) | \
33 define_psci_cap(PSCI_CPU_ON_AARCH64) | \
34 define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) | \
35 define_psci_cap(PSCI_MIG_AARCH64) | \
36 define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) | \
37 define_psci_cap(PSCI_NODE_HW_STATE_AARCH64) | \
38 define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) | \
39 define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) | \
40 define_psci_cap(PSCI_STAT_COUNT_AARCH64) | \
41 define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64) | \
42 define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64))
43
44 /*
45 * Helper functions to get/set the fields of PSCI per-cpu data.
46 */
/* Set the affinity info state in the calling CPU's PSCI per-cpu data. */
static inline void psci_set_aff_info_state(aff_info_state_t aff_state)
{
	set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state);
}
51
/* Return the affinity info state from the calling CPU's PSCI per-cpu data. */
static inline aff_info_state_t psci_get_aff_info_state(void)
{
	return get_cpu_data(psci_svc_cpu_data.aff_info_state);
}
56
/*
 * Return the affinity info state of the CPU identified by its linear
 * index 'idx' into the per-cpu data array.
 */
static inline aff_info_state_t psci_get_aff_info_state_by_idx(int idx)
{
	return get_cpu_data_by_index((unsigned int)idx,
				     psci_svc_cpu_data.aff_info_state);
}
62
/*
 * Set the affinity info state of the CPU identified by its linear
 * index 'idx' into the per-cpu data array.
 */
static inline void psci_set_aff_info_state_by_idx(int idx,
						  aff_info_state_t aff_state)
{
	set_cpu_data_by_index((unsigned int)idx,
			      psci_svc_cpu_data.aff_info_state, aff_state);
}
69
/* Return the target power level of the calling CPU's pending suspend request. */
static inline unsigned int psci_get_suspend_pwrlvl(void)
{
	return get_cpu_data(psci_svc_cpu_data.target_pwrlvl);
}
74
/* Record the target power level for the calling CPU's suspend request. */
static inline void psci_set_suspend_pwrlvl(unsigned int target_lvl)
{
	set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl);
}
79
/* Set the platform local power state of the calling CPU's power domain. */
static inline void psci_set_cpu_local_state(plat_local_state_t state)
{
	set_cpu_data(psci_svc_cpu_data.local_state, state);
}
84
/* Return the platform local power state of the calling CPU's power domain. */
static inline plat_local_state_t psci_get_cpu_local_state(void)
{
	return get_cpu_data(psci_svc_cpu_data.local_state);
}
89
/*
 * Return the platform local power state of the CPU power domain identified
 * by its linear index 'idx' into the per-cpu data array.
 */
static inline plat_local_state_t psci_get_cpu_local_state_by_idx(int idx)
{
	return get_cpu_data_by_index((unsigned int)idx,
				     psci_svc_cpu_data.local_state);
}
95
/* Helper function to identify a CPU standby request in PSCI Suspend call */
static inline bool is_cpu_standby_req(unsigned int is_power_down_state,
				      unsigned int retn_lvl)
{
	/*
	 * A suspend request is a CPU standby request only when it does not
	 * ask for a power down and the retention level is the CPU level (0).
	 */
	if (is_power_down_state != 0U)
		return false;

	return (retn_lvl == 0U);
}
102
103 /*******************************************************************************
104 * The following two data structures implement the power domain tree. The tree
105 * is used to track the state of all the nodes i.e. power domain instances
106 * described by the platform. The tree consists of nodes that describe CPU power
107 * domains i.e. leaf nodes and all other power domains which are parents of a
108 * CPU power domain i.e. non-leaf nodes.
109 ******************************************************************************/
/* Descriptor of a non-leaf (non-CPU) node in the power domain tree. */
typedef struct non_cpu_pwr_domain_node {
	/*
	 * Index of the first CPU power domain node level 0 which has this node
	 * as its parent.
	 */
	int cpu_start_idx;

	/*
	 * Number of CPU power domains which are siblings of the domain indexed
	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
	 * -> cpu_start_idx + ncpus' have this node as their parent.
	 */
	unsigned int ncpus;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/* Local power state of this power domain node */
	plat_local_state_t local_state;

	/* Level of this node in the power domain tree */
	unsigned char level;

	/* For indexing the psci_locks array */
	unsigned char lock_index;
} non_cpu_pd_node_t;
137
/* Descriptor of a leaf (CPU) node in the power domain tree. */
typedef struct cpu_pwr_domain_node {
	/* MPIDR of the CPU corresponding to this node */
	u_register_t mpidr;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/*
	 * A CPU power domain does not require state coordination like its
	 * parent power domains. Hence this node does not include a bakery
	 * lock. A spinlock is required by the CPU_ON handler to prevent a race
	 * when multiple CPUs try to turn ON the same target CPU.
	 */
	spinlock_t cpu_lock;
} cpu_pd_node_t;
155
156 /*******************************************************************************
157 * The following are helpers and declarations of locks.
158 ******************************************************************************/
159 #if HW_ASSISTED_COHERENCY
160 /*
161 * On systems where participant CPUs are cache-coherent, we can use spinlocks
162 * instead of bakery locks.
163 */
164 #define DEFINE_PSCI_LOCK(_name) spinlock_t _name
165 #define DECLARE_PSCI_LOCK(_name) extern DEFINE_PSCI_LOCK(_name)
166
167 /* One lock is required per non-CPU power domain node */
168 DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
169
170 /*
171 * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
172 * as PSCI participants are cache-coherent, and there's no need for explicit
173 * cache maintenance operations or barriers to coordinate their state.
174 */
/*
 * No-op variant: with hardware-assisted coherency, no explicit dcache
 * flushing is needed to publish PSCI state to other participants.
 */
static inline void psci_flush_dcache_range(uintptr_t __unused addr,
					   size_t __unused size)
{
	/* Empty */
}
180
181 #define psci_flush_cpu_data(member)
182 #define psci_inv_cpu_data(member)
183
/*
 * No-op variant: with hardware-assisted coherency, no barrier is needed to
 * coordinate PSCI state between participants.
 */
static inline void psci_dsbish(void)
{
	/* Empty */
}
188
/* Acquire the spinlock protecting the given non-CPU power domain node. */
static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
{
	spin_lock(&psci_locks[non_cpu_pd_node->lock_index]);
}
193
/* Release the spinlock protecting the given non-CPU power domain node. */
static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
{
	spin_unlock(&psci_locks[non_cpu_pd_node->lock_index]);
}
198
199 #else /* if HW_ASSISTED_COHERENCY == 0 */
200 /*
201 * Use bakery locks for state coordination as not all PSCI participants are
202 * cache coherent.
203 */
204 #define DEFINE_PSCI_LOCK(_name) DEFINE_BAKERY_LOCK(_name)
205 #define DECLARE_PSCI_LOCK(_name) DECLARE_BAKERY_LOCK(_name)
206
207 /* One lock is required per non-CPU power domain node */
208 DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
209
210 /*
211 * If not all PSCI participants are cache-coherent, perform cache maintenance
212 * and issue barriers wherever required to coordinate state.
213 */
/*
 * Flush the dcache lines covering [addr, addr + size) so that non-coherent
 * PSCI participants observe the latest state.
 */
static inline void psci_flush_dcache_range(uintptr_t addr, size_t size)
{
	flush_dcache_range(addr, size);
}
218
219 #define psci_flush_cpu_data(member) flush_cpu_data(member)
220 #define psci_inv_cpu_data(member) inv_cpu_data(member)
221
/* Issue a DSB ISH barrier to order cache maintenance across participants. */
static inline void psci_dsbish(void)
{
	dsbish();
}
226
/* Acquire the bakery lock protecting the given non-CPU power domain node. */
static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
{
	bakery_lock_get(&psci_locks[non_cpu_pd_node->lock_index]);
}
231
/* Release the bakery lock protecting the given non-CPU power domain node. */
static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
{
	bakery_lock_release(&psci_locks[non_cpu_pd_node->lock_index]);
}
236
237 #endif /* HW_ASSISTED_COHERENCY */
238
/*
 * Associate the node at position 'idx' in the given non-CPU power domain
 * node array with its slot in the psci_locks array.
 */
static inline void psci_lock_init(non_cpu_pd_node_t *non_cpu_pd_node,
				  unsigned char idx)
{
	non_cpu_pd_node[idx].lock_index = idx;
}
244
245 /*******************************************************************************
246 * Data prototypes
247 ******************************************************************************/
248 extern const plat_psci_ops_t *psci_plat_pm_ops;
249 extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
250 extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
251 extern unsigned int psci_caps;
252
253 /*******************************************************************************
254 * SPD's power management hooks registered with PSCI
255 ******************************************************************************/
256 extern const spd_pm_ops_t *psci_spd_pm;
257
258 /*******************************************************************************
259 * Function prototypes
260 ******************************************************************************/
261 /* Private exported functions from psci_common.c */
262 int psci_validate_power_state(unsigned int power_state,
263 psci_power_state_t *state_info);
264 void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
265 int psci_validate_mpidr(u_register_t mpidr);
266 void psci_init_req_local_pwr_states(void);
267 void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
268 psci_power_state_t *target_state);
269 int psci_validate_entry_point(entry_point_info_t *ep,
270 uintptr_t entrypoint, u_register_t context_id);
271 void psci_get_parent_pwr_domain_nodes(int cpu_idx,
272 unsigned int end_lvl,
273 unsigned int *node_index);
274 void psci_do_state_coordination(unsigned int end_pwrlvl,
275 psci_power_state_t *state_info);
276 void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl, int cpu_idx);
277 void psci_release_pwr_domain_locks(unsigned int end_pwrlvl, int cpu_idx);
278 int psci_validate_suspend_req(const psci_power_state_t *state_info,
279 unsigned int is_power_down_state);
280 unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
281 unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
282 void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
283 void psci_print_power_domain_map(void);
284 unsigned int psci_is_last_on_cpu(void);
285 int psci_spd_migrate_info(u_register_t *mpidr);
286 void psci_do_pwrdown_sequence(unsigned int power_level);
287
288 /*
289 * CPU power down is directly called only when HW_ASSISTED_COHERENCY is
290 * available. Otherwise, this needs post-call stack maintenance, which is
291 * handled in assembly.
292 */
293 void prepare_cpu_pwr_dwn(unsigned int power_level);
294
295 /* Private exported functions from psci_on.c */
296 int psci_cpu_on_start(u_register_t target_cpu,
297 const entry_point_info_t *ep);
298
299 void psci_cpu_on_finish(int cpu_idx, const psci_power_state_t *state_info);
300
301 /* Private exported functions from psci_off.c */
302 int psci_do_cpu_off(unsigned int end_pwrlvl);
303
304 /* Private exported functions from psci_suspend.c */
305 void psci_cpu_suspend_start(const entry_point_info_t *ep,
306 unsigned int end_pwrlvl,
307 psci_power_state_t *state_info,
308 unsigned int is_power_down_state);
309
310 void psci_cpu_suspend_finish(int cpu_idx, const psci_power_state_t *state_info);
311
312 /* Private exported functions from psci_helpers.S */
313 void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
314 void psci_do_pwrup_cache_maintenance(void);
315
316 /* Private exported functions from psci_system_off.c */
317 void __dead2 psci_system_off(void);
318 void __dead2 psci_system_reset(void);
319 u_register_t psci_system_reset2(uint32_t reset_type, u_register_t cookie);
320
321 /* Private exported functions from psci_stat.c */
322 void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
323 const psci_power_state_t *state_info);
324 void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
325 const psci_power_state_t *state_info);
326 u_register_t psci_stat_residency(u_register_t target_cpu,
327 unsigned int power_state);
328 u_register_t psci_stat_count(u_register_t target_cpu,
329 unsigned int power_state);
330
331 /* Private exported functions from psci_mem_protect.c */
332 u_register_t psci_mem_protect(unsigned int enable);
333 u_register_t psci_mem_chk_range(uintptr_t base, u_register_t length);
334
335 #endif /* PSCI_PRIVATE_H */