/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <amu.h>
#include <amu_private.h>
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <platform.h>
#include <pubsub_events.h>
#include <stdbool.h>

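/*
 * The AMU architecture defines a fixed set of four group 0 counters; the
 * number of group 1 (auxiliary) counters is platform-defined via
 * AMU_GROUP1_NR_COUNTERS.
 */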
#define AMU_GROUP0_NR_COUNTERS	4

struct amu_ctx {
        uint64_t group0_cnts[AMU_GROUP0_NR_COUNTERS];
        uint64_t group1_cnts[AMU_GROUP1_NR_COUNTERS];
};

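/* Per-core counter context, saved/restored around power-down suspend. */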
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];

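/*
 * Check whether the Activity Monitors Unit is present: the
 * ID_AA64PFR0_EL1.AMU field reads as 1 for AMUv1.
 */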
bool amu_supported(void)
{
        uint64_t features;

        features = read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT;
        return (features & ID_AA64PFR0_AMU_MASK) == 1U;
}

/*
 * Enable counters. This function is meant to be invoked
 * by the context management library before exiting from EL3.
 */
void amu_enable(bool el2_unused)
{
        uint64_t v;

        if (!amu_supported())
                return;

        if (el2_unused) {
                /*
                 * CPTR_EL2.TAM: Set to zero so that any accesses to
                 * the Activity Monitor registers do not trap to EL2.
                 */
                v = read_cptr_el2();
                v &= ~CPTR_EL2_TAM_BIT;
                write_cptr_el2(v);
        }

        /*
         * CPTR_EL3.TAM: Set to zero so that any accesses to
         * the Activity Monitor registers do not trap to EL3.
         */
        v = read_cptr_el3();
        v &= ~TAM_BIT;
        write_cptr_el3(v);

        /* Enable group 0 counters */
        write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
        /* Enable group 1 counters */
        write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
}

/* Read the group 0 counter identified by the given `idx`. */
uint64_t amu_group0_cnt_read(int idx)
{
        assert(amu_supported());
        assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));

        return amu_group0_cnt_read_internal(idx);
}

/* Write the group 0 counter identified by the given `idx` with `val`. */
void amu_group0_cnt_write(int idx, uint64_t val)
{
        assert(amu_supported());
        assert((idx >= 0) && (idx < AMU_GROUP0_NR_COUNTERS));

        amu_group0_cnt_write_internal(idx, val);
        isb();
}

/* Read the group 1 counter identified by the given `idx`. */
uint64_t amu_group1_cnt_read(int idx)
{
        assert(amu_supported());
        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

        return amu_group1_cnt_read_internal(idx);
}

/* Write the group 1 counter identified by the given `idx` with `val`. */
void amu_group1_cnt_write(int idx, uint64_t val)
{
        assert(amu_supported());
        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

        amu_group1_cnt_write_internal(idx, val);
        isb();
}

/*
 * Program the event type register for the given `idx` with
 * the event number `val`. Group 1 event mappings are
 * IMPLEMENTATION DEFINED, so `val` is platform-specific.
 */
void amu_group1_set_evtype(int idx, unsigned int val)
{
        assert(amu_supported());
        assert((idx >= 0) && (idx < AMU_GROUP1_NR_COUNTERS));

        amu_group1_set_evtype_internal(idx, val);
        isb();
}
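
/*
 * Illustrative usage sketch (the event number 0x1 is a made-up,
 * IMPLEMENTATION DEFINED value; consult the CPU TRM for real ones):
 *
 *	amu_group1_set_evtype(0, 0x1);
 *	...
 *	uint64_t count = amu_group1_cnt_read(0);
 */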

static void *amu_context_save(const void *arg)
{
        struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
        int i;

        if (!amu_supported())
                return (void *)-1;

        /* Assert that group 0/1 counter configuration is what we expect */
        assert((read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK) &&
               (read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK));

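        /*
         * The position of the most significant set bit in the group 1
         * mask must not exceed the number of implemented group 1
         * counters.
         */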
        assert(((sizeof(int) * 8U) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
                <= AMU_GROUP1_NR_COUNTERS);

        /*
         * Disable group 0/1 counters so that other observers, such as the
         * SCP, cannot sample counter values from the future via the
         * memory-mapped view.
         */
        write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
        write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
        isb();

        /* Save group 0 counters */
        for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
                ctx->group0_cnts[i] = amu_group0_cnt_read(i);

        /* Save group 1 counters */
        for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
                ctx->group1_cnts[i] = amu_group1_cnt_read(i);

        return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
        struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
        int i;

        if (!amu_supported())
                return (void *)-1;

        /* Counters were disabled in `amu_context_save()` */
        assert((read_amcntenset0_el0() == 0U) && (read_amcntenset1_el0() == 0U));

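        /*
         * As in amu_context_save(), the group 1 mask must fit within the
         * implemented group 1 counters.
         */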
        assert(((sizeof(int) * 8U) - __builtin_clz(AMU_GROUP1_COUNTERS_MASK))
                <= AMU_GROUP1_NR_COUNTERS);

        /* Restore group 0 counters */
        for (i = 0; i < AMU_GROUP0_NR_COUNTERS; i++)
                if ((AMU_GROUP0_COUNTERS_MASK & (1U << i)) != 0U)
                        amu_group0_cnt_write(i, ctx->group0_cnts[i]);

        /* Restore group 1 counters */
        for (i = 0; i < AMU_GROUP1_NR_COUNTERS; i++)
                if ((AMU_GROUP1_COUNTERS_MASK & (1U << i)) != 0U)
                        amu_group1_cnt_write(i, ctx->group1_cnts[i]);

        /* Restore group 0/1 counter configuration */
        write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
        write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);

        return (void *)0;
}

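/*
 * Register the save/restore handlers with the pubsub framework so that
 * they run at entry to and exit from a PSCI suspend to power-down.
 */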
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);