/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdint.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_features.h>
#include <common/bl_common.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables.h>
#include <lib/xlat_tables/xlat_tables_arch.h>
#include <plat/common/common_def.h>

#include "../xlat_tables_private.h"

#define XLAT_TABLE_LEVEL_BASE \
	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

#define NUM_BASE_LEVEL_ENTRIES \
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

static unsigned long long tcr_ps_bits;

static unsigned long long calc_physical_addr_size_bits(
					unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0U);

	/* 48 bits address */
	if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}
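
/*
 * Worked example (illustrative, not part of the library): for
 * max_addr = 0x87FFFFFFFULL, a 36-bit address, the first mask to match is
 * ADDR_MASK_32_TO_35, so the function returns TCR_PS_BITS_64GB, i.e. a
 * 36-bit physical address size for the TCR PS/IPS field.
 */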

#if ENABLE_ASSERTIONS
/*
 * Physical address ranges supported in the AArch64 memory model. The value
 * 0b110 is supported from ARMv8.2 onwards.
 */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101, PARANGE_0110
};

static unsigned long long get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}
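
/*
 * Example (illustrative): a PARange field of 0b0101 selects PARANGE_0101,
 * i.e. 48 bits, so the maximum supported physical address is
 * (1ULL << 48) - 1ULL (a 256TB physical address space).
 */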

/*
 * Returns the minimum virtual address space size supported by the
 * architecture.
 */
static uintptr_t xlat_get_min_virt_addr_space_size(void)
{
	uintptr_t ret;

	if (is_armv8_4_ttst_present())
		ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
	else
		ret = MIN_VIRT_ADDR_SPACE_SIZE;

	return ret;
}
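
/*
 * Background (assumed from the architecture, with the 4KB granule this
 * library uses): without FEAT_TTST the maximum T0SZ is 39, bounding the
 * minimum virtual address space at 32MB; FEAT_TTST (ARMv8.4) raises the
 * maximum T0SZ to 48, allowing address spaces as small as 64KB.
 */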
#endif /* ENABLE_ASSERTIONS */

unsigned int xlat_arch_current_el(void)
{
	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

	assert(el > 0U);

	return el;
}

uint64_t xlat_arch_get_xn_desc(unsigned int el)
{
	if (el == 3U) {
		return UPPER_ATTRS(XN);
	} else {
		assert(el == 1U);
		return UPPER_ATTRS(PXN);
	}
}
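
/*
 * Note: the EL3 translation regime has a single execute-never bit (XN),
 * while the EL1&0 regime splits it into PXN and UXN. Returning PXN at EL1
 * makes a mapping non-executable at EL1 while leaving EL0 executability
 * governed by UXN.
 */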

void init_xlat_tables(void)
{
	unsigned long long max_pa;
	uintptr_t max_va;

	assert(PLAT_VIRT_ADDR_SPACE_SIZE >=
		(xlat_get_min_virt_addr_space_size() - 1U));
	assert(PLAT_VIRT_ADDR_SPACE_SIZE <= MAX_VIRT_ADDR_SPACE_SIZE);
	assert(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE));

	print_mmap();
	init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
			   &max_va, &max_pa);

	assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
	assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
	assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());

	tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
}
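
/*
 * Typical platform usage, as a minimal sketch (the region base, size and
 * attributes below are example values, not taken from this file):
 *
 *	mmap_add_region(0x80000000ULL, 0x80000000U, 0x40000000U,
 *			MT_MEMORY | MT_RW | MT_SECURE);
 *	init_xlat_tables();
 *	enable_mmu_el3(0U);
 */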

/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the page tables have already been created.
 *
 *   _el:		Exception level at which the function will run
 *   _tcr_extra:	Extra bits to set in the TCR register. This mask will
 *			be OR'ed with the default TCR value.
 *   _tlbi_fct:		Function to invalidate the TLBs at the current
 *			exception level
 ******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct)		\
	void enable_mmu_el##_el(unsigned int flags)			\
	{								\
		uint64_t mair, tcr, ttbr;				\
		uint32_t sctlr;						\
									\
		assert(IS_IN_EL(_el));					\
		assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0U);	\
									\
		/* Set attributes in the right indices of the MAIR */	\
		mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);	\
		mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,		\
				ATTR_IWBWA_OWBWA_NTR_INDEX);		\
		mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,		\
				ATTR_NON_CACHEABLE_INDEX);		\
		write_mair_el##_el(mair);				\
									\
		/* Invalidate TLBs at the current exception level */	\
		_tlbi_fct();						\
									\
		/* Set TCR bits as well. */				\
		/* Set T0SZ to (64 - width of virtual address space) */\
		int t0sz = 64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);\
									\
		if ((flags & XLAT_TABLE_NC) != 0U) {			\
			/* Inner & outer non-cacheable non-shareable. */\
			tcr = TCR_SH_NON_SHAREABLE |			\
				TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC |	\
				(uint64_t) t0sz;			\
		} else {						\
			/* Inner & outer WBWA & shareable. */		\
			tcr = TCR_SH_INNER_SHAREABLE |			\
				TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |	\
				(uint64_t) t0sz;			\
		}							\
		tcr |= _tcr_extra;					\
		write_tcr_el##_el(tcr);					\
									\
		/* Set TTBR bits as well */				\
		ttbr = (uint64_t) base_xlation_table;			\
		write_ttbr0_el##_el(ttbr);				\
									\
		/* Ensure all translation table writes have drained */	\
		/* into memory, the TLB invalidation is complete, */	\
		/* and translation register writes are committed */	\
		/* before enabling the MMU */				\
		dsbish();						\
		isb();							\
									\
		sctlr = read_sctlr_el##_el();				\
		sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;			\
									\
		if ((flags & DISABLE_DCACHE) != 0U)			\
			sctlr &= ~SCTLR_C_BIT;				\
		else							\
			sctlr |= SCTLR_C_BIT;				\
									\
		write_sctlr_el##_el(sctlr);				\
									\
		/* Ensure the MMU enable takes effect immediately */	\
		isb();							\
	}								\
									\
	void enable_mmu_direct_el##_el(unsigned int flags)		\
	{								\
		enable_mmu_el##_el(flags);				\
	}
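
/*
 * Worked example (illustrative): with PLAT_VIRT_ADDR_SPACE_SIZE = 1ULL << 32,
 * __builtin_ctzll() returns 32 and T0SZ = 64 - 32 = 32, giving TTBR0 a 4GB
 * virtual address range. The IS_POWER_OF_TWO() assertion in
 * init_xlat_tables() is what makes this count-trailing-zeros computation
 * valid.
 */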

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
		tlbivmalle1)
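/*
 * TCR_EL3_RES1 keeps the TCR_EL3 bits that the architecture defines as RES1
 * set when the register is written.
 */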
DEFINE_ENABLE_MMU_EL(3,
		TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
		tlbialle3)