/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <utils_def.h>
#include <xlat_tables_defs.h>
#include <xlat_tables_v2.h>

#include "xlat_tables_private.h"
#if LOG_LEVEL < LOG_LEVEL_VERBOSE

void xlat_mmap_print(__unused const mmap_region_t *mmap)
{
	/* Empty */
}

void xlat_tables_print(__unused xlat_ctx_t *ctx)
{
	/* Empty */
}

#else /* if LOG_LEVEL >= LOG_LEVEL_VERBOSE */
void xlat_mmap_print(const mmap_region_t *mmap)
{
	printf("mmap:\n");
	const mmap_region_t *mm = mmap;

	while (mm->size != 0U) {
		printf(" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x granularity:0x%zx\n",
		       mm->base_va, mm->base_pa, mm->size, mm->attr,
		       mm->granularity);
		++mm;
	}
	printf("\n");
}
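/*
 * Illustrative note (not part of the original file): with the format string
 * above, each mapped region prints as one line such as
 *
 *    VA:0x1000 PA:0x1000 size:0x2000 attr:0x... granularity:0x1000
 *
 * The addresses here are made up; the attr and granularity values depend on
 * the contents of each mmap_region_t.
 */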
/* Print the attributes of the specified block descriptor. */
static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
{
	uint64_t mem_type_index = ATTR_INDEX_GET(desc);
	int xlat_regime = ctx->xlat_regime;

	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		printf("MEM");
	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
		printf("NC");
	} else {
		assert(mem_type_index == ATTR_DEVICE_INDEX);
		printf("DEV");
	}

	if ((xlat_regime == EL3_REGIME) || (xlat_regime == EL2_REGIME)) {
		/* For EL3 and EL2 only check the AP[2] and XN bits. */
		printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
		printf(((desc & UPPER_ATTRS(XN)) != 0ULL) ? "-XN" : "-EXEC");
	} else {
		assert(xlat_regime == EL1_EL0_REGIME);
		/*
		 * For EL0 and EL1:
		 * - In AArch64 PXN and UXN can be set independently but in
		 *   AArch32 there is no UXN (XN affects both privilege levels).
		 *   For consistency, we set them simultaneously in both cases.
		 * - RO and RW permissions must be the same in EL1 and EL0. If
		 *   EL0 can access that memory region, so can EL1, with the
		 *   same permissions.
		 */
#if ENABLE_ASSERTIONS
		uint64_t xn_mask = xlat_arch_regime_get_xn_desc(EL1_EL0_REGIME);
		uint64_t xn_perm = desc & xn_mask;

		assert((xn_perm == xn_mask) || (xn_perm == 0ULL));
#endif
		printf(((desc & LOWER_ATTRS(AP_RO)) != 0ULL) ? "-RO" : "-RW");
		/* Only check one of PXN and UXN, the other one is the same. */
		printf(((desc & UPPER_ATTRS(PXN)) != 0ULL) ? "-XN" : "-EXEC");
		/*
		 * Privileged regions can only be accessed from EL1, user
		 * regions can be accessed from EL1 and EL0.
		 */
		printf(((desc & LOWER_ATTRS(AP_ACCESS_UNPRIVILEGED)) != 0ULL)
			  ? "-USER" : "-PRIV");
	}

	printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");
}
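/*
 * Illustrative note (not part of the original file): the fragments printed
 * above concatenate into one attribute string per descriptor. For example,
 * an EL3 block descriptor for secure, write-back cacheable, read-only,
 * executable memory prints as "MEM-RO-EXEC-S", while a non-secure device
 * page in the EL1&0 regime, writable, non-executable and privileged, prints
 * as "DEV-RW-XN-PRIV-NS".
 */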
static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};

static const char *invalid_descriptors_omitted =
	"%s(%d invalid descriptors omitted)\n";
/*
 * Recursive function that reads the translation tables passed as an argument
 * and prints their status.
 */
static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
		const uint64_t *table_base, unsigned int table_entries,
		unsigned int level)
{
	assert(level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t desc;
	uintptr_t table_idx_va = table_base_va;
	unsigned int table_idx = 0U;
	size_t level_size = XLAT_BLOCK_SIZE(level);

	/*
	 * Keep track of how many invalid descriptors are counted in a row.
	 * Whenever multiple invalid descriptors are found, only the first one
	 * is printed, and a line is added to inform about how many descriptors
	 * have been skipped.
	 */
	int invalid_row_count = 0;

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		if ((desc & DESC_MASK) == INVALID_DESC) {

			if (invalid_row_count == 0) {
				printf("%sVA:0x%lx size:0x%zx\n",
				       level_spacers[level],
				       table_idx_va, level_size);
			}
			invalid_row_count++;

		} else {

			if (invalid_row_count > 1) {
				printf(invalid_descriptors_omitted,
				       level_spacers[level],
				       invalid_row_count - 1);
			}
			invalid_row_count = 0;

			/*
			 * Check if this is a table or a block. Tables are only
			 * allowed in levels other than 3, but DESC_PAGE has the
			 * same value as DESC_TABLE, so we need to check.
			 */
			if (((desc & DESC_MASK) == TABLE_DESC) &&
					(level < XLAT_TABLE_LEVEL_MAX)) {
				/*
				 * Do not print any PA for a table descriptor,
				 * as it doesn't directly map physical memory
				 * but instead points to the next translation
				 * table in the translation table walk.
				 */
				printf("%sVA:0x%lx size:0x%zx\n",
				       level_spacers[level],
				       table_idx_va, level_size);

				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;

				xlat_tables_print_internal(ctx, table_idx_va,
					(uint64_t *)addr_inner,
					XLAT_TABLE_ENTRIES, level + 1U);
			} else {
				printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
				       level_spacers[level], table_idx_va,
				       (uint64_t)(desc & TABLE_ADDR_MASK),
				       level_size);
				xlat_desc_print(ctx, desc);
				printf("\n");
			}
		}

		table_idx++;
		table_idx_va += level_size;
	}

	if (invalid_row_count > 1) {
		printf(invalid_descriptors_omitted,
		       level_spacers[level], invalid_row_count - 1);
	}
}
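/*
 * Illustrative note (not part of the original file): with the helpers above,
 * a dump of a small table tree might look as follows (all addresses and
 * sizes are made up):
 *
 *    [LV1] VA:0x0 size:0x40000000
 *      [LV2] VA:0x0 size:0x200000
 *        [LV3] VA:0x0 PA:0x4000000 size:0x1000 MEM-RO-EXEC-S
 *        [LV3] VA:0x1000 size:0x1000
 *        (30 invalid descriptors omitted)
 *
 * Table descriptors print only VA and size before recursing; block and page
 * descriptors also print the PA and the attribute string produced by
 * xlat_desc_print(); runs of invalid descriptors are collapsed into a single
 * summary line.
 */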
void xlat_tables_print(xlat_ctx_t *ctx)
{
	const char *xlat_regime_str;
	int used_page_tables;

	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		xlat_regime_str = "1&0";
	} else if (ctx->xlat_regime == EL2_REGIME) {
		xlat_regime_str = "2";
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		xlat_regime_str = "3";
	}
	VERBOSE("Translation tables state:\n");
	VERBOSE("  Xlat regime:     EL%s\n", xlat_regime_str);
	VERBOSE("  Max allowed PA:  0x%llx\n", ctx->pa_max_address);
	VERBOSE("  Max allowed VA:  0x%lx\n", ctx->va_max_address);
	VERBOSE("  Max mapped PA:   0x%llx\n", ctx->max_pa);
	VERBOSE("  Max mapped VA:   0x%lx\n", ctx->max_va);

	VERBOSE("  Initial lookup level: %u\n", ctx->base_level);
	VERBOSE("  Entries @initial lookup level: %u\n",
		ctx->base_table_entries);

#if PLAT_XLAT_TABLES_DYNAMIC
	used_page_tables = 0;
	for (int i = 0; i < ctx->tables_num; ++i) {
		if (ctx->tables_mapped_regions[i] != 0)
			++used_page_tables;
	}
#else
	used_page_tables = ctx->next_table;
#endif
	VERBOSE("  Used %d sub-tables out of %d (spare: %d)\n",
		used_page_tables, ctx->tables_num,
		ctx->tables_num - used_page_tables);

	xlat_tables_print_internal(ctx, 0U, ctx->base_table,
				   ctx->base_table_entries, ctx->base_level);
}

#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
/*
 * Do a translation table walk to find the block or page descriptor that maps
 * virtual_addr.
 *
 * On success, return the address of the descriptor within the translation
 * table. Its lookup level is stored in '*out_level'.
 * On error, return NULL.
 *
 * xlat_table_base
 *   Base address for the initial lookup level.
 * xlat_table_base_entries
 *   Number of entries in the translation table for the initial lookup level.
 * virt_addr_space_size
 *   Size in bytes of the virtual address space.
 */
static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
				       void *xlat_table_base,
				       unsigned int xlat_table_base_entries,
				       unsigned long long virt_addr_space_size,
				       unsigned int *out_level)
{
	unsigned int start_level;
	uint64_t *table;
	unsigned int entries;

	start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);

	table = xlat_table_base;
	entries = xlat_table_base_entries;

	for (unsigned int level = start_level;
	     level <= XLAT_TABLE_LEVEL_MAX;
	     ++level) {
		uint64_t idx, desc, desc_type;

		idx = XLAT_TABLE_IDX(virtual_addr, level);
		if (idx >= entries) {
			WARN("Missing xlat table entry at address 0x%lx\n",
			     virtual_addr);
			return NULL;
		}

		desc = table[idx];
		desc_type = desc & DESC_MASK;

		if (desc_type == INVALID_DESC) {
			VERBOSE("Invalid entry (memory not mapped)\n");
			return NULL;
		}

		if (level == XLAT_TABLE_LEVEL_MAX) {
			/*
			 * Only page descriptors allowed at the final lookup
			 * level.
			 */
			assert(desc_type == PAGE_DESC);
			*out_level = level;
			return &table[idx];
		}

		if (desc_type == BLOCK_DESC) {
			*out_level = level;
			return &table[idx];
		}

		/* The descriptor points to the next-level table. */
		assert(desc_type == TABLE_DESC);
		table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
		entries = XLAT_TABLE_ENTRIES;
	}

	/*
	 * This shouldn't be reached, the translation table walk should end at
	 * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
	 */
	assert(false);

	return NULL;
}
static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
		uintptr_t base_va, uint32_t *attributes, uint64_t **table_entry,
		unsigned long long *addr_pa, unsigned int *table_level)
{
	uint64_t *entry;
	uint64_t desc;
	unsigned int level;
	unsigned long long virt_addr_space_size;

	/*
	 * Sanity-check arguments.
	 */
	assert(ctx != NULL);
	assert(ctx->initialized);
	assert((ctx->xlat_regime == EL1_EL0_REGIME) ||
	       (ctx->xlat_regime == EL2_REGIME) ||
	       (ctx->xlat_regime == EL3_REGIME));

	virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1ULL;
	assert(virt_addr_space_size > 0U);

	entry = find_xlat_table_entry(base_va,
				ctx->base_table,
				ctx->base_table_entries,
				virt_addr_space_size,
				&level);
	if (entry == NULL) {
		WARN("Address 0x%lx is not mapped.\n", base_va);
		return -EINVAL;
	}

	if (addr_pa != NULL) {
		*addr_pa = *entry & TABLE_ADDR_MASK;
	}

	if (table_entry != NULL) {
		*table_entry = entry;
	}

	if (table_level != NULL) {
		*table_level = level;
	}

	desc = *entry;

#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	VERBOSE("Attributes: ");
	xlat_desc_print(ctx, desc);
	printf("\n");
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

	assert(attributes != NULL);
	*attributes = 0U;

	uint64_t attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;

	if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		*attributes |= MT_MEMORY;
	} else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
		*attributes |= MT_NON_CACHEABLE;
	} else {
		assert(attr_index == ATTR_DEVICE_INDEX);
		*attributes |= MT_DEVICE;
	}

	uint64_t ap2_bit = (desc >> AP2_SHIFT) & 1U;

	if (ap2_bit == AP2_RW)
		*attributes |= MT_RW;

	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		uint64_t ap1_bit = (desc >> AP1_SHIFT) & 1U;

		if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
			*attributes |= MT_USER;
	}

	uint64_t ns_bit = (desc >> NS_SHIFT) & 1U;

	if (ns_bit == 1U)
		*attributes |= MT_NS;

	uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);

	if ((desc & xn_mask) == xn_mask) {
		*attributes |= MT_EXECUTE_NEVER;
	} else {
		assert((desc & xn_mask) == 0U);
	}

	return 0;
}
int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
				uint32_t *attr)
{
	return xlat_get_mem_attributes_internal(ctx, base_va, attr,
				NULL, NULL, NULL);
}
int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
				   size_t size, uint32_t attr)
{
	/* Note: This implementation isn't optimized. */

	assert(ctx != NULL);
	assert(ctx->initialized);

	unsigned long long virt_addr_space_size =
		(unsigned long long)ctx->va_max_address + 1U;
	assert(virt_addr_space_size > 0U);

	if (!IS_PAGE_ALIGNED(base_va)) {
		WARN("%s: Address 0x%lx is not aligned on a page boundary.\n",
		     __func__, base_va);
		return -EINVAL;
	}

	if (size == 0U) {
		WARN("%s: Size is 0.\n", __func__);
		return -EINVAL;
	}

	if ((size % PAGE_SIZE) != 0U) {
		WARN("%s: Size 0x%zx is not a multiple of a page size.\n",
		     __func__, size);
		return -EINVAL;
	}

	if (((attr & MT_EXECUTE_NEVER) == 0U) && ((attr & MT_RW) != 0U)) {
		WARN("%s: Mapping memory as read-write and executable not allowed.\n",
		     __func__);
		return -EINVAL;
	}

	size_t pages_count = size / PAGE_SIZE;

	VERBOSE("Changing memory attributes of %zu pages starting from address 0x%lx...\n",
		pages_count, base_va);

	uintptr_t base_va_original = base_va;

	/*
	 * Sanity-check the whole region before modifying anything.
	 */
	for (size_t i = 0U; i < pages_count; ++i) {
		const uint64_t *entry;
		uint64_t desc, attr_index;
		unsigned int level;

		entry = find_xlat_table_entry(base_va,
					      ctx->base_table,
					      ctx->base_table_entries,
					      virt_addr_space_size,
					      &level);
		if (entry == NULL) {
			WARN("Address 0x%lx is not mapped.\n", base_va);
			return -EINVAL;
		}

		desc = *entry;

		/*
		 * Check that all the required pages are mapped at page
		 * granularity.
		 */
		if (((desc & DESC_MASK) != PAGE_DESC) ||
				(level != XLAT_TABLE_LEVEL_MAX)) {
			WARN("Address 0x%lx is not mapped at the right granularity.\n",
			     base_va);
			WARN("Granularity is 0x%llx, should be 0x%x.\n",
			     (unsigned long long)XLAT_BLOCK_SIZE(level),
			     PAGE_SIZE);
			return -EINVAL;
		}

		/*
		 * If the region type is device, it shouldn't be executable.
		 */
		attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
		if (attr_index == ATTR_DEVICE_INDEX) {
			if ((attr & MT_EXECUTE_NEVER) == 0U) {
				WARN("Setting device memory as executable at address 0x%lx.",
				     base_va);
				return -EINVAL;
			}
		}

		base_va += PAGE_SIZE;
	}

	/* Restore original value. */
	base_va = base_va_original;

	for (unsigned int i = 0U; i < pages_count; ++i) {

		uint32_t old_attr = 0U, new_attr;
		uint64_t *entry = NULL;
		unsigned int level = 0U;
		unsigned long long addr_pa = 0ULL;

		(void) xlat_get_mem_attributes_internal(ctx, base_va, &old_attr,
						&entry, &addr_pa, &level);

		/*
		 * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
		 * MT_USER/MT_PRIVILEGED are taken into account. Any other
		 * information is ignored.
		 */

		/* Clean the old attributes so that they can be rebuilt. */
		new_attr = old_attr & ~(MT_RW | MT_EXECUTE_NEVER | MT_USER);

		/*
		 * Update attributes, but filter out the ones this function
		 * isn't allowed to change.
		 */
		new_attr |= attr & (MT_RW | MT_EXECUTE_NEVER | MT_USER);

		/*
		 * The break-before-make sequence requires writing an invalid
		 * descriptor and making sure that the system sees the change
		 * before writing the new descriptor.
		 */
		*entry = INVALID_DESC;
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
		dccvac((uintptr_t)entry);
#endif
		/* Invalidate any cached copy of this mapping in the TLBs. */
		xlat_arch_tlbi_va(base_va, ctx->xlat_regime);

		/* Ensure completion of the invalidation. */
		xlat_arch_tlbi_va_sync();

		/* Write the new descriptor. */
		*entry = xlat_desc(ctx, new_attr, addr_pa, level);
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
		dccvac((uintptr_t)entry);
#endif
		base_va += PAGE_SIZE;
	}

	/* Ensure that the last descriptor written is seen by the system. */
	dsbish();

	return 0;
}
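/*
 * Usage sketch (illustrative, not part of the original file): make one page
 * of previously read-write data read-only and non-executable. 'ctx' and
 * 'page_va' are assumptions: an initialized context and a page-aligned
 * address mapped at page granularity.
 *
 *	int ret = xlat_change_mem_attributes_ctx(ctx, page_va, PAGE_SIZE,
 *						 MT_RO | MT_EXECUTE_NEVER);
 *	if (ret != 0) {
 *		... e.g. the region is unmapped, or mapped as a block ...
 *	}
 *
 * Note that the function rejects MT_RW without MT_EXECUTE_NEVER, enforcing
 * a write-xor-execute policy on the changed pages.
 */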