/*
 * Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <common_def.h>
#include <debug.h>
#include <platform_def.h>
#include <string.h>
#include <utils.h>
#include <xlat_tables.h>
#include "xlat_tables_private.h"
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
#define LVL0_SPACER ""
#define LVL1_SPACER "  "
#define LVL2_SPACER "    "
#define LVL3_SPACER "      "
#define get_level_spacer(level)	\
	(((level) == U(0)) ? LVL0_SPACER : \
	(((level) == U(1)) ? LVL1_SPACER : \
	(((level) == U(2)) ? LVL2_SPACER : LVL3_SPACER)))
#define debug_print(...) printf(__VA_ARGS__)
#else
#define debug_print(...) ((void)0)
#endif
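
/*
 * Descriptive note (not in the original sources): UNSET_DESC marks a table
 * entry whose descriptor has not been decided yet while a table is being
 * built, and MT_UNKNOWN is returned by mmap_region_attr() when no single
 * region fully covers the queried area.
 */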
#define UNSET_DESC	~0ULL
#define MT_UNKNOWN	~0U
static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
			__aligned(XLAT_TABLE_SIZE) __section("xlat_table");
static unsigned int next_xlat;
static unsigned long long xlat_max_pa;
static uintptr_t xlat_max_va;
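
/*
 * Masks filled in by init_xlation_table() based on the current exception
 * level and applied by mmap_desc() when building block/page descriptors.
 */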
static uint64_t execute_never_mask;
static uint64_t ap1_mask;
/*
 * Array of all memory regions stored in order of ascending base address.
 * The list is terminated by the first entry with size == 0.
 */
static mmap_region_t mmap[MAX_MMAP_REGIONS + 1];
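
/*
 * Typical usage (illustrative sketch only, not part of the original file):
 * a caller registers its regions and then builds the tables, e.g.
 *
 *	mmap_add_region(SRAM_BASE, SRAM_BASE, SRAM_SIZE,
 *			MT_MEMORY | MT_RW | MT_SECURE);
 *	init_xlation_table(0U, base_xlation_table, 1U, &max_va, &max_pa);
 *
 * SRAM_BASE, SRAM_SIZE and base_xlation_table are hypothetical platform
 * symbols used here only for illustration.
 */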
void print_mmap(void)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	debug_print("mmap:\n");
	mmap_region_t *mm = mmap;
	while (mm->size != 0U) {
		debug_print(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
				(void *)mm->base_va, mm->base_pa,
				mm->size, mm->attr);
		++mm;
	}
	debug_print("\n");
#endif
}
void mmap_add_region(unsigned long long base_pa, uintptr_t base_va,
		     size_t size, unsigned int attr)
{
	mmap_region_t *mm = mmap;
	const mmap_region_t *mm_last = mm + ARRAY_SIZE(mmap) - 1U;
	unsigned long long end_pa = base_pa + size - 1U;
	uintptr_t end_va = base_va + size - 1U;

	assert(IS_PAGE_ALIGNED(base_pa));
	assert(IS_PAGE_ALIGNED(base_va));
	assert(IS_PAGE_ALIGNED(size));

	if (size == 0U)
		return;

	assert(base_pa < end_pa); /* Check for overflows */
	assert(base_va < end_va);

	assert((base_va + (uintptr_t)size - (uintptr_t)1) <=
					(PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
	assert((base_pa + (unsigned long long)size - 1ULL) <=
					(PLAT_PHY_ADDR_SPACE_SIZE - 1U));

#if ENABLE_ASSERTIONS

	/* Check for PAs and VAs overlaps with all other regions */
	for (mm = mmap; mm->size; ++mm) {

		uintptr_t mm_end_va = mm->base_va + mm->size - 1U;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		bool fully_overlapped_va =
			((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
			((mm->base_va >= base_va) && (mm_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 */
		if (fully_overlapped_va) {
			assert((mm->base_va - mm->base_pa) ==
			       (base_va - base_pa));
			assert((base_va != mm->base_va) || (size != mm->size));
		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed.
			 */
			unsigned long long mm_end_pa =
						mm->base_pa + mm->size - 1;

			bool separated_pa = (end_pa < mm->base_pa) ||
				(base_pa > mm_end_pa);
			bool separated_va = (end_va < mm->base_va) ||
				(base_va > mm_end_va);

			assert(separated_va && separated_pa);
		}
	}

	mm = mmap; /* Restore pointer to the start of the array */

#endif /* ENABLE_ASSERTIONS */

	/* Find correct place in mmap to insert new region */
	while ((mm->base_va < base_va) && (mm->size != 0U))
		++mm;

	/*
	 * If a section is contained inside another one with the same base
	 * address, it must be placed after the one it is contained in:
	 *
	 * 1st |-----------------------|
	 * 2nd |------------|
	 * 3rd |------|
	 *
	 * This is required for mmap_region_attr() to get the attributes of the
	 * small region correctly.
	 */
	while ((mm->base_va == base_va) && (mm->size > size))
		++mm;

	/* Make room for new region by moving other regions up by one place */
	(void)memmove(mm + 1, mm, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check we haven't lost the empty sentinel from the end of the array */
	assert(mm_last->size == 0U);

	mm->base_pa = base_pa;
	mm->base_va = base_va;
	mm->size = size;
	mm->attr = attr;

	if (end_pa > xlat_max_pa)
		xlat_max_pa = end_pa;
	if (end_va > xlat_max_va)
		xlat_max_va = end_va;
}
void mmap_add(const mmap_region_t *mm)
{
	const mmap_region_t *mm_cursor = mm;

	while ((mm_cursor->size != 0U) || (mm_cursor->attr != 0U)) {
		mmap_add_region(mm_cursor->base_pa, mm_cursor->base_va,
				mm_cursor->size, mm_cursor->attr);
		mm_cursor++;
	}
}
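
/*
 * Build a block or page descriptor for the output address `addr_pa` with the
 * memory attributes `attr`, to be installed at translation level `level`.
 */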
static uint64_t mmap_desc(unsigned int attr, unsigned long long addr_pa,
			  unsigned int level)
{
	uint64_t desc;
	unsigned int mem_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0U);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and the
	 * rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U;
	desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
	/*
	 * Always set the access flag, as this library assumes access flag
	 * faults aren't managed.
	 */
	desc |= LOWER_ATTRS(ACCESS_FLAG);
	desc |= ap1_mask;

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory type.
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these 2 types
	 * of memory, it is not strictly needed to set the shareability field
	 * in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative instruction
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
		desc |= execute_never_mask;

	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * This library assumes that it is used by software that does
		 * not self-modify its code, therefore R/W memory is reserved
		 * for data storage, which must not be executable.
		 *
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
		 * which causes any writable memory region to be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit.
		 */
		if (((attr & MT_RW) != 0U) || ((attr & MT_EXECUTE_NEVER) != 0U)) {
			desc |= execute_never_mask;
		}

		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	debug_print((mem_type == MT_MEMORY) ? "MEM" :
		((mem_type == MT_NON_CACHEABLE) ? "NC" : "DEV"));
	debug_print(((attr & MT_RW) != 0U) ? "-RW" : "-RO");
	debug_print(((attr & MT_NS) != 0U) ? "-NS" : "-S");
	debug_print(((attr & MT_EXECUTE_NEVER) != 0U) ? "-XN" : "-EXEC");

	return desc;
}
/*
 * Look for the innermost region that contains the area at `base_va` with size
 * `size`. Populate *attr with the attributes of this region.
 *
 * On success, this function returns 0.
 * If there are partial overlaps (meaning that a smaller size is needed) or if
 * the region can't be found in the given area, it returns MT_UNKNOWN. In this
 * case the value pointed by attr should be ignored by the caller.
 */
static unsigned int mmap_region_attr(const mmap_region_t *mm, uintptr_t base_va,
				     size_t size, unsigned int *attr)
{
	/* Don't assume that the area is contained in the first region */
	unsigned int ret = MT_UNKNOWN;

	/*
	 * Get attributes from last (innermost) region that contains the
	 * requested area. Don't stop as soon as one region doesn't contain it
	 * because there may be other internal regions that contain this area:
	 *
	 * |-----------------------------1-----------------------------|
	 * |----2----|     |-------3-------|    |----5----|
	 *                   |--4--|
	 *
	 *                   |---| <- Area we want the attributes of.
	 *
	 * In this example, the area is contained in regions 1, 3 and 4 but not
	 * in region 2. The loop shouldn't stop at region 2 as inner regions
	 * have priority over outer regions, it should stop at region 5.
	 */
	for ( ; ; ++mm) {

		if (mm->size == 0U)
			return ret; /* Reached end of list */

		if (mm->base_va > (base_va + size - 1U))
			return ret; /* Next region is after area so end */

		if ((mm->base_va + mm->size - 1U) < base_va)
			continue; /* Next region has already been overtaken */

		if ((ret == 0U) && (mm->attr == *attr))
			continue; /* Region doesn't override attribs so skip */

		if ((mm->base_va > base_va) ||
			((mm->base_va + mm->size - 1U) <
					(base_va + size - 1U)))
			return MT_UNKNOWN; /* Region doesn't fully cover area */

		*attr = mm->attr;
		ret = 0U;
	}

	return ret;
}
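
/*
 * Recursively fill in the translation table `table` that covers the address
 * range starting at `base_va` at translation level `level`, consuming the
 * region list starting at `mm`. Returns a pointer to the first region that
 * has not been fully processed yet, so the caller can continue from there.
 */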
static mmap_region_t *init_xlation_table_inner(mmap_region_t *mm,
					       uintptr_t base_va,
					       uint64_t *table,
					       unsigned int level)
{
	assert((level >= XLAT_TABLE_LEVEL_MIN) &&
	       (level <= XLAT_TABLE_LEVEL_MAX));

	unsigned int level_size_shift =
		L0_XLAT_ADDRESS_SHIFT - level * XLAT_TABLE_ENTRIES_SHIFT;
	u_register_t level_size = (u_register_t)1 << level_size_shift;
	u_register_t level_index_mask =
		((u_register_t)XLAT_TABLE_ENTRIES_MASK) << level_size_shift;
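
	/*
	 * For illustration: with a 4KB granule, L0_XLAT_ADDRESS_SHIFT is 39
	 * and XLAT_TABLE_ENTRIES_SHIFT is 9, so one entry maps 1GB at level 1
	 * (shift 30), 2MB at level 2 (shift 21) and 4KB at level 3 (shift 12).
	 */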
	debug_print("New xlat table:\n");

	do {
		uint64_t desc = UNSET_DESC;

		if (mm->size == 0U) {
			/* Done mapping regions; finish zeroing the table */
			desc = INVALID_DESC;
		} else if ((mm->base_va + mm->size - 1U) < base_va) {
			/* This area is after the region so get next region */
			++mm;
			continue;
		}

		debug_print("%s VA:%p size:0x%llx ", get_level_spacer(level),
			    (void *)base_va, (unsigned long long)level_size);

		if (mm->base_va > (base_va + level_size - 1U)) {
			/* Next region is after this area. Nothing to map yet */
			desc = INVALID_DESC;
		/* Make sure that the current level allows block descriptors */
		} else if (level >= XLAT_BLOCK_LEVEL_MIN) {
			/*
			 * Try to get attributes of this area. It will fail if
			 * there are partially overlapping regions. On success,
			 * it will return the innermost region's attributes.
			 */
			unsigned int attr;
			unsigned int r = mmap_region_attr(mm, base_va,
							  level_size, &attr);

			if (r == 0U) {
				desc = mmap_desc(attr,
					base_va - mm->base_va + mm->base_pa,
					level);
			}
		}

		if (desc == UNSET_DESC) {
			/* Area not covered by a region so need finer table */
			uint64_t *new_table = xlat_tables[next_xlat];
			next_xlat++;
			assert(next_xlat <= MAX_XLAT_TABLES);
			desc = TABLE_DESC | (uintptr_t)new_table;

			/* Recurse to fill in new table */
			mm = init_xlation_table_inner(mm, base_va,
						new_table, level + 1U);
		}

		debug_print("\n");

		*table++ = desc;
		base_va += level_size;
	} while ((base_va & level_index_mask) &&
		 ((base_va - 1U) < (PLAT_VIRT_ADDR_SPACE_SIZE - 1U)));

	return mm;
}
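
/*
 * Build the translation tables for all regions registered with
 * mmap_add()/mmap_add_region(), starting from `base_va` and filling the root
 * `table` at translation level `level`. On return, *max_va and *max_pa are
 * updated with the highest virtual and physical addresses used by the added
 * regions.
 */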
void init_xlation_table(uintptr_t base_va, uint64_t *table,
			unsigned int level, uintptr_t *max_va,
			unsigned long long *max_pa)
{
	unsigned int el = xlat_arch_current_el();

	execute_never_mask = xlat_arch_get_xn_desc(el);

	if (el == 3U) {
		ap1_mask = LOWER_ATTRS(AP_ONE_VA_RANGE_RES1);
	} else {
		assert(el == 1U);
		ap1_mask = 0ULL;
	}

	init_xlation_table_inner(mmap, base_va, table, level);
	*max_va = xlat_max_va;
	*max_pa = xlat_max_pa;
}