/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
9 #include <platform_def.h>
11 #include <common/debug.h>
12 #include <lib/xlat_tables/xlat_tables_defs.h>
13 #include <lib/xlat_tables/xlat_tables_v2.h>
15 #include "xlat_tables_private.h"
18 * MMU configuration register values for the active translation context. Used
19 * from the MMU assembly helpers.
21 uint64_t mmu_cfg_params
[MMU_CFG_PARAM_MAX
];
/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 */
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
30 void mmap_add_region(unsigned long long base_pa
, uintptr_t base_va
, size_t size
,
33 mmap_region_t mm
= MAP_REGION(base_pa
, base_va
, size
, attr
);
35 mmap_add_region_ctx(&tf_xlat_ctx
, &mm
);
38 void mmap_add(const mmap_region_t
*mm
)
40 mmap_add_ctx(&tf_xlat_ctx
, mm
);
43 void mmap_add_region_alloc_va(unsigned long long base_pa
, uintptr_t *base_va
,
44 size_t size
, unsigned int attr
)
46 mmap_region_t mm
= MAP_REGION_ALLOC_VA(base_pa
, size
, attr
);
48 mmap_add_region_alloc_va_ctx(&tf_xlat_ctx
, &mm
);
50 *base_va
= mm
.base_va
;
53 void mmap_add_alloc_va(mmap_region_t
*mm
)
55 while (mm
->granularity
!= 0U) {
56 assert(mm
->base_va
== 0U);
57 mmap_add_region_alloc_va_ctx(&tf_xlat_ctx
, mm
);
62 #if PLAT_XLAT_TABLES_DYNAMIC
64 int mmap_add_dynamic_region(unsigned long long base_pa
, uintptr_t base_va
,
65 size_t size
, unsigned int attr
)
67 mmap_region_t mm
= MAP_REGION(base_pa
, base_va
, size
, attr
);
69 return mmap_add_dynamic_region_ctx(&tf_xlat_ctx
, &mm
);
72 int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa
,
73 uintptr_t *base_va
, size_t size
,
76 mmap_region_t mm
= MAP_REGION_ALLOC_VA(base_pa
, size
, attr
);
78 int rc
= mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx
, &mm
);
80 *base_va
= mm
.base_va
;
86 int mmap_remove_dynamic_region(uintptr_t base_va
, size_t size
)
88 return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx
,
92 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
94 void __init
init_xlat_tables(void)
96 assert(tf_xlat_ctx
.xlat_regime
== EL_REGIME_INVALID
);
98 unsigned int current_el
= xlat_arch_current_el();
100 if (current_el
== 1U) {
101 tf_xlat_ctx
.xlat_regime
= EL1_EL0_REGIME
;
102 } else if (current_el
== 2U) {
103 tf_xlat_ctx
.xlat_regime
= EL2_REGIME
;
105 assert(current_el
== 3U);
106 tf_xlat_ctx
.xlat_regime
= EL3_REGIME
;
109 init_xlat_tables_ctx(&tf_xlat_ctx
);
112 int xlat_get_mem_attributes(uintptr_t base_va
, uint32_t *attr
)
114 return xlat_get_mem_attributes_ctx(&tf_xlat_ctx
, base_va
, attr
);
117 int xlat_change_mem_attributes(uintptr_t base_va
, size_t size
, uint32_t attr
)
119 return xlat_change_mem_attributes_ctx(&tf_xlat_ctx
, base_va
, size
, attr
);
/*
 * If dynamic allocation of new regions is disabled then by the time we call the
 * function enabling the MMU, we'll have registered all the memory regions to
 * map for the system's lifetime. Therefore, at this point we know the maximum
 * physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled then we can't make any such assumption
 * because the maximum physical address could get pushed while adding a new
 * region. Therefore, in this case we have to assume that the whole address
 * space size might be mapped.
 */
#ifdef PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif
141 void enable_mmu_svc_mon(unsigned int flags
)
143 setup_mmu_cfg((uint64_t *)&mmu_cfg_params
, flags
,
144 tf_xlat_ctx
.base_table
, MAX_PHYS_ADDR
,
145 tf_xlat_ctx
.va_max_address
, EL1_EL0_REGIME
);
146 enable_mmu_direct_svc_mon(flags
);
149 void enable_mmu_hyp(unsigned int flags
)
151 setup_mmu_cfg((uint64_t *)&mmu_cfg_params
, flags
,
152 tf_xlat_ctx
.base_table
, MAX_PHYS_ADDR
,
153 tf_xlat_ctx
.va_max_address
, EL2_REGIME
);
154 enable_mmu_direct_hyp(flags
);
159 void enable_mmu_el1(unsigned int flags
)
161 setup_mmu_cfg((uint64_t *)&mmu_cfg_params
, flags
,
162 tf_xlat_ctx
.base_table
, MAX_PHYS_ADDR
,
163 tf_xlat_ctx
.va_max_address
, EL1_EL0_REGIME
);
164 enable_mmu_direct_el1(flags
);
167 void enable_mmu_el2(unsigned int flags
)
169 setup_mmu_cfg((uint64_t *)&mmu_cfg_params
, flags
,
170 tf_xlat_ctx
.base_table
, MAX_PHYS_ADDR
,
171 tf_xlat_ctx
.va_max_address
, EL2_REGIME
);
172 enable_mmu_direct_el2(flags
);
175 void enable_mmu_el3(unsigned int flags
)
177 setup_mmu_cfg((uint64_t *)&mmu_cfg_params
, flags
,
178 tf_xlat_ctx
.base_table
, MAX_PHYS_ADDR
,
179 tf_xlat_ctx
.va_max_address
, EL3_REGIME
);
180 enable_mmu_direct_el3(flags
);