/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
9 #include <platform_def.h>
10 #include <xlat_tables_defs.h>
11 #include <xlat_tables_v2.h>
13 #include "xlat_tables_private.h"
16 * MMU configuration register values for the active translation context. Used
17 * from the MMU assembly helpers.
19 uint64_t mmu_cfg_params
[MMU_CFG_PARAM_MAX
];
/*
 * Allocate and initialise the default translation context for the BL image
 * currently executing.
 */
REGISTER_XLAT_CONTEXT(tf, MAX_MMAP_REGIONS, MAX_XLAT_TABLES,
		      PLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE);
28 void mmap_add_region(unsigned long long base_pa
, uintptr_t base_va
, size_t size
,
31 mmap_region_t mm
= MAP_REGION(base_pa
, base_va
, size
, attr
);
33 mmap_add_region_ctx(&tf_xlat_ctx
, &mm
);
36 void mmap_add(const mmap_region_t
*mm
)
38 mmap_add_ctx(&tf_xlat_ctx
, mm
);
41 void mmap_add_region_alloc_va(unsigned long long base_pa
, uintptr_t *base_va
,
42 size_t size
, unsigned int attr
)
44 mmap_region_t mm
= MAP_REGION_ALLOC_VA(base_pa
, size
, attr
);
46 mmap_add_region_alloc_va_ctx(&tf_xlat_ctx
, &mm
);
48 *base_va
= mm
.base_va
;
51 void mmap_add_alloc_va(mmap_region_t
*mm
)
53 while (mm
->granularity
!= 0U) {
54 assert(mm
->base_va
== 0U);
55 mmap_add_region_alloc_va_ctx(&tf_xlat_ctx
, mm
);
60 #if PLAT_XLAT_TABLES_DYNAMIC
62 int mmap_add_dynamic_region(unsigned long long base_pa
, uintptr_t base_va
,
63 size_t size
, unsigned int attr
)
65 mmap_region_t mm
= MAP_REGION(base_pa
, base_va
, size
, attr
);
67 return mmap_add_dynamic_region_ctx(&tf_xlat_ctx
, &mm
);
70 int mmap_add_dynamic_region_alloc_va(unsigned long long base_pa
,
71 uintptr_t *base_va
, size_t size
,
74 mmap_region_t mm
= MAP_REGION_ALLOC_VA(base_pa
, size
, attr
);
76 int rc
= mmap_add_dynamic_region_alloc_va_ctx(&tf_xlat_ctx
, &mm
);
78 *base_va
= mm
.base_va
;
84 int mmap_remove_dynamic_region(uintptr_t base_va
, size_t size
)
86 return mmap_remove_dynamic_region_ctx(&tf_xlat_ctx
,
90 #endif /* PLAT_XLAT_TABLES_DYNAMIC */
92 void __init
init_xlat_tables(void)
94 assert(tf_xlat_ctx
.xlat_regime
== EL_REGIME_INVALID
);
96 unsigned int current_el
= xlat_arch_current_el();
98 if (current_el
== 1U) {
99 tf_xlat_ctx
.xlat_regime
= EL1_EL0_REGIME
;
100 } else if (current_el
== 2U) {
101 tf_xlat_ctx
.xlat_regime
= EL2_REGIME
;
103 assert(current_el
== 3U);
104 tf_xlat_ctx
.xlat_regime
= EL3_REGIME
;
107 init_xlat_tables_ctx(&tf_xlat_ctx
);
110 int xlat_get_mem_attributes(uintptr_t base_va
, uint32_t *attr
)
112 return xlat_get_mem_attributes_ctx(&tf_xlat_ctx
, base_va
, attr
);
115 int xlat_change_mem_attributes(uintptr_t base_va
, size_t size
, uint32_t attr
)
117 return xlat_change_mem_attributes_ctx(&tf_xlat_ctx
, base_va
, size
, attr
);
/*
 * If dynamic allocation of new regions is disabled then by the time we call the
 * function enabling the MMU, we'll have registered all the memory regions to
 * map for the system's lifetime. Therefore, at this point we know the maximum
 * physical address that will ever be mapped.
 *
 * If dynamic allocation is enabled then we can't make any such assumption
 * because the maximum physical address could get pushed while adding a new
 * region. Therefore, in this case we have to assume that the whole address
 * space size might be mapped.
 */
#ifdef PLAT_XLAT_TABLES_DYNAMIC
#define MAX_PHYS_ADDR	tf_xlat_ctx.pa_max_address
#else
#define MAX_PHYS_ADDR	tf_xlat_ctx.max_pa
#endif
139 void enable_mmu_svc_mon(unsigned int flags
)
141 setup_mmu_cfg((uint64_t *)&mmu_cfg_params
, flags
,
142 tf_xlat_ctx
.base_table
, MAX_PHYS_ADDR
,
143 tf_xlat_ctx
.va_max_address
, EL1_EL0_REGIME
);
144 enable_mmu_direct_svc_mon(flags
);
147 void enable_mmu_hyp(unsigned int flags
)
149 setup_mmu_cfg((uint64_t *)&mmu_cfg_params
, flags
,
150 tf_xlat_ctx
.base_table
, MAX_PHYS_ADDR
,
151 tf_xlat_ctx
.va_max_address
, EL2_REGIME
);
152 enable_mmu_direct_hyp(flags
);
157 void enable_mmu_el1(unsigned int flags
)
159 setup_mmu_cfg((uint64_t *)&mmu_cfg_params
, flags
,
160 tf_xlat_ctx
.base_table
, MAX_PHYS_ADDR
,
161 tf_xlat_ctx
.va_max_address
, EL1_EL0_REGIME
);
162 enable_mmu_direct_el1(flags
);
165 void enable_mmu_el2(unsigned int flags
)
167 setup_mmu_cfg((uint64_t *)&mmu_cfg_params
, flags
,
168 tf_xlat_ctx
.base_table
, MAX_PHYS_ADDR
,
169 tf_xlat_ctx
.va_max_address
, EL2_REGIME
);
170 enable_mmu_direct_el2(flags
);
173 void enable_mmu_el3(unsigned int flags
)
175 setup_mmu_cfg((uint64_t *)&mmu_cfg_params
, flags
,
176 tf_xlat_ctx
.base_table
, MAX_PHYS_ADDR
,
177 tf_xlat_ctx
.va_max_address
, EL3_REGIME
);
178 enable_mmu_direct_el3(flags
);