2 * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
4 * SPDX-License-Identifier: BSD-3-Clause
8 #include <arch_helpers.h>
13 #include <platform_def.h>
15 #include <lib/object_pool.h>
16 #include <lib/utils.h>
17 #include <lib/utils_def.h>
18 #include <lib/xlat_tables/xlat_tables_v2.h>
19 #include <plat/common/platform.h>
20 #include <services/sp_res_desc.h>
22 #include "spm_private.h"
23 #include "spm_shim_private.h"
25 /*******************************************************************************
26 * Instantiation of translation table context
27 ******************************************************************************/
29 /* Place translation tables by default along with the ones used by BL31. */
30 #ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
31 #define PLAT_SP_IMAGE_XLAT_SECTION_NAME "xlat_table"
/*
 * Allocate elements of the translation contexts for the Secure Partitions.
 */

/*
 * Allocate an array of mmap_region per partition. The extra entry (+ 1) leaves
 * room for the terminating sentinel expected by the xlat tables library —
 * NOTE(review): confirm against xlat_tables_v2 usage.
 */
static struct mmap_region sp_mmap_regions
	[PLAT_SP_IMAGE_MMAP_REGIONS + 1]
	[PLAT_SPM_MAX_PARTITIONS];
/* Pool handing out one mmap_region array per partition. */
static OBJECT_POOL(sp_mmap_regions_pool, sp_mmap_regions,
	sizeof(mmap_region_t) * (PLAT_SP_IMAGE_MMAP_REGIONS + 1),
	PLAT_SPM_MAX_PARTITIONS);
/*
 * Allocate individual translation tables. Placed in the platform-chosen
 * section and aligned to the table size as the MMU requires.
 */
static uint64_t sp_xlat_tables
	[XLAT_TABLE_ENTRIES]
	[(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS]
	__aligned(XLAT_TABLE_SIZE) __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
/*
 * Pool of tables; each element is one table of XLAT_TABLE_ENTRIES descriptors.
 * The (+ 1) per partition presumably accounts for an extra table consumed
 * during context setup — TODO confirm.
 */
static OBJECT_POOL(sp_xlat_tables_pool, sp_xlat_tables,
	XLAT_TABLE_ENTRIES * sizeof(uint64_t),
	(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS);
53 /* Allocate base translation tables. */
54 static uint64_t sp_xlat_base_tables
55 [GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE
)]
56 [PLAT_SPM_MAX_PARTITIONS
]
57 __aligned(GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE
)
59 __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME
);
60 static OBJECT_POOL(sp_xlat_base_tables_pool
, sp_xlat_base_tables
,
61 GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE
) * sizeof(uint64_t),
62 PLAT_SPM_MAX_PARTITIONS
);
/*
 * Allocate arrays used by the xlat library to track how many regions are
 * mapped in each translation table of a context.
 */
static int sp_xlat_mapped_regions
	[PLAT_SP_IMAGE_MAX_XLAT_TABLES]
	[PLAT_SPM_MAX_PARTITIONS];
/* Pool handing out one tracking array per partition. */
static OBJECT_POOL(sp_xlat_mapped_regions_pool, sp_xlat_mapped_regions,
	sizeof(int) * PLAT_SP_IMAGE_MAX_XLAT_TABLES, PLAT_SPM_MAX_PARTITIONS);
/* Allocate individual translation contexts, one per partition. */
static xlat_ctx_t sp_xlat_ctx
	[PLAT_SPM_MAX_PARTITIONS];
/* Pool handing out one xlat_ctx_t per partition. */
static OBJECT_POOL(sp_xlat_ctx_pool, sp_xlat_ctx, sizeof(xlat_ctx_t),
	PLAT_SPM_MAX_PARTITIONS);
75 /* Get handle of Secure Partition translation context */
76 xlat_ctx_t
*spm_sp_xlat_context_alloc(void)
78 xlat_ctx_t
*ctx
= pool_alloc(&sp_xlat_ctx_pool
);
80 struct mmap_region
*mmap
= pool_alloc(&sp_mmap_regions_pool
);
82 uint64_t *base_table
= pool_alloc(&sp_xlat_base_tables_pool
);
83 uint64_t **tables
= pool_alloc_n(&sp_xlat_tables_pool
,
84 PLAT_SP_IMAGE_MAX_XLAT_TABLES
);
86 int *mapped_regions
= pool_alloc(&sp_xlat_mapped_regions_pool
);
88 xlat_setup_dynamic_ctx(ctx
, PLAT_PHY_ADDR_SPACE_SIZE
- 1,
89 PLAT_VIRT_ADDR_SPACE_SIZE
- 1, mmap
,
90 PLAT_SP_IMAGE_MMAP_REGIONS
, tables
,
91 PLAT_SP_IMAGE_MAX_XLAT_TABLES
, base_table
,
92 EL1_EL0_REGIME
, mapped_regions
);
/*******************************************************************************
 * Translation table context used for S-EL1 exception vectors
 ******************************************************************************/

/* Declares the statically-allocated context `spm_sel1_xlat_ctx`. */
REGISTER_XLAT_CONTEXT2(spm_sel1, SPM_SHIM_MMAP_REGIONS, SPM_SHIM_XLAT_TABLES,
		       SPM_SHIM_XLAT_VIRT_ADDR_SPACE_SIZE,
		       PLAT_PHY_ADDR_SPACE_SIZE,
		       EL1_EL0_REGIME, PLAT_SP_IMAGE_XLAT_SECTION_NAME);
105 void spm_exceptions_xlat_init_context(void)
107 /* This region contains the exception vectors used at S-EL1. */
108 mmap_region_t sel1_exception_vectors
=
109 MAP_REGION(SPM_SHIM_EXCEPTIONS_PTR
,
111 SPM_SHIM_EXCEPTIONS_SIZE
,
112 MT_CODE
| MT_SECURE
| MT_PRIVILEGED
);
114 mmap_add_region_ctx(&spm_sel1_xlat_ctx
,
115 &sel1_exception_vectors
);
117 init_xlat_tables_ctx(&spm_sel1_xlat_ctx
);
120 uint64_t *spm_exceptions_xlat_get_base_table(void)
122 return spm_sel1_xlat_ctx
.base_table
;
125 /*******************************************************************************
126 * Functions to allocate memory for regions.
127 ******************************************************************************/
130 * The region with base PLAT_SPM_HEAP_BASE and size PLAT_SPM_HEAP_SIZE is
131 * reserved for SPM to use as heap to allocate memory regions of Secure
132 * Partitions. This is only done at boot.
134 static OBJECT_POOL(spm_heap_mem
, (void *)PLAT_SPM_HEAP_BASE
, 1U,
137 static uintptr_t spm_alloc_heap(size_t size
)
139 return (uintptr_t)pool_alloc_n(&spm_heap_mem
, size
);
142 /*******************************************************************************
143 * Functions to map memory regions described in the resource description.
144 ******************************************************************************/
145 static unsigned int rdmem_attr_to_mmap_attr(uint32_t attr
)
147 unsigned int index
= attr
& RD_MEM_MASK
;
149 const unsigned int mmap_attr_arr
[8] = {
150 MT_DEVICE
| MT_RW
| MT_SECURE
, /* RD_MEM_DEVICE */
151 MT_CODE
| MT_SECURE
, /* RD_MEM_NORMAL_CODE */
152 MT_MEMORY
| MT_RW
| MT_SECURE
, /* RD_MEM_NORMAL_DATA */
153 MT_MEMORY
| MT_RW
| MT_SECURE
, /* RD_MEM_NORMAL_BSS */
154 MT_RO_DATA
| MT_SECURE
, /* RD_MEM_NORMAL_RODATA */
155 MT_MEMORY
| MT_RW
| MT_SECURE
, /* RD_MEM_NORMAL_SPM_SP_SHARED_MEM */
156 MT_MEMORY
| MT_RW
| MT_SECURE
, /* RD_MEM_NORMAL_CLIENT_SHARED_MEM */
157 MT_MEMORY
| MT_RW
| MT_SECURE
/* RD_MEM_NORMAL_MISCELLANEOUS */
160 if (index
>= ARRAY_SIZE(mmap_attr_arr
)) {
161 ERROR("Unsupported RD memory attributes 0x%x\n", attr
);
165 return mmap_attr_arr
[index
];
169 * The data provided in the resource description structure is not directly
170 * compatible with a mmap_region structure. This function handles the conversion
173 static void map_rdmem(sp_context_t
*sp_ctx
, struct sp_rd_sect_mem_region
*rdmem
)
178 /* Location of the SP image */
179 uintptr_t sp_size
= sp_ctx
->image_size
;
180 uintptr_t sp_base_va
= sp_ctx
->rd
.attribute
.load_address
;
181 unsigned long long sp_base_pa
= sp_ctx
->image_base
;
183 /* Location of the memory region to map */
184 size_t rd_size
= rdmem
->size
;
185 uintptr_t rd_base_va
= rdmem
->base
;
186 unsigned long long rd_base_pa
;
188 unsigned int memtype
= rdmem
->attr
& RD_MEM_MASK
;
191 VERBOSE("Memory region '%s' is empty. Ignored.\n", rdmem
->name
);
195 VERBOSE("Adding memory region '%s'\n", rdmem
->name
);
197 mmap
.granularity
= REGION_DEFAULT_GRANULARITY
;
199 /* Check if the RD region is inside of the SP image or not */
200 int is_outside
= (rd_base_va
+ rd_size
<= sp_base_va
) ||
201 (sp_base_va
+ sp_size
<= rd_base_va
);
203 /* Set to 1 if it is needed to zero this region */
208 /* Device regions are mapped 1:1 */
209 rd_base_pa
= rd_base_va
;
212 case RD_MEM_NORMAL_CODE
:
213 case RD_MEM_NORMAL_RODATA
:
215 if (is_outside
== 1) {
216 ERROR("Code and rodata sections must be fully contained in the image.");
220 /* Get offset into the image */
221 rd_base_pa
= sp_base_pa
+ rd_base_va
- sp_base_va
;
224 case RD_MEM_NORMAL_DATA
:
226 if (is_outside
== 1) {
227 ERROR("Data sections must be fully contained in the image.");
231 rd_base_pa
= spm_alloc_heap(rd_size
);
233 /* Get offset into the image */
234 void *img_pa
= (void *)(sp_base_pa
+ rd_base_va
- sp_base_va
);
236 VERBOSE(" Copying data from %p to 0x%llx\n", img_pa
, rd_base_pa
);
238 /* Map destination */
239 rc
= mmap_add_dynamic_region(rd_base_pa
, rd_base_pa
,
240 rd_size
, MT_MEMORY
| MT_RW
| MT_SECURE
);
242 ERROR("Unable to map data region at EL3: %d\n", rc
);
246 /* Copy original data to destination */
247 memcpy((void *)rd_base_pa
, img_pa
, rd_size
);
249 /* Unmap destination region */
250 rc
= mmap_remove_dynamic_region(rd_base_pa
, rd_size
);
252 ERROR("Unable to remove data region at EL3: %d\n", rc
);
258 case RD_MEM_NORMAL_MISCELLANEOUS
:
259 /* Allow SPM to change the attributes of the region. */
260 mmap
.granularity
= PAGE_SIZE
;
261 rd_base_pa
= spm_alloc_heap(rd_size
);
265 case RD_MEM_NORMAL_SPM_SP_SHARED_MEM
:
266 if ((sp_ctx
->spm_sp_buffer_base
!= 0) ||
267 (sp_ctx
->spm_sp_buffer_size
!= 0)) {
268 ERROR("A partition must have only one SPM<->SP buffer.\n");
271 rd_base_pa
= spm_alloc_heap(rd_size
);
273 /* Save location of this buffer, it is needed by SPM */
274 sp_ctx
->spm_sp_buffer_base
= rd_base_pa
;
275 sp_ctx
->spm_sp_buffer_size
= rd_size
;
278 case RD_MEM_NORMAL_CLIENT_SHARED_MEM
:
280 case RD_MEM_NORMAL_BSS
:
281 rd_base_pa
= spm_alloc_heap(rd_size
);
289 mmap
.base_pa
= rd_base_pa
;
290 mmap
.base_va
= rd_base_va
;
293 /* Only S-EL0 mappings supported for now */
294 mmap
.attr
= rdmem_attr_to_mmap_attr(rdmem
->attr
) | MT_USER
;
296 VERBOSE(" VA: 0x%lx PA: 0x%llx (0x%lx, attr: 0x%x)\n",
297 mmap
.base_va
, mmap
.base_pa
, mmap
.size
, mmap
.attr
);
299 /* Map region in the context of the Secure Partition */
300 mmap_add_region_ctx(sp_ctx
->xlat_ctx_handle
, &mmap
);
302 if (zero_region
== 1) {
303 VERBOSE(" Zeroing region...\n");
305 rc
= mmap_add_dynamic_region(mmap
.base_pa
, mmap
.base_pa
,
306 mmap
.size
, MT_MEMORY
| MT_RW
| MT_SECURE
);
308 ERROR("Unable to map memory at EL3 to zero: %d\n",
313 zeromem((void *)mmap
.base_pa
, mmap
.size
);
316 * Unmap destination region unless it is the SPM<->SP buffer,
317 * which must be used by SPM.
319 if (memtype
!= RD_MEM_NORMAL_SPM_SP_SHARED_MEM
) {
320 rc
= mmap_remove_dynamic_region(rd_base_pa
, rd_size
);
322 ERROR("Unable to remove region at EL3: %d\n", rc
);
329 void sp_map_memory_regions(sp_context_t
*sp_ctx
)
331 struct sp_rd_sect_mem_region
*rdmem
;
333 for (rdmem
= sp_ctx
->rd
.mem_region
; rdmem
!= NULL
; rdmem
= rdmem
->next
) {
334 map_rdmem(sp_ctx
, rdmem
);
337 init_xlat_tables_ctx(sp_ctx
->xlat_ctx_handle
);