/*
 * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>
#include <string.h>

#include <arch_helpers.h>
#include <platform_def.h>

#include <common/debug.h>
#include <lib/object_pool.h>
#include <lib/utils.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
#include <services/sp_res_desc.h>

#include "spm_private.h"
#include "spm_shim_private.h"
/*******************************************************************************
 * Instantiation of translation table context
 ******************************************************************************/
/* Place translation tables by default along with the ones used by BL31. */
#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
#define PLAT_SP_IMAGE_XLAT_SECTION_NAME	"xlat_table"
#endif
/*
 * Allocate elements of the translation contexts for the Secure Partitions.
 */
38 /* Allocate an array of mmap_region per partition. */
39 static struct mmap_region sp_mmap_regions
[PLAT_SP_IMAGE_MMAP_REGIONS
+ 1]
40 [PLAT_SPM_MAX_PARTITIONS
];
41 static OBJECT_POOL(sp_mmap_regions_pool
, sp_mmap_regions
,
42 sizeof(mmap_region_t
) * (PLAT_SP_IMAGE_MMAP_REGIONS
+ 1),
43 PLAT_SPM_MAX_PARTITIONS
);
45 /* Allocate individual translation tables. */
46 static uint64_t sp_xlat_tables
[XLAT_TABLE_ENTRIES
]
47 [(PLAT_SP_IMAGE_MAX_XLAT_TABLES
+ 1) * PLAT_SPM_MAX_PARTITIONS
]
48 __aligned(XLAT_TABLE_SIZE
) __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME
);
49 static OBJECT_POOL(sp_xlat_tables_pool
, sp_xlat_tables
,
50 XLAT_TABLE_ENTRIES
* sizeof(uint64_t),
51 (PLAT_SP_IMAGE_MAX_XLAT_TABLES
+ 1) * PLAT_SPM_MAX_PARTITIONS
);
53 /* Allocate base translation tables. */
54 static uint64_t sp_xlat_base_tables
55 [GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE
)]
56 [PLAT_SPM_MAX_PARTITIONS
]
57 __aligned(GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE
)
59 __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME
);
60 static OBJECT_POOL(sp_xlat_base_tables_pool
, sp_xlat_base_tables
,
61 GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE
) * sizeof(uint64_t),
62 PLAT_SPM_MAX_PARTITIONS
);
64 /* Allocate arrays. */
65 static int sp_xlat_mapped_regions
[PLAT_SP_IMAGE_MAX_XLAT_TABLES
]
66 [PLAT_SPM_MAX_PARTITIONS
];
67 static OBJECT_POOL(sp_xlat_mapped_regions_pool
, sp_xlat_mapped_regions
,
68 sizeof(int) * PLAT_SP_IMAGE_MAX_XLAT_TABLES
, PLAT_SPM_MAX_PARTITIONS
);
70 /* Allocate individual contexts. */
71 static xlat_ctx_t sp_xlat_ctx
[PLAT_SPM_MAX_PARTITIONS
];
72 static OBJECT_POOL(sp_xlat_ctx_pool
, sp_xlat_ctx
, sizeof(xlat_ctx_t
),
73 PLAT_SPM_MAX_PARTITIONS
);
75 /* Get handle of Secure Partition translation context */
76 xlat_ctx_t
*spm_sp_xlat_context_alloc(void)
78 xlat_ctx_t
*ctx
= pool_alloc(&sp_xlat_ctx_pool
);
80 struct mmap_region
*mmap
= pool_alloc(&sp_mmap_regions_pool
);
82 uint64_t *base_table
= pool_alloc(&sp_xlat_base_tables_pool
);
83 uint64_t **tables
= pool_alloc_n(&sp_xlat_tables_pool
,
84 PLAT_SP_IMAGE_MAX_XLAT_TABLES
);
86 int *mapped_regions
= pool_alloc(&sp_xlat_mapped_regions_pool
);
88 xlat_setup_dynamic_ctx(ctx
, PLAT_PHY_ADDR_SPACE_SIZE
- 1,
89 PLAT_VIRT_ADDR_SPACE_SIZE
- 1, mmap
,
90 PLAT_SP_IMAGE_MMAP_REGIONS
, tables
,
91 PLAT_SP_IMAGE_MAX_XLAT_TABLES
, base_table
,
92 EL1_EL0_REGIME
, mapped_regions
);
/*******************************************************************************
 * Functions to allocate memory for regions.
 ******************************************************************************/
102 * The region with base PLAT_SPM_HEAP_BASE and size PLAT_SPM_HEAP_SIZE is
103 * reserved for SPM to use as heap to allocate memory regions of Secure
104 * Partitions. This is only done at boot.
106 static OBJECT_POOL(spm_heap_mem
, (void *)PLAT_SPM_HEAP_BASE
, 1U,
109 static uintptr_t spm_alloc_heap(size_t size
)
111 return (uintptr_t)pool_alloc_n(&spm_heap_mem
, size
);
/*******************************************************************************
 * Functions to map memory regions described in the resource description.
 ******************************************************************************/
117 static unsigned int rdmem_attr_to_mmap_attr(uint32_t attr
)
119 unsigned int index
= attr
& RD_MEM_MASK
;
121 const unsigned int mmap_attr_arr
[8] = {
122 MT_DEVICE
| MT_RW
| MT_SECURE
, /* RD_MEM_DEVICE */
123 MT_CODE
| MT_SECURE
, /* RD_MEM_NORMAL_CODE */
124 MT_MEMORY
| MT_RW
| MT_SECURE
, /* RD_MEM_NORMAL_DATA */
125 MT_MEMORY
| MT_RW
| MT_SECURE
, /* RD_MEM_NORMAL_BSS */
126 MT_RO_DATA
| MT_SECURE
, /* RD_MEM_NORMAL_RODATA */
127 MT_MEMORY
| MT_RW
| MT_SECURE
, /* RD_MEM_NORMAL_SPM_SP_SHARED_MEM */
128 MT_MEMORY
| MT_RW
| MT_SECURE
, /* RD_MEM_NORMAL_CLIENT_SHARED_MEM */
129 MT_MEMORY
| MT_RW
| MT_SECURE
/* RD_MEM_NORMAL_MISCELLANEOUS */
132 if (index
>= ARRAY_SIZE(mmap_attr_arr
)) {
133 ERROR("Unsupported RD memory attributes 0x%x\n", attr
);
137 return mmap_attr_arr
[index
];
141 * The data provided in the resource description structure is not directly
142 * compatible with a mmap_region structure. This function handles the conversion
145 static void map_rdmem(sp_context_t
*sp_ctx
, struct sp_rd_sect_mem_region
*rdmem
)
150 /* Location of the SP image */
151 uintptr_t sp_size
= sp_ctx
->image_size
;
152 uintptr_t sp_base_va
= sp_ctx
->rd
.attribute
.load_address
;
153 unsigned long long sp_base_pa
= sp_ctx
->image_base
;
155 /* Location of the memory region to map */
156 size_t rd_size
= rdmem
->size
;
157 uintptr_t rd_base_va
= rdmem
->base
;
158 unsigned long long rd_base_pa
;
160 unsigned int memtype
= rdmem
->attr
& RD_MEM_MASK
;
163 VERBOSE("Memory region '%s' is empty. Ignored.\n", rdmem
->name
);
167 VERBOSE("Adding memory region '%s'\n", rdmem
->name
);
169 mmap
.granularity
= REGION_DEFAULT_GRANULARITY
;
171 /* Check if the RD region is inside of the SP image or not */
172 int is_outside
= (rd_base_va
+ rd_size
<= sp_base_va
) ||
173 (sp_base_va
+ sp_size
<= rd_base_va
);
175 /* Set to 1 if it is needed to zero this region */
180 /* Device regions are mapped 1:1 */
181 rd_base_pa
= rd_base_va
;
184 case RD_MEM_NORMAL_CODE
:
185 case RD_MEM_NORMAL_RODATA
:
187 if (is_outside
== 1) {
188 ERROR("Code and rodata sections must be fully contained in the image.");
192 /* Get offset into the image */
193 rd_base_pa
= sp_base_pa
+ rd_base_va
- sp_base_va
;
196 case RD_MEM_NORMAL_DATA
:
198 if (is_outside
== 1) {
199 ERROR("Data sections must be fully contained in the image.");
203 rd_base_pa
= spm_alloc_heap(rd_size
);
205 /* Get offset into the image */
206 void *img_pa
= (void *)(sp_base_pa
+ rd_base_va
- sp_base_va
);
208 VERBOSE(" Copying data from %p to 0x%llx\n", img_pa
, rd_base_pa
);
210 /* Map destination */
211 rc
= mmap_add_dynamic_region(rd_base_pa
, rd_base_pa
,
212 rd_size
, MT_MEMORY
| MT_RW
| MT_SECURE
);
214 ERROR("Unable to map data region at EL3: %d\n", rc
);
218 /* Copy original data to destination */
219 memcpy((void *)rd_base_pa
, img_pa
, rd_size
);
221 /* Unmap destination region */
222 rc
= mmap_remove_dynamic_region(rd_base_pa
, rd_size
);
224 ERROR("Unable to remove data region at EL3: %d\n", rc
);
230 case RD_MEM_NORMAL_MISCELLANEOUS
:
231 /* Allow SPM to change the attributes of the region. */
232 mmap
.granularity
= PAGE_SIZE
;
233 rd_base_pa
= spm_alloc_heap(rd_size
);
237 case RD_MEM_NORMAL_SPM_SP_SHARED_MEM
:
238 if ((sp_ctx
->spm_sp_buffer_base
!= 0) ||
239 (sp_ctx
->spm_sp_buffer_size
!= 0)) {
240 ERROR("A partition must have only one SPM<->SP buffer.\n");
243 rd_base_pa
= spm_alloc_heap(rd_size
);
245 /* Save location of this buffer, it is needed by SPM */
246 sp_ctx
->spm_sp_buffer_base
= rd_base_pa
;
247 sp_ctx
->spm_sp_buffer_size
= rd_size
;
250 case RD_MEM_NORMAL_CLIENT_SHARED_MEM
:
252 case RD_MEM_NORMAL_BSS
:
253 rd_base_pa
= spm_alloc_heap(rd_size
);
261 mmap
.base_pa
= rd_base_pa
;
262 mmap
.base_va
= rd_base_va
;
265 /* Only S-EL0 mappings supported for now */
266 mmap
.attr
= rdmem_attr_to_mmap_attr(rdmem
->attr
) | MT_USER
;
268 VERBOSE(" VA: 0x%lx PA: 0x%llx (0x%lx, attr: 0x%x)\n",
269 mmap
.base_va
, mmap
.base_pa
, mmap
.size
, mmap
.attr
);
271 /* Map region in the context of the Secure Partition */
272 mmap_add_region_ctx(sp_ctx
->xlat_ctx_handle
, &mmap
);
274 if (zero_region
== 1) {
275 VERBOSE(" Zeroing region...\n");
277 rc
= mmap_add_dynamic_region(mmap
.base_pa
, mmap
.base_pa
,
278 mmap
.size
, MT_MEMORY
| MT_RW
| MT_SECURE
);
280 ERROR("Unable to map memory at EL3 to zero: %d\n",
285 zeromem((void *)mmap
.base_pa
, mmap
.size
);
288 * Unmap destination region unless it is the SPM<->SP buffer,
289 * which must be used by SPM.
291 if (memtype
!= RD_MEM_NORMAL_SPM_SP_SHARED_MEM
) {
292 rc
= mmap_remove_dynamic_region(rd_base_pa
, rd_size
);
294 ERROR("Unable to remove region at EL3: %d\n", rc
);
301 void sp_map_memory_regions(sp_context_t
*sp_ctx
)
303 /* This region contains the exception vectors used at S-EL1. */
304 const mmap_region_t sel1_exception_vectors
=
305 MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START
,
306 SPM_SHIM_EXCEPTIONS_SIZE
,
307 MT_CODE
| MT_SECURE
| MT_PRIVILEGED
);
309 mmap_add_region_ctx(sp_ctx
->xlat_ctx_handle
,
310 &sel1_exception_vectors
);
312 struct sp_rd_sect_mem_region
*rdmem
;
314 for (rdmem
= sp_ctx
->rd
.mem_region
; rdmem
!= NULL
; rdmem
= rdmem
->next
) {
315 map_rdmem(sp_ctx
, rdmem
);
318 init_xlat_tables_ctx(sp_ctx
->xlat_ctx_handle
);