SPM: Move shim layer to TTBR1_EL1
services/std_svc/spm/spm_xlat.c (project/bcm63xx/atf.git)
/*
 * Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <errno.h>
#include <string.h>

#include <platform_def.h>

#include <lib/object_pool.h>
#include <lib/utils.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
#include <services/sp_res_desc.h>

#include "spm_private.h"
#include "spm_shim_private.h"

/*******************************************************************************
 * Instantiation of translation table context
 ******************************************************************************/

/* Place translation tables by default along with the ones used by BL31. */
#ifndef PLAT_SP_IMAGE_XLAT_SECTION_NAME
#define PLAT_SP_IMAGE_XLAT_SECTION_NAME "xlat_table"
#endif

/*
 * Allocate elements of the translation contexts for the Secure Partitions.
 */

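/*
 * Each pool below hands out fixed-size, per-partition chunks carved out of a
 * statically allocated array, so no runtime heap is needed for this
 * bookkeeping.
 */
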
/* Allocate an array of mmap_region per partition. */
static struct mmap_region sp_mmap_regions[PLAT_SP_IMAGE_MMAP_REGIONS + 1]
        [PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_mmap_regions_pool, sp_mmap_regions,
        sizeof(mmap_region_t) * (PLAT_SP_IMAGE_MMAP_REGIONS + 1),
        PLAT_SPM_MAX_PARTITIONS);

/* Allocate individual translation tables. */
static uint64_t sp_xlat_tables[XLAT_TABLE_ENTRIES]
        [(PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS]
        __aligned(XLAT_TABLE_SIZE) __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
static OBJECT_POOL(sp_xlat_tables_pool, sp_xlat_tables,
        XLAT_TABLE_ENTRIES * sizeof(uint64_t),
        (PLAT_SP_IMAGE_MAX_XLAT_TABLES + 1) * PLAT_SPM_MAX_PARTITIONS);

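/*
 * Note: the architecture requires a translation table base address to be
 * aligned to the size of the table it points to, which is what the
 * __aligned() attribute below enforces for the base tables.
 */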
/* Allocate base translation tables. */
static uint64_t sp_xlat_base_tables
        [GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)]
        [PLAT_SPM_MAX_PARTITIONS]
        __aligned(GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)
                  * sizeof(uint64_t))
        __section(PLAT_SP_IMAGE_XLAT_SECTION_NAME);
static OBJECT_POOL(sp_xlat_base_tables_pool, sp_xlat_base_tables,
        GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE) * sizeof(uint64_t),
        PLAT_SPM_MAX_PARTITIONS);

/* Allocate per-partition arrays that track the regions mapped in each table. */
static int sp_xlat_mapped_regions[PLAT_SP_IMAGE_MAX_XLAT_TABLES]
        [PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_xlat_mapped_regions_pool, sp_xlat_mapped_regions,
        sizeof(int) * PLAT_SP_IMAGE_MAX_XLAT_TABLES, PLAT_SPM_MAX_PARTITIONS);

/* Allocate individual contexts. */
static xlat_ctx_t sp_xlat_ctx[PLAT_SPM_MAX_PARTITIONS];
static OBJECT_POOL(sp_xlat_ctx_pool, sp_xlat_ctx, sizeof(xlat_ctx_t),
        PLAT_SPM_MAX_PARTITIONS);

/* Allocate and initialise the translation context of a Secure Partition. */
xlat_ctx_t *spm_sp_xlat_context_alloc(void)
{
        xlat_ctx_t *ctx = pool_alloc(&sp_xlat_ctx_pool);

        struct mmap_region *mmap = pool_alloc(&sp_mmap_regions_pool);

        uint64_t *base_table = pool_alloc(&sp_xlat_base_tables_pool);
        uint64_t **tables = pool_alloc_n(&sp_xlat_tables_pool,
                                         PLAT_SP_IMAGE_MAX_XLAT_TABLES);

        int *mapped_regions = pool_alloc(&sp_xlat_mapped_regions_pool);

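        /*
         * Set up a dynamic context that spans the whole PA/VA space in the
         * EL1&0 translation regime. The pools panic on exhaustion, so the
         * pointers allocated above are never NULL here.
         */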
        xlat_setup_dynamic_ctx(ctx, PLAT_PHY_ADDR_SPACE_SIZE - 1,
                               PLAT_VIRT_ADDR_SPACE_SIZE - 1, mmap,
                               PLAT_SP_IMAGE_MMAP_REGIONS, tables,
                               PLAT_SP_IMAGE_MAX_XLAT_TABLES, base_table,
                               EL1_EL0_REGIME, mapped_regions);

        return ctx;
}

/*******************************************************************************
 * Translation table context used for S-EL1 exception vectors
 ******************************************************************************/

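/*
 * REGISTER_XLAT_CONTEXT2 statically instantiates this context (and its
 * translation tables) under the name spm_sel1_xlat_ctx, in the same linker
 * section as the partition tables.
 */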
REGISTER_XLAT_CONTEXT2(spm_sel1, SPM_SHIM_MMAP_REGIONS, SPM_SHIM_XLAT_TABLES,
                       SPM_SHIM_XLAT_VIRT_ADDR_SPACE_SIZE, PLAT_PHY_ADDR_SPACE_SIZE,
                       EL1_EL0_REGIME, PLAT_SP_IMAGE_XLAT_SECTION_NAME);

void spm_exceptions_xlat_init_context(void)
{
        /* This region contains the exception vectors used at S-EL1. */
        mmap_region_t sel1_exception_vectors =
                MAP_REGION(SPM_SHIM_EXCEPTIONS_PTR,
                           0x0UL,
                           SPM_SHIM_EXCEPTIONS_SIZE,
                           MT_CODE | MT_SECURE | MT_PRIVILEGED);

        mmap_add_region_ctx(&spm_sel1_xlat_ctx,
                            &sel1_exception_vectors);

        init_xlat_tables_ctx(&spm_sel1_xlat_ctx);
}

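/*
 * Return the base table of the shim context. As the commit subject says, the
 * shim layer lives in TTBR1_EL1: the caller is expected to program this table
 * into TTBR1_EL1, leaving TTBR0_EL1 free for the partition's own context.
 */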
uint64_t *spm_exceptions_xlat_get_base_table(void)
{
        return spm_sel1_xlat_ctx.base_table;
}

/*******************************************************************************
 * Functions to allocate memory for regions.
 ******************************************************************************/

/*
 * The region with base PLAT_SPM_HEAP_BASE and size PLAT_SPM_HEAP_SIZE is
 * reserved for SPM to use as a heap from which to allocate the memory regions
 * of Secure Partitions. This is only done at boot.
 */
static OBJECT_POOL(spm_heap_mem, (void *)PLAT_SPM_HEAP_BASE, 1U,
                   PLAT_SPM_HEAP_SIZE);

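/*
 * With an element size of one byte, pool_alloc_n() behaves as a bump
 * allocator: spm_alloc_heap() below returns 'size' contiguous bytes, and
 * nothing is ever freed back to the pool.
 */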
static uintptr_t spm_alloc_heap(size_t size)
{
        return (uintptr_t)pool_alloc_n(&spm_heap_mem, size);
}

/*******************************************************************************
 * Functions to map memory regions described in the resource description.
 ******************************************************************************/
static unsigned int rdmem_attr_to_mmap_attr(uint32_t attr)
{
        unsigned int index = attr & RD_MEM_MASK;

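        /*
         * Translation table attributes for each RD_MEM_* type; the table is
         * indexed directly by the value encoded in the attributes field.
         */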
        const unsigned int mmap_attr_arr[8] = {
                MT_DEVICE | MT_RW | MT_SECURE,  /* RD_MEM_DEVICE */
                MT_CODE | MT_SECURE,            /* RD_MEM_NORMAL_CODE */
                MT_MEMORY | MT_RW | MT_SECURE,  /* RD_MEM_NORMAL_DATA */
                MT_MEMORY | MT_RW | MT_SECURE,  /* RD_MEM_NORMAL_BSS */
                MT_RO_DATA | MT_SECURE,         /* RD_MEM_NORMAL_RODATA */
                MT_MEMORY | MT_RW | MT_SECURE,  /* RD_MEM_NORMAL_SPM_SP_SHARED_MEM */
                MT_MEMORY | MT_RW | MT_SECURE,  /* RD_MEM_NORMAL_CLIENT_SHARED_MEM */
                MT_MEMORY | MT_RW | MT_SECURE   /* RD_MEM_NORMAL_MISCELLANEOUS */
        };

        if (index >= ARRAY_SIZE(mmap_attr_arr)) {
                ERROR("Unsupported RD memory attributes 0x%x\n", attr);
                panic();
        }

        return mmap_attr_arr[index];
}

/*
 * The data provided in the resource description structure is not directly
 * compatible with the mmap_region structure. This function converts one
 * region description into an mmap_region and maps it in the partition's
 * translation context.
 */
static void map_rdmem(sp_context_t *sp_ctx, struct sp_rd_sect_mem_region *rdmem)
{
        int rc;
        mmap_region_t mmap;

        /* Location of the SP image */
        uintptr_t sp_size = sp_ctx->image_size;
        uintptr_t sp_base_va = sp_ctx->rd.attribute.load_address;
        unsigned long long sp_base_pa = sp_ctx->image_base;

        /* Location of the memory region to map */
        size_t rd_size = rdmem->size;
        uintptr_t rd_base_va = rdmem->base;
        unsigned long long rd_base_pa;

        unsigned int memtype = rdmem->attr & RD_MEM_MASK;

        if (rd_size == 0U) {
                VERBOSE("Memory region '%s' is empty. Ignored.\n", rdmem->name);
                return;
        }

        VERBOSE("Adding memory region '%s'\n", rdmem->name);

        mmap.granularity = REGION_DEFAULT_GRANULARITY;

        /*
         * Check whether the RD region overlaps the SP image. is_outside is
         * nonzero only when the region lies completely outside the image.
         */
        int is_outside = (rd_base_va + rd_size <= sp_base_va) ||
                         (sp_base_va + sp_size <= rd_base_va);

        /* Set to 1 if this region needs to be zeroed */
        int zero_region = 0;

        switch (memtype) {
        case RD_MEM_DEVICE:
                /* Device regions are mapped 1:1 */
                rd_base_pa = rd_base_va;
                break;

        case RD_MEM_NORMAL_CODE:
        case RD_MEM_NORMAL_RODATA:
        {
                if (is_outside == 1) {
                        ERROR("Code and rodata sections must be fully contained in the image.\n");
                        panic();
                }

                /* Get offset into the image */
                rd_base_pa = sp_base_pa + rd_base_va - sp_base_va;
                break;
        }
        case RD_MEM_NORMAL_DATA:
        {
                if (is_outside == 1) {
                        ERROR("Data sections must be fully contained in the image.\n");
                        panic();
                }

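                /*
                 * Give the partition a private, writable copy of the data
                 * section in the SPM heap. The copy is populated from the
                 * image through a temporary EL3 mapping, so the image itself
                 * is left untouched.
                 */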
                rd_base_pa = spm_alloc_heap(rd_size);

                /* Get offset into the image */
                void *img_pa = (void *)(sp_base_pa + rd_base_va - sp_base_va);

                VERBOSE("  Copying data from %p to 0x%llx\n", img_pa, rd_base_pa);

                /* Map destination */
                rc = mmap_add_dynamic_region(rd_base_pa, rd_base_pa,
                                rd_size, MT_MEMORY | MT_RW | MT_SECURE);
                if (rc != 0) {
                        ERROR("Unable to map data region at EL3: %d\n", rc);
                        panic();
                }

                /* Copy original data to destination */
                memcpy((void *)rd_base_pa, img_pa, rd_size);

                /* Unmap destination region */
                rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
                if (rc != 0) {
                        ERROR("Unable to remove data region at EL3: %d\n", rc);
                        panic();
                }

                break;
        }
        case RD_MEM_NORMAL_MISCELLANEOUS:
                /* Allow SPM to change the attributes of the region. */
                mmap.granularity = PAGE_SIZE;
                rd_base_pa = spm_alloc_heap(rd_size);
                zero_region = 1;
                break;

        case RD_MEM_NORMAL_SPM_SP_SHARED_MEM:
                if ((sp_ctx->spm_sp_buffer_base != 0) ||
                    (sp_ctx->spm_sp_buffer_size != 0)) {
                        ERROR("A partition must have only one SPM<->SP buffer.\n");
                        panic();
                }
                rd_base_pa = spm_alloc_heap(rd_size);
                zero_region = 1;
                /* Save the location of this buffer; SPM needs it later */
                sp_ctx->spm_sp_buffer_base = rd_base_pa;
                sp_ctx->spm_sp_buffer_size = rd_size;
                break;

        case RD_MEM_NORMAL_CLIENT_SHARED_MEM:
                /* Fallthrough */
        case RD_MEM_NORMAL_BSS:
                rd_base_pa = spm_alloc_heap(rd_size);
                zero_region = 1;
                break;

        default:
                panic();
        }

        mmap.base_pa = rd_base_pa;
        mmap.base_va = rd_base_va;
        mmap.size = rd_size;

        /* Only S-EL0 mappings supported for now */
        mmap.attr = rdmem_attr_to_mmap_attr(rdmem->attr) | MT_USER;

        VERBOSE("  VA: 0x%lx PA: 0x%llx (0x%lx, attr: 0x%x)\n",
                mmap.base_va, mmap.base_pa, mmap.size, mmap.attr);

        /* Map the region in the context of the Secure Partition */
        mmap_add_region_ctx(sp_ctx->xlat_ctx_handle, &mmap);

        if (zero_region == 1) {
                VERBOSE("  Zeroing region...\n");

                rc = mmap_add_dynamic_region(mmap.base_pa, mmap.base_pa,
                                mmap.size, MT_MEMORY | MT_RW | MT_SECURE);
                if (rc != 0) {
                        ERROR("Unable to map memory at EL3 to zero: %d\n",
                              rc);
                        panic();
                }

                zeromem((void *)mmap.base_pa, mmap.size);

                /*
                 * Unmap destination region unless it is the SPM<->SP buffer,
                 * which must remain mapped for use by SPM.
                 */
                if (memtype != RD_MEM_NORMAL_SPM_SP_SHARED_MEM) {
                        rc = mmap_remove_dynamic_region(rd_base_pa, rd_size);
                        if (rc != 0) {
                                ERROR("Unable to remove region at EL3: %d\n", rc);
                                panic();
                        }
                }
        }
}

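/*
 * Walk the list of memory regions described in the resource description, map
 * each one into the partition's translation context, then generate the
 * translation tables.
 */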
void sp_map_memory_regions(sp_context_t *sp_ctx)
{
        struct sp_rd_sect_mem_region *rdmem;

        for (rdmem = sp_ctx->rd.mem_region; rdmem != NULL; rdmem = rdmem->next) {
                map_rdmem(sp_ctx, rdmem);
        }

        init_xlat_tables_ctx(sp_ctx->xlat_ctx_handle);
}