/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <arch_helpers.h>
#include <assert.h>
#include <errno.h>
#include <stdbool.h>

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>

#include <se_private.h>
#include <security_engine.h>
#include <tegra_platform.h>
/*******************************************************************************
 * Constants and Macros
 ******************************************************************************/

#define TIMEOUT_100MS	100U	// Timeout in 100ms
#define RNG_AES_KEY_INDEX	1
/*******************************************************************************
 * Data structure and global variables
 ******************************************************************************/

/* The security engine contexts are formatted as follows:
 *
 * SE1 CONTEXT:
 * #--------------------------------#
 * |        Random Data   1 Block   |
 * #--------------------------------#
 * |        Sticky Bits   2 Blocks  |
 * #--------------------------------#
 * | Key Table           64 Blocks  |
 * |     For each Key (x16):        |
 * |      Key:         2 Blocks     |
 * |      Original-IV: 1 Block      |
 * |      Updated-IV:  1 Block      |
 * #--------------------------------#
 * |        RSA Keys     64 Blocks  |
 * #--------------------------------#
 * |        Known Pattern 1 Block   |
 * #--------------------------------#
 *
 * SE2/PKA1 CONTEXT:
 * #--------------------------------#
 * |        Random Data   1 Block   |
 * #--------------------------------#
 * |        Sticky Bits   2 Blocks  |
 * #--------------------------------#
 * | Key Table           64 Blocks  |
 * |     For each Key (x16):        |
 * |      Key:         2 Blocks     |
 * |      Original-IV: 1 Block      |
 * |      Updated-IV:  1 Block      |
 * #--------------------------------#
 * |        RSA Keys     64 Blocks  |
 * #--------------------------------#
 * |        PKA sticky bits 1 Block |
 * #--------------------------------#
 * |        PKA keys    512 Blocks  |
 * #--------------------------------#
 * |        Known Pattern 1 Block   |
 * #--------------------------------#
 */
71 /* Known pattern data */
72 static const uint32_t se_ctx_known_pattern_data
[SE_CTX_KNOWN_PATTERN_SIZE_WORDS
] = {
73 /* 128 bit AES block */
80 /* SE input and output linked list buffers */
81 static tegra_se_io_lst_t se1_src_ll_buf
;
82 static tegra_se_io_lst_t se1_dst_ll_buf
;
84 /* SE2 input and output linked list buffers */
85 static tegra_se_io_lst_t se2_src_ll_buf
;
86 static tegra_se_io_lst_t se2_dst_ll_buf
;
88 /* SE1 security engine device handle */
89 static tegra_se_dev_t se_dev_1
= {
91 /* Setup base address for se */
92 .se_base
= TEGRA_SE1_BASE
,
93 /* Setup context size in AES blocks */
94 .ctx_size_blks
= SE_CTX_SAVE_SIZE_BLOCKS_SE1
,
95 /* Setup SRC buffers for SE operations */
96 .src_ll_buf
= &se1_src_ll_buf
,
97 /* Setup DST buffers for SE operations */
98 .dst_ll_buf
= &se1_dst_ll_buf
,
99 /* Setup context save destination */
100 .ctx_save_buf
= (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE
),
103 /* SE2 security engine device handle */
104 static tegra_se_dev_t se_dev_2
= {
106 /* Setup base address for se */
107 .se_base
= TEGRA_SE2_BASE
,
108 /* Setup context size in AES blocks */
109 .ctx_size_blks
= SE_CTX_SAVE_SIZE_BLOCKS_SE2
,
110 /* Setup SRC buffers for SE operations */
111 .src_ll_buf
= &se2_src_ll_buf
,
112 /* Setup DST buffers for SE operations */
113 .dst_ll_buf
= &se2_dst_ll_buf
,
114 /* Setup context save destination */
115 .ctx_save_buf
= (uint32_t *)(TEGRA_TZRAM_CARVEOUT_BASE
+ 0x1000),
/* True when the fused JTAG secure ID is valid; selects the DRBG seeding mode */
static bool ecid_valid;
/*******************************************************************************
 * Functions Definition
 ******************************************************************************/
124 static void tegra_se_make_data_coherent(const tegra_se_dev_t
*se_dev
)
126 flush_dcache_range(((uint64_t)(se_dev
->src_ll_buf
)),
127 sizeof(tegra_se_io_lst_t
));
128 flush_dcache_range(((uint64_t)(se_dev
->dst_ll_buf
)),
129 sizeof(tegra_se_io_lst_t
));
133 * Check that SE operation has completed after kickoff
134 * This function is invoked after an SE operation has been started,
135 * and it checks the following conditions:
136 * 1. SE_INT_STATUS = SE_OP_DONE
137 * 2. SE_STATUS = IDLE
138 * 3. AHB bus data transfer complete.
139 * 4. SE_ERR_STATUS is clean.
141 static int32_t tegra_se_operation_complete(const tegra_se_dev_t
*se_dev
)
147 /* Poll the SE interrupt register to ensure H/W operation complete */
148 val
= tegra_se_read_32(se_dev
, SE_INT_STATUS_REG_OFFSET
);
149 for (timeout
= 0; (SE_INT_OP_DONE(val
) == SE_INT_OP_DONE_CLEAR
) &&
150 (timeout
< TIMEOUT_100MS
); timeout
++) {
152 val
= tegra_se_read_32(se_dev
, SE_INT_STATUS_REG_OFFSET
);
155 if (timeout
== TIMEOUT_100MS
) {
156 ERROR("%s: ERR: Atomic context save operation timeout!\n",
161 /* Poll the SE status idle to ensure H/W operation complete */
163 val
= tegra_se_read_32(se_dev
, SE_STATUS_OFFSET
);
164 for (timeout
= 0; (val
!= 0U) && (timeout
< TIMEOUT_100MS
);
167 val
= tegra_se_read_32(se_dev
, SE_STATUS_OFFSET
);
170 if (timeout
== TIMEOUT_100MS
) {
171 ERROR("%s: ERR: MEM_INTERFACE and SE state "
172 "idle state timeout.\n", __func__
);
177 /* Check AHB bus transfer complete */
179 val
= mmio_read_32(TEGRA_AHB_ARB_BASE
+ ARAHB_MEM_WRQUE_MST_ID_OFFSET
);
180 for (timeout
= 0; ((val
& (ARAHB_MST_ID_SE_MASK
| ARAHB_MST_ID_SE2_MASK
)) != 0U) &&
181 (timeout
< TIMEOUT_100MS
); timeout
++) {
183 val
= mmio_read_32(TEGRA_AHB_ARB_BASE
+ ARAHB_MEM_WRQUE_MST_ID_OFFSET
);
186 if (timeout
== TIMEOUT_100MS
) {
187 ERROR("%s: SE write over AHB timeout.\n", __func__
);
192 /* Ensure that no errors are thrown during operation */
194 val
= tegra_se_read_32(se_dev
, SE_ERR_STATUS_REG_OFFSET
);
196 ERROR("%s: error during SE operation! 0x%x", __func__
, val
);
205 * Returns true if the SE engine is configured to perform SE context save in
208 static inline bool tegra_se_atomic_save_enabled(const tegra_se_dev_t
*se_dev
)
212 val
= tegra_se_read_32(se_dev
, SE_CTX_SAVE_AUTO_REG_OFFSET
);
213 return (SE_CTX_SAVE_AUTO_ENABLE(val
) == SE_CTX_SAVE_AUTO_EN
);
217 * Wait for SE engine to be idle and clear pending interrupts before
218 * starting the next SE operation.
220 static int32_t tegra_se_operation_prepare(const tegra_se_dev_t
*se_dev
)
226 /* Wait for previous operation to finish */
227 val
= tegra_se_read_32(se_dev
, SE_STATUS_OFFSET
);
228 for (timeout
= 0; (val
!= 0U) && (timeout
< TIMEOUT_100MS
); timeout
++) {
230 val
= tegra_se_read_32(se_dev
, SE_STATUS_OFFSET
);
233 if (timeout
== TIMEOUT_100MS
) {
234 ERROR("%s: ERR: SE status is not idle!\n", __func__
);
238 /* Clear any pending interrupts from previous operation */
239 val
= tegra_se_read_32(se_dev
, SE_INT_STATUS_REG_OFFSET
);
240 tegra_se_write_32(se_dev
, SE_INT_STATUS_REG_OFFSET
, val
);
245 * SE atomic context save. At SC7 entry, SE driver triggers the
246 * hardware automatically performs the context save operation.
248 static int32_t tegra_se_context_save_atomic(const tegra_se_dev_t
*se_dev
)
252 uint32_t blk_count_limit
= 0;
253 uint32_t block_count
;
255 /* Check that previous operation is finalized */
256 ret
= tegra_se_operation_prepare(se_dev
);
258 /* Read the context save progress counter: block_count
259 * Ensure no previous context save has been triggered
260 * SE_CTX_SAVE_AUTO.CURR_CNT == 0
263 val
= tegra_se_read_32(se_dev
, SE_CTX_SAVE_AUTO_REG_OFFSET
);
264 block_count
= SE_CTX_SAVE_GET_BLK_COUNT(val
);
265 if (block_count
!= 0U) {
266 ERROR("%s: ctx_save triggered multiple times\n",
272 /* Set the destination block count when the context save complete */
274 blk_count_limit
= block_count
+ se_dev
->ctx_size_blks
;
277 /* Program SE_CONFIG register as for RNG operation
278 * SE_CONFIG.ENC_ALG = RNG
279 * SE_CONFIG.DEC_ALG = NOP
280 * SE_CONFIG.ENC_MODE is ignored
281 * SE_CONFIG.DEC_MODE is ignored
282 * SE_CONFIG.DST = MEMORY
285 val
= (SE_CONFIG_ENC_ALG_RNG
|
286 SE_CONFIG_DEC_ALG_NOP
|
287 SE_CONFIG_DST_MEMORY
);
288 tegra_se_write_32(se_dev
, SE_CONFIG_REG_OFFSET
, val
);
290 tegra_se_make_data_coherent(se_dev
);
292 /* SE_CTX_SAVE operation */
293 tegra_se_write_32(se_dev
, SE_OPERATION_REG_OFFSET
,
296 ret
= tegra_se_operation_complete(se_dev
);
299 /* Check that context has written the correct number of blocks */
301 val
= tegra_se_read_32(se_dev
, SE_CTX_SAVE_AUTO_REG_OFFSET
);
302 if (SE_CTX_SAVE_GET_BLK_COUNT(val
) != blk_count_limit
) {
303 ERROR("%s: expected %d blocks but %d were written\n",
304 __func__
, blk_count_limit
, val
);
313 * Security engine primitive operations, including normal operation
314 * and the context save operation.
316 static int tegra_se_perform_operation(const tegra_se_dev_t
*se_dev
, uint32_t nbytes
,
319 uint32_t nblocks
= nbytes
/ TEGRA_SE_AES_BLOCK_SIZE
;
324 /* Use device buffers for in and out */
325 tegra_se_write_32(se_dev
, SE_OUT_LL_ADDR_REG_OFFSET
, ((uint64_t)(se_dev
->dst_ll_buf
)));
326 tegra_se_write_32(se_dev
, SE_IN_LL_ADDR_REG_OFFSET
, ((uint64_t)(se_dev
->src_ll_buf
)));
328 /* Check that previous operation is finalized */
329 ret
= tegra_se_operation_prepare(se_dev
);
334 /* Program SE operation size */
336 tegra_se_write_32(se_dev
, SE_BLOCK_COUNT_REG_OFFSET
, nblocks
- 1);
339 /* Make SE LL data coherent before the SE operation */
340 tegra_se_make_data_coherent(se_dev
);
342 /* Start hardware operation */
344 tegra_se_write_32(se_dev
, SE_OPERATION_REG_OFFSET
, SE_OP_CTX_SAVE
);
346 tegra_se_write_32(se_dev
, SE_OPERATION_REG_OFFSET
, SE_OP_START
);
348 /* Wait for operation to finish */
349 ret
= tegra_se_operation_complete(se_dev
);
356 * Normal security engine operations other than the context save
358 int tegra_se_start_normal_operation(const tegra_se_dev_t
*se_dev
, uint32_t nbytes
)
360 return tegra_se_perform_operation(se_dev
, nbytes
, false);
364 * Security engine context save operation
366 int tegra_se_start_ctx_save_operation(const tegra_se_dev_t
*se_dev
, uint32_t nbytes
)
368 return tegra_se_perform_operation(se_dev
, nbytes
, true);
372 * Security Engine sequence to generat SRK
373 * SE and SE2 will generate different SRK by different
376 static int tegra_se_generate_srk(const tegra_se_dev_t
*se_dev
)
378 int ret
= PSCI_E_INTERN_FAIL
;
381 /* Confgure the following hardware register settings:
382 * SE_CONFIG.DEC_ALG = NOP
383 * SE_CONFIG.ENC_ALG = RNG
384 * SE_CONFIG.DST = SRK
385 * SE_OPERATION.OP = START
386 * SE_CRYPTO_LAST_BLOCK = 0
388 se_dev
->src_ll_buf
->last_buff_num
= 0;
389 se_dev
->dst_ll_buf
->last_buff_num
= 0;
391 /* Configure random number generator */
393 val
= (DRBG_MODE_FORCE_INSTANTION
| DRBG_SRC_ENTROPY
);
395 val
= (DRBG_MODE_FORCE_RESEED
| DRBG_SRC_ENTROPY
);
396 tegra_se_write_32(se_dev
, SE_RNG_CONFIG_REG_OFFSET
, val
);
398 /* Configure output destination = SRK */
399 val
= (SE_CONFIG_ENC_ALG_RNG
|
400 SE_CONFIG_DEC_ALG_NOP
|
402 tegra_se_write_32(se_dev
, SE_CONFIG_REG_OFFSET
, val
);
404 /* Perform hardware operation */
405 ret
= tegra_se_start_normal_operation(se_dev
, 0);
411 * Generate plain text random data to some memory location using
412 * SE/SE2's SP800-90 random number generator. The random data size
413 * must be some multiple of the AES block size (16 bytes).
415 static int tegra_se_lp_generate_random_data(tegra_se_dev_t
*se_dev
)
420 /* Set some arbitrary memory location to store the random data */
421 se_dev
->dst_ll_buf
->last_buff_num
= 0;
422 if (!se_dev
->ctx_save_buf
) {
423 ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__
);
424 return PSCI_E_NOT_PRESENT
;
426 se_dev
->dst_ll_buf
->buffer
[0].addr
= ((uint64_t)(&(((tegra_se_context_t
*)
427 se_dev
->ctx_save_buf
)->rand_data
)));
428 se_dev
->dst_ll_buf
->buffer
[0].data_len
= SE_CTX_SAVE_RANDOM_DATA_SIZE
;
431 /* Confgure the following hardware register settings:
432 * SE_CONFIG.DEC_ALG = NOP
433 * SE_CONFIG.ENC_ALG = RNG
434 * SE_CONFIG.ENC_MODE = KEY192
435 * SE_CONFIG.DST = MEMORY
437 val
= (SE_CONFIG_ENC_ALG_RNG
|
438 SE_CONFIG_DEC_ALG_NOP
|
439 SE_CONFIG_ENC_MODE_KEY192
|
440 SE_CONFIG_DST_MEMORY
);
441 tegra_se_write_32(se_dev
, SE_CONFIG_REG_OFFSET
, val
);
443 /* Program the RNG options in SE_CRYPTO_CONFIG as follows:
445 * INPUT_SEL = RANDOM (Entropy or LFSR)
448 val
= (SE_CRYPTO_INPUT_RANDOM
|
449 SE_CRYPTO_XOR_BYPASS
|
450 SE_CRYPTO_CORE_ENCRYPT
|
451 SE_CRYPTO_HASH_DISABLE
|
452 SE_CRYPTO_KEY_INDEX(RNG_AES_KEY_INDEX
) |
453 SE_CRYPTO_IV_ORIGINAL
);
454 tegra_se_write_32(se_dev
, SE_CRYPTO_REG_OFFSET
, val
);
458 val
= (DRBG_MODE_FORCE_INSTANTION
| DRBG_SRC_LFSR
);
460 val
= (DRBG_MODE_FORCE_RESEED
| DRBG_SRC_LFSR
);
461 tegra_se_write_32(se_dev
, SE_RNG_CONFIG_REG_OFFSET
, val
);
463 /* SE normal operation */
464 ret
= tegra_se_start_normal_operation(se_dev
, SE_CTX_SAVE_RANDOM_DATA_SIZE
);
470 * Encrypt memory blocks with SRK as part of the security engine context.
471 * The data blocks include: random data and the known pattern data, where
472 * the random data is the first block and known pattern is the last block.
474 static int tegra_se_lp_data_context_save(tegra_se_dev_t
*se_dev
,
475 uint64_t src_addr
, uint64_t dst_addr
, uint32_t data_size
)
479 se_dev
->src_ll_buf
->last_buff_num
= 0;
480 se_dev
->dst_ll_buf
->last_buff_num
= 0;
481 se_dev
->src_ll_buf
->buffer
[0].addr
= src_addr
;
482 se_dev
->src_ll_buf
->buffer
[0].data_len
= data_size
;
483 se_dev
->dst_ll_buf
->buffer
[0].addr
= dst_addr
;
484 se_dev
->dst_ll_buf
->buffer
[0].data_len
= data_size
;
486 /* By setting the context source from memory and calling the context save
487 * operation, the SE encrypts the memory data with SRK.
489 tegra_se_write_32(se_dev
, SE_CTX_SAVE_CONFIG_REG_OFFSET
, SE_CTX_SAVE_SRC_MEM
);
491 ret
= tegra_se_start_ctx_save_operation(se_dev
, data_size
);
497 * Context save the key table access control sticky bits and
498 * security status of each key-slot. The encrypted sticky-bits are
499 * 32 bytes (2 AES blocks) and formatted as the following structure:
500 * { bit in registers bit in context save
502 * SE_RSA_KEYTABLE_ACCE4SS_1[2:0] 157:155
503 * SE_RSA_KEYTABLE_ACCE4SS_0[2:0] 154:152
504 * SE_RSA_SECURITY_PERKEY_0[1:0] 151:150
505 * SE_CRYPTO_KEYTABLE_ACCESS_15[7:0] 149:142
507 * SE_CRYPTO_KEYTABLE_ACCESS_0[7:0] 29:22
508 * SE_CRYPTO_SECURITY_PERKEY_0[15:0] 21:6
509 * SE_TZRAM_SECURITY_0[1:0] 5:4
510 * SE_SECURITY_0[16] 3:3
511 * SE_SECURITY_0[2:0] } 2:0
513 static int tegra_se_lp_sticky_bits_context_save(tegra_se_dev_t
*se_dev
)
515 int ret
= PSCI_E_INTERN_FAIL
;
518 se_dev
->dst_ll_buf
->last_buff_num
= 0;
519 if (!se_dev
->ctx_save_buf
) {
520 ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__
);
521 return PSCI_E_NOT_PRESENT
;
523 se_dev
->dst_ll_buf
->buffer
[0].addr
= ((uint64_t)(&(((tegra_se_context_t
*)
524 se_dev
->ctx_save_buf
)->sticky_bits
)));
525 se_dev
->dst_ll_buf
->buffer
[0].data_len
= SE_CTX_SAVE_STICKY_BITS_SIZE
;
528 * The 1st AES block save the sticky-bits context 1 - 16 bytes (0 - 3 words).
529 * The 2nd AES block save the sticky-bits context 17 - 32 bytes (4 - 7 words).
531 for (int i
= 0; i
< 2; i
++) {
532 val
= SE_CTX_SAVE_SRC_STICKY_BITS
|
533 SE_CTX_SAVE_STICKY_WORD_QUAD(i
);
534 tegra_se_write_32(se_dev
, SE_CTX_SAVE_CONFIG_REG_OFFSET
, val
);
536 /* SE context save operation */
537 ret
= tegra_se_start_ctx_save_operation(se_dev
,
538 SE_CTX_SAVE_STICKY_BITS_SIZE
);
541 se_dev
->dst_ll_buf
->buffer
[0].addr
+= SE_CTX_SAVE_STICKY_BITS_SIZE
;
547 static int tegra_se_aeskeytable_context_save(tegra_se_dev_t
*se_dev
)
552 se_dev
->dst_ll_buf
->last_buff_num
= 0;
553 if (!se_dev
->ctx_save_buf
) {
554 ERROR("%s: ERR: context save buffer NULL pointer!\n", __func__
);
556 goto aes_keytable_save_err
;
559 /* AES key context save */
560 for (int slot
= 0; slot
< TEGRA_SE_AES_KEYSLOT_COUNT
; slot
++) {
561 se_dev
->dst_ll_buf
->buffer
[0].addr
= ((uint64_t)(&(
562 ((tegra_se_context_t
*)se_dev
->
563 ctx_save_buf
)->key_slots
[slot
].key
)));
564 se_dev
->dst_ll_buf
->buffer
[0].data_len
= TEGRA_SE_KEY_128_SIZE
;
565 for (int i
= 0; i
< 2; i
++) {
566 val
= SE_CTX_SAVE_SRC_AES_KEYTABLE
|
567 SE_CTX_SAVE_KEY_INDEX(slot
) |
568 SE_CTX_SAVE_WORD_QUAD(i
);
569 tegra_se_write_32(se_dev
, SE_CTX_SAVE_CONFIG_REG_OFFSET
, val
);
571 /* SE context save operation */
572 ret
= tegra_se_start_ctx_save_operation(se_dev
,
573 TEGRA_SE_KEY_128_SIZE
);
575 ERROR("%s: ERR: AES key CTX_SAVE OP failed, "
576 "slot=%d, word_quad=%d.\n",
578 goto aes_keytable_save_err
;
580 se_dev
->dst_ll_buf
->buffer
[0].addr
+= TEGRA_SE_KEY_128_SIZE
;
583 /* OIV context save */
584 se_dev
->dst_ll_buf
->last_buff_num
= 0;
585 se_dev
->dst_ll_buf
->buffer
[0].addr
= ((uint64_t)(&(
586 ((tegra_se_context_t
*)se_dev
->
587 ctx_save_buf
)->key_slots
[slot
].oiv
)));
588 se_dev
->dst_ll_buf
->buffer
[0].data_len
= TEGRA_SE_AES_IV_SIZE
;
590 val
= SE_CTX_SAVE_SRC_AES_KEYTABLE
|
591 SE_CTX_SAVE_KEY_INDEX(slot
) |
592 SE_CTX_SAVE_WORD_QUAD_ORIG_IV
;
593 tegra_se_write_32(se_dev
, SE_CTX_SAVE_CONFIG_REG_OFFSET
, val
);
595 /* SE context save operation */
596 ret
= tegra_se_start_ctx_save_operation(se_dev
, TEGRA_SE_AES_IV_SIZE
);
598 ERROR("%s: ERR: OIV CTX_SAVE OP failed, slot=%d.\n",
600 goto aes_keytable_save_err
;
603 /* UIV context save */
604 se_dev
->dst_ll_buf
->last_buff_num
= 0;
605 se_dev
->dst_ll_buf
->buffer
[0].addr
= ((uint64_t)(&(
606 ((tegra_se_context_t
*)se_dev
->
607 ctx_save_buf
)->key_slots
[slot
].uiv
)));
608 se_dev
->dst_ll_buf
->buffer
[0].data_len
= TEGRA_SE_AES_IV_SIZE
;
610 val
= SE_CTX_SAVE_SRC_AES_KEYTABLE
|
611 SE_CTX_SAVE_KEY_INDEX(slot
) |
612 SE_CTX_SAVE_WORD_QUAD_UPD_IV
;
613 tegra_se_write_32(se_dev
, SE_CTX_SAVE_CONFIG_REG_OFFSET
, val
);
615 /* SE context save operation */
616 ret
= tegra_se_start_ctx_save_operation(se_dev
, TEGRA_SE_AES_IV_SIZE
);
618 ERROR("%s: ERR: UIV CTX_SAVE OP failed, slot=%d\n",
620 goto aes_keytable_save_err
;
624 aes_keytable_save_err
:
628 static int tegra_se_lp_rsakeytable_context_save(tegra_se_dev_t
*se_dev
)
632 /* First the modulus and then the exponent must be
633 * encrypted and saved. This is repeated for SLOT 0
634 * and SLOT 1. Hence the order:
635 * SLOT 0 exponent : RSA_KEY_INDEX : 0
636 * SLOT 0 modulus : RSA_KEY_INDEX : 1
637 * SLOT 1 exponent : RSA_KEY_INDEX : 2
638 * SLOT 1 modulus : RSA_KEY_INDEX : 3
640 const unsigned int key_index_mod
[TEGRA_SE_RSA_KEYSLOT_COUNT
][2] = {
642 {SE_RSA_KEY_INDEX_SLOT0_EXP
, SE_RSA_KEY_INDEX_SLOT0_MOD
},
644 {SE_RSA_KEY_INDEX_SLOT1_EXP
, SE_RSA_KEY_INDEX_SLOT1_MOD
},
647 se_dev
->dst_ll_buf
->last_buff_num
= 0;
648 se_dev
->dst_ll_buf
->buffer
[0].addr
= ((uint64_t)(&(
649 ((tegra_se_context_t
*)se_dev
->
650 ctx_save_buf
)->rsa_keys
)));
651 se_dev
->dst_ll_buf
->buffer
[0].data_len
= TEGRA_SE_KEY_128_SIZE
;
653 for (int slot
= 0; slot
< TEGRA_SE_RSA_KEYSLOT_COUNT
; slot
++) {
654 /* loop for modulus and exponent */
655 for (int index
= 0; index
< 2; index
++) {
656 for (int word_quad
= 0; word_quad
< 16; word_quad
++) {
657 val
= SE_CTX_SAVE_SRC_RSA_KEYTABLE
|
658 SE_CTX_SAVE_RSA_KEY_INDEX(
659 key_index_mod
[slot
][index
]) |
660 SE_CTX_RSA_WORD_QUAD(word_quad
);
661 tegra_se_write_32(se_dev
,
662 SE_CTX_SAVE_CONFIG_REG_OFFSET
, val
);
664 /* SE context save operation */
665 ret
= tegra_se_start_ctx_save_operation(se_dev
,
666 TEGRA_SE_KEY_128_SIZE
);
668 ERROR("%s: ERR: slot=%d.\n",
670 goto rsa_keytable_save_err
;
673 /* Update the pointer to the next word quad */
674 se_dev
->dst_ll_buf
->buffer
[0].addr
+=
675 TEGRA_SE_KEY_128_SIZE
;
680 rsa_keytable_save_err
:
684 static int tegra_se_pkakeytable_sticky_bits_save(tegra_se_dev_t
*se_dev
)
688 se_dev
->dst_ll_buf
->last_buff_num
= 0;
689 se_dev
->dst_ll_buf
->buffer
[0].addr
= ((uint64_t)(&(
690 ((tegra_se2_context_blob_t
*)se_dev
->
691 ctx_save_buf
)->pka_ctx
.sticky_bits
)));
692 se_dev
->dst_ll_buf
->buffer
[0].data_len
= TEGRA_SE_AES_BLOCK_SIZE
;
694 /* PKA1 sticky bits are 1 AES block (16 bytes) */
695 tegra_se_write_32(se_dev
, SE_CTX_SAVE_CONFIG_REG_OFFSET
,
696 SE_CTX_SAVE_SRC_PKA1_STICKY_BITS
|
697 SE_CTX_STICKY_WORD_QUAD_WORDS_0_3
);
699 /* SE context save operation */
700 ret
= tegra_se_start_ctx_save_operation(se_dev
, 0);
702 ERROR("%s: ERR: PKA1 sticky bits CTX_SAVE OP failed\n",
704 goto pka_sticky_bits_save_err
;
707 pka_sticky_bits_save_err
:
711 static int tegra_se_pkakeytable_context_save(tegra_se_dev_t
*se_dev
)
716 se_dev
->dst_ll_buf
->last_buff_num
= 0;
717 se_dev
->dst_ll_buf
->buffer
[0].addr
= ((uint64_t)(&(
718 ((tegra_se2_context_blob_t
*)se_dev
->
719 ctx_save_buf
)->pka_ctx
.pka_keys
)));
720 se_dev
->dst_ll_buf
->buffer
[0].data_len
= TEGRA_SE_KEY_128_SIZE
;
722 /* for each slot, save word quad 0-127 */
723 for (int slot
= 0; slot
< TEGRA_SE_PKA1_KEYSLOT_COUNT
; slot
++) {
724 for (int word_quad
= 0; word_quad
< 512/4; word_quad
++) {
725 val
= SE_CTX_SAVE_SRC_PKA1_KEYTABLE
|
726 SE_CTX_PKA1_WORD_QUAD_L((slot
* 128) +
728 SE_CTX_PKA1_WORD_QUAD_H((slot
* 128) +
730 tegra_se_write_32(se_dev
,
731 SE_CTX_SAVE_CONFIG_REG_OFFSET
, val
);
733 /* SE context save operation */
734 ret
= tegra_se_start_ctx_save_operation(se_dev
,
735 TEGRA_SE_KEY_128_SIZE
);
737 ERROR("%s: ERR: pka1 keytable ctx save error\n",
739 goto pka_keytable_save_err
;
742 /* Update the pointer to the next word quad */
743 se_dev
->dst_ll_buf
->buffer
[0].addr
+=
744 TEGRA_SE_KEY_128_SIZE
;
748 pka_keytable_save_err
:
752 static int tegra_se_save_SRK(tegra_se_dev_t
*se_dev
)
754 tegra_se_write_32(se_dev
, SE_CTX_SAVE_CONFIG_REG_OFFSET
,
755 SE_CTX_SAVE_SRC_SRK
);
757 /* SE context save operation */
758 return tegra_se_start_ctx_save_operation(se_dev
, 0);
762 * Lock both SE from non-TZ clients.
764 static inline void tegra_se_lock(tegra_se_dev_t
*se_dev
)
769 val
= tegra_se_read_32(se_dev
, SE_SECURITY_REG_OFFSET
);
770 val
|= SE_SECURITY_TZ_LOCK_SOFT(SE_SECURE
);
771 tegra_se_write_32(se_dev
, SE_SECURITY_REG_OFFSET
, val
);
775 * Use SRK to encrypt SE state and save to TZRAM carveout
777 static int tegra_se_context_save_sw(tegra_se_dev_t
*se_dev
)
783 /* Lock entire SE/SE2 as TZ protected */
784 tegra_se_lock(se_dev
);
786 INFO("%s: generate SRK\n", __func__
);
788 err
= tegra_se_generate_srk(se_dev
);
790 ERROR("%s: ERR: SRK generation failed\n", __func__
);
794 INFO("%s: generate random data\n", __func__
);
795 /* Generate random data */
796 err
= tegra_se_lp_generate_random_data(se_dev
);
798 ERROR("%s: ERR: LP random pattern generation failed\n", __func__
);
802 INFO("%s: encrypt random data\n", __func__
);
803 /* Encrypt the random data block */
804 err
= tegra_se_lp_data_context_save(se_dev
,
805 ((uint64_t)(&(((tegra_se_context_t
*)se_dev
->
806 ctx_save_buf
)->rand_data
))),
807 ((uint64_t)(&(((tegra_se_context_t
*)se_dev
->
808 ctx_save_buf
)->rand_data
))),
809 SE_CTX_SAVE_RANDOM_DATA_SIZE
);
811 ERROR("%s: ERR: random pattern encryption failed\n", __func__
);
815 INFO("%s: save SE sticky bits\n", __func__
);
816 /* Save AES sticky bits context */
817 err
= tegra_se_lp_sticky_bits_context_save(se_dev
);
819 ERROR("%s: ERR: sticky bits context save failed\n", __func__
);
823 INFO("%s: save AES keytables\n", __func__
);
824 /* Save AES key table context */
825 err
= tegra_se_aeskeytable_context_save(se_dev
);
827 ERROR("%s: ERR: LP keytable save failed\n", __func__
);
831 /* RSA key slot table context save */
832 INFO("%s: save RSA keytables\n", __func__
);
833 err
= tegra_se_lp_rsakeytable_context_save(se_dev
);
835 ERROR("%s: ERR: rsa key table context save failed\n", __func__
);
839 /* Only SE2 has an interface with PKA1; thus, PKA1's context is saved
842 if (se_dev
->se_num
== 2) {
843 /* Encrypt PKA1 sticky bits on SE2 only */
844 INFO("%s: save PKA sticky bits\n", __func__
);
845 err
= tegra_se_pkakeytable_sticky_bits_save(se_dev
);
847 ERROR("%s: ERR: PKA sticky bits context save failed\n", __func__
);
851 /* Encrypt PKA1 keyslots on SE2 only */
852 INFO("%s: save PKA keytables\n", __func__
);
853 err
= tegra_se_pkakeytable_context_save(se_dev
);
855 ERROR("%s: ERR: PKA key table context save failed\n", __func__
);
860 /* Encrypt known pattern */
861 if (se_dev
->se_num
== 1) {
862 err
= tegra_se_lp_data_context_save(se_dev
,
863 ((uint64_t)(&se_ctx_known_pattern_data
)),
864 ((uint64_t)(&(((tegra_se_context_blob_t
*)se_dev
->ctx_save_buf
)->known_pattern
))),
865 SE_CTX_KNOWN_PATTERN_SIZE
);
866 } else if (se_dev
->se_num
== 2) {
867 err
= tegra_se_lp_data_context_save(se_dev
,
868 ((uint64_t)(&se_ctx_known_pattern_data
)),
869 ((uint64_t)(&(((tegra_se2_context_blob_t
*)se_dev
->ctx_save_buf
)->known_pattern
))),
870 SE_CTX_KNOWN_PATTERN_SIZE
);
873 ERROR("%s: ERR: save LP known pattern failure\n", __func__
);
877 /* Write lp context buffer address into PMC scratch register */
878 if (se_dev
->se_num
== 1) {
879 /* SE context address */
880 mmio_write_32((uint64_t)TEGRA_PMC_BASE
+ PMC_SECURE_SCRATCH117_OFFSET
,
881 ((uint64_t)(se_dev
->ctx_save_buf
)));
882 } else if (se_dev
->se_num
== 2) {
883 /* SE2 & PKA1 context address */
884 mmio_write_32((uint64_t)TEGRA_PMC_BASE
+ PMC_SECURE_SCRATCH116_OFFSET
,
885 ((uint64_t)(se_dev
->ctx_save_buf
)));
888 /* Saves SRK to PMC secure scratch registers for BootROM, which
889 * verifies and restores the security engine context on warm boot.
891 err
= tegra_se_save_SRK(se_dev
);
893 ERROR("%s: ERR: LP SRK save failure\n", __func__
);
897 INFO("%s: SE context save done \n", __func__
);
903 * Initialize the SE engine handle
905 void tegra_se_init(void)
908 INFO("%s: start SE init\n", __func__
);
910 /* Generate random SRK to initialize DRBG */
911 tegra_se_generate_srk(&se_dev_1
);
912 tegra_se_generate_srk(&se_dev_2
);
914 /* determine if ECID is valid */
915 val
= mmio_read_32(TEGRA_FUSE_BASE
+ FUSE_JTAG_SECUREID_VALID
);
916 ecid_valid
= (val
== ECID_VALID
);
918 INFO("%s: SE init done\n", __func__
);
921 static void tegra_se_enable_clocks(void)
925 /* Enable entropy clock */
926 val
= mmio_read_32(TEGRA_CAR_RESET_BASE
+ TEGRA_CLK_OUT_ENB_W
);
927 val
|= ENTROPY_CLK_ENB_BIT
;
928 mmio_write_32(TEGRA_CAR_RESET_BASE
+ TEGRA_CLK_OUT_ENB_W
, val
);
930 /* De-Assert Entropy Reset */
931 val
= mmio_read_32(TEGRA_CAR_RESET_BASE
+ TEGRA_RST_DEVICES_W
);
932 val
&= ~ENTROPY_RESET_BIT
;
933 mmio_write_32(TEGRA_CAR_RESET_BASE
+ TEGRA_RST_DEVICES_W
, val
);
935 /* Enable SE clock */
936 val
= mmio_read_32(TEGRA_CAR_RESET_BASE
+ TEGRA_CLK_OUT_ENB_V
);
937 val
|= SE_CLK_ENB_BIT
;
938 mmio_write_32(TEGRA_CAR_RESET_BASE
+ TEGRA_CLK_OUT_ENB_V
, val
);
940 /* De-Assert SE Reset */
941 val
= mmio_read_32(TEGRA_CAR_RESET_BASE
+ TEGRA_RST_DEVICES_V
);
942 val
&= ~SE_RESET_BIT
;
943 mmio_write_32(TEGRA_CAR_RESET_BASE
+ TEGRA_RST_DEVICES_V
, val
);
946 static void tegra_se_disable_clocks(void)
950 /* Disable entropy clock */
951 val
= mmio_read_32(TEGRA_CAR_RESET_BASE
+ TEGRA_CLK_OUT_ENB_W
);
952 val
&= ~ENTROPY_CLK_ENB_BIT
;
953 mmio_write_32(TEGRA_CAR_RESET_BASE
+ TEGRA_CLK_OUT_ENB_W
, val
);
955 /* Disable SE clock */
956 val
= mmio_read_32(TEGRA_CAR_RESET_BASE
+ TEGRA_CLK_OUT_ENB_V
);
957 val
&= ~SE_CLK_ENB_BIT
;
958 mmio_write_32(TEGRA_CAR_RESET_BASE
+ TEGRA_CLK_OUT_ENB_V
, val
);
962 * Security engine power suspend entry point.
963 * This function is invoked from PSCI power domain suspend handler.
965 int32_t tegra_se_suspend(void)
970 /* SE does not use SMMU in EL3, disable SMMU.
971 * This will be re-enabled by kernel on resume */
972 val
= mmio_read_32(TEGRA_MC_BASE
+ MC_SMMU_PPCS_ASID_0
);
973 val
&= ~PPCS_SMMU_ENABLE
;
974 mmio_write_32(TEGRA_MC_BASE
+ MC_SMMU_PPCS_ASID_0
, val
);
976 tegra_se_enable_clocks();
978 if (tegra_se_atomic_save_enabled(&se_dev_2
) &&
979 tegra_se_atomic_save_enabled(&se_dev_1
)) {
980 /* Atomic context save se2 and pka1 */
981 INFO("%s: SE2/PKA1 atomic context save\n", __func__
);
983 ret
= tegra_se_context_save_atomic(&se_dev_2
);
986 /* Atomic context save se */
988 INFO("%s: SE1 atomic context save\n", __func__
);
989 ret
= tegra_se_context_save_atomic(&se_dev_1
);
993 INFO("%s: SE atomic context save done\n", __func__
);
995 } else if (!tegra_se_atomic_save_enabled(&se_dev_2
) &&
996 !tegra_se_atomic_save_enabled(&se_dev_1
)) {
997 /* SW context save se2 and pka1 */
998 INFO("%s: SE2/PKA1 legacy(SW) context save\n", __func__
);
1000 ret
= tegra_se_context_save_sw(&se_dev_2
);
1003 /* SW context save se */
1005 INFO("%s: SE1 legacy(SW) context save\n", __func__
);
1006 ret
= tegra_se_context_save_sw(&se_dev_1
);
1010 INFO("%s: SE SW context save done\n", __func__
);
1013 ERROR("%s: One SE set for atomic CTX save, the other is not\n",
1017 tegra_se_disable_clocks();
1023 * Save TZRAM to shadow TZRAM in AON
1025 int32_t tegra_se_save_tzram(void)
1031 INFO("%s: SE TZRAM save start\n", __func__
);
1032 tegra_se_enable_clocks();
1034 val
= (SE_TZRAM_OP_REQ_INIT
| SE_TZRAM_OP_MODE_SAVE
);
1035 tegra_se_write_32(&se_dev_1
, SE_TZRAM_OPERATION
, val
);
1037 val
= tegra_se_read_32(&se_dev_1
, SE_TZRAM_OPERATION
);
1038 for (timeout
= 0; (SE_TZRAM_OP_BUSY(val
) == SE_TZRAM_OP_BUSY_ON
) &&
1039 (timeout
< TIMEOUT_100MS
); timeout
++) {
1041 val
= tegra_se_read_32(&se_dev_1
, SE_TZRAM_OPERATION
);
1044 if (timeout
== TIMEOUT_100MS
) {
1045 ERROR("%s: ERR: TZRAM save timeout!\n", __func__
);
1050 INFO("%s: SE TZRAM save done!\n", __func__
);
1053 tegra_se_disable_clocks();
1059 * The function is invoked by SE resume
1061 static void tegra_se_warm_boot_resume(const tegra_se_dev_t
*se_dev
)
1067 /* Lock RNG source to ENTROPY on resume */
1068 val
= DRBG_RO_ENT_IGNORE_MEM_ENABLE
|
1069 DRBG_RO_ENT_SRC_LOCK_ENABLE
|
1070 DRBG_RO_ENT_SRC_ENABLE
;
1071 tegra_se_write_32(se_dev
, SE_RNG_SRC_CONFIG_REG_OFFSET
, val
);
1073 /* Set a random value to SRK to initialize DRBG */
1074 tegra_se_generate_srk(se_dev
);
1078 * The function is invoked on SC7 resume
1080 void tegra_se_resume(void)
1082 tegra_se_warm_boot_resume(&se_dev_1
);
1083 tegra_se_warm_boot_resume(&se_dev_2
);