kernel: 5.4: import wireguard backport
[openwrt/openwrt.git] target/linux/generic/backport-5.4/080-wireguard-0011-crypto-mips-chacha-wire-up-accelerated-32r2-code-fro.patch
From 01c1104f551dae77125bb3d0f461f4084f2a98df Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ardb@kernel.org>
Date: Fri, 8 Nov 2019 13:22:17 +0100
Subject: [PATCH 011/124] crypto: mips/chacha - wire up accelerated 32r2 code
 from Zinc
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 3a2f58f3ba4f6f44e33d1a48240d5eadb882cb59 upstream.

This integrates the accelerated MIPS 32r2 implementation of ChaCha
into both the API and library interfaces of the kernel crypto stack.

The significance of this is that, in addition to becoming available
as an accelerated library implementation, it can also be used by
existing crypto API code such as Adiantum (for block encryption on
ultra low performance cores) or IPsec using chacha20poly1305. These
are use cases that have already opted into using the abstract crypto
API. In order to support Adiantum, the core assembler routine has
been adapted to take the round count as a function argument rather
than hardcoding it to 20.
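
As a rough illustration (not part of this patch), a caller of the
library interface declared in <crypto/chacha.h> would now reach the
accelerated routine like this; encrypt_in_place() is a hypothetical
helper, while chacha_init() and chacha_crypt() are the library entry
points that forward to chacha_crypt_arch() on MIPS:

    #include <crypto/chacha.h>

    /* Hypothetical helper: XOR a buffer with the ChaCha keystream.
     * Because nrounds is now a runtime argument, the same MIPS core
     * routine serves ChaCha20 (20 rounds) and XChaCha12 (12 rounds).
     */
    static void encrypt_in_place(const u32 *key, const u8 *iv,
                                 u8 *buf, unsigned int len, int nrounds)
    {
            u32 state[16];

            chacha_init(state, key, iv); /* key: 8 words, iv: 16 bytes */
            chacha_crypt(state, buf, buf, len, nrounds);
    }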

Co-developed-by: René van Dorst <opensource@vdorst.com>
Signed-off-by: René van Dorst <opensource@vdorst.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 arch/mips/Makefile             |   2 +-
 arch/mips/crypto/Makefile      |   4 +
 arch/mips/crypto/chacha-core.S | 159 ++++++++++++++++++++++++---------
 arch/mips/crypto/chacha-glue.c | 150 +++++++++++++++++++++++++++++++
 crypto/Kconfig                 |   6 ++
 5 files changed, 277 insertions(+), 44 deletions(-)
 create mode 100644 arch/mips/crypto/chacha-glue.c

--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -334,7 +334,7 @@ libs-$(CONFIG_MIPS_FP_SUPPORT) += arch/m
 # See arch/mips/Kbuild for content of core part of the kernel
 core-y += arch/mips/
 
-drivers-$(CONFIG_MIPS_CRC_SUPPORT) += arch/mips/crypto/
+drivers-y += arch/mips/crypto/
 drivers-$(CONFIG_OPROFILE) += arch/mips/oprofile/
 
 # suspend and hibernation support
--- a/arch/mips/crypto/Makefile
+++ b/arch/mips/crypto/Makefile
@@ -4,3 +4,7 @@
 #
 
 obj-$(CONFIG_CRYPTO_CRC32_MIPS) += crc32-mips.o
+
+obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o
+chacha-mips-y := chacha-core.o chacha-glue.o
+AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
--- a/arch/mips/crypto/chacha-core.S
+++ b/arch/mips/crypto/chacha-core.S
@@ -125,7 +125,7 @@
 #define CONCAT3(a,b,c) _CONCAT3(a,b,c)
 
 #define STORE_UNALIGNED(x) \
-CONCAT3(.Lchacha20_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \
+CONCAT3(.Lchacha_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \
 	.if (x != 12); \
 		lw T0, (x*4)(STATE); \
 	.endif; \
@@ -142,7 +142,7 @@ CONCAT3(.Lchacha20_mips_xor_unaligned_,
 	swr X ## x, (x*4)+LSB ## (OUT);
 
 #define STORE_ALIGNED(x) \
-CONCAT3(.Lchacha20_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
+CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
 	.if (x != 12); \
 		lw T0, (x*4)(STATE); \
 	.endif; \
@@ -162,9 +162,9 @@ CONCAT3(.Lchacha20_mips_xor_aligned_, PL
  * Every jumptable entry must be equal in size.
  */
 #define JMPTBL_ALIGNED(x) \
-.Lchacha20_mips_jmptbl_aligned_ ## x: ; \
+.Lchacha_mips_jmptbl_aligned_ ## x: ; \
 	.set noreorder; \
-	b .Lchacha20_mips_xor_aligned_ ## x ## _b; \
+	b .Lchacha_mips_xor_aligned_ ## x ## _b; \
 	.if (x == 12); \
 		addu SAVED_X, X ## x, NONCE_0; \
 	.else; \
@@ -173,9 +173,9 @@ CONCAT3(.Lchacha20_mips_xor_aligned_, PL
 	.set reorder
 
 #define JMPTBL_UNALIGNED(x) \
-.Lchacha20_mips_jmptbl_unaligned_ ## x: ; \
+.Lchacha_mips_jmptbl_unaligned_ ## x: ; \
 	.set noreorder; \
-	b .Lchacha20_mips_xor_unaligned_ ## x ## _b; \
+	b .Lchacha_mips_xor_unaligned_ ## x ## _b; \
 	.if (x == 12); \
 		addu SAVED_X, X ## x, NONCE_0; \
 	.else; \
@@ -200,15 +200,18 @@ CONCAT3(.Lchacha20_mips_xor_aligned_, PL
 .text
 .set reorder
 .set noat
-.globl chacha20_mips
-.ent chacha20_mips
-chacha20_mips:
+.globl chacha_crypt_arch
+.ent chacha_crypt_arch
+chacha_crypt_arch:
 	.frame $sp, STACK_SIZE, $ra
 
+	/* Load number of rounds */
+	lw $at, 16($sp)
+
 	addiu $sp, -STACK_SIZE
 
 	/* Return bytes = 0. */
-	beqz BYTES, .Lchacha20_mips_end
+	beqz BYTES, .Lchacha_mips_end
 
 	lw NONCE_0, 48(STATE)
 
@@ -228,18 +231,15 @@ chacha20_mips:
 	or IS_UNALIGNED, IN, OUT
 	andi IS_UNALIGNED, 0x3
 
-	/* Set number of rounds */
-	li $at, 20
-
-	b .Lchacha20_rounds_start
+	b .Lchacha_rounds_start
 
 .align 4
-.Loop_chacha20_rounds:
+.Loop_chacha_rounds:
 	addiu IN, CHACHA20_BLOCK_SIZE
 	addiu OUT, CHACHA20_BLOCK_SIZE
 	addiu NONCE_0, 1
 
-.Lchacha20_rounds_start:
+.Lchacha_rounds_start:
 	lw X0, 0(STATE)
 	lw X1, 4(STATE)
 	lw X2, 8(STATE)
@@ -259,7 +259,7 @@ chacha20_mips:
 	lw X14, 56(STATE)
 	lw X15, 60(STATE)
 
-.Loop_chacha20_xor_rounds:
+.Loop_chacha_xor_rounds:
 	addiu $at, -2
 	AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16);
 	AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12);
@@ -269,31 +269,31 @@ chacha20_mips:
 	AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12);
 	AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8);
 	AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7);
-	bnez $at, .Loop_chacha20_xor_rounds
+	bnez $at, .Loop_chacha_xor_rounds
 
 	addiu BYTES, -(CHACHA20_BLOCK_SIZE)
 
 	/* Is data src/dst unaligned? Jump */
-	bnez IS_UNALIGNED, .Loop_chacha20_unaligned
+	bnez IS_UNALIGNED, .Loop_chacha_unaligned
 
 	/* Set number rounds here to fill delayslot. */
-	li $at, 20
+	lw $at, (STACK_SIZE+16)($sp)
 
 	/* BYTES < 0, it has no full block. */
-	bltz BYTES, .Lchacha20_mips_no_full_block_aligned
+	bltz BYTES, .Lchacha_mips_no_full_block_aligned
 
 	FOR_EACH_WORD_REV(STORE_ALIGNED)
 
 	/* BYTES > 0? Loop again. */
-	bgtz BYTES, .Loop_chacha20_rounds
+	bgtz BYTES, .Loop_chacha_rounds
 
 	/* Place this here to fill delay slot */
 	addiu NONCE_0, 1
 
 	/* BYTES < 0? Handle last bytes */
-	bltz BYTES, .Lchacha20_mips_xor_bytes
+	bltz BYTES, .Lchacha_mips_xor_bytes
 
-.Lchacha20_mips_xor_done:
+.Lchacha_mips_xor_done:
 	/* Restore used registers */
 	lw $s0, 0($sp)
 	lw $s1, 4($sp)
@@ -307,11 +307,11 @@ chacha20_mips:
 	/* Write NONCE_0 back to right location in state */
 	sw NONCE_0, 48(STATE)
 
-.Lchacha20_mips_end:
+.Lchacha_mips_end:
 	addiu $sp, STACK_SIZE
 	jr $ra
 
-.Lchacha20_mips_no_full_block_aligned:
+.Lchacha_mips_no_full_block_aligned:
 	/* Restore the offset on BYTES */
 	addiu BYTES, CHACHA20_BLOCK_SIZE
 
@@ -319,7 +319,7 @@ chacha20_mips:
 	andi $at, BYTES, MASK_U32
 
 	/* Load upper half of jump table addr */
-	lui T0, %hi(.Lchacha20_mips_jmptbl_aligned_0)
+	lui T0, %hi(.Lchacha_mips_jmptbl_aligned_0)
 
 	/* Calculate lower half jump table offset */
 	ins T0, $at, 1, 6
 
 	addu T1, STATE, $at
 
 	/* Add lower half jump table addr */
-	addiu T0, %lo(.Lchacha20_mips_jmptbl_aligned_0)
+	addiu T0, %lo(.Lchacha_mips_jmptbl_aligned_0)
 
 	/* Read value from STATE */
 	lw SAVED_CA, 0(T1)
@@ -342,31 +342,31 @@ chacha20_mips:
 	FOR_EACH_WORD(JMPTBL_ALIGNED)
 
 
-.Loop_chacha20_unaligned:
+.Loop_chacha_unaligned:
 	/* Set number rounds here to fill delayslot. */
-	li $at, 20
+	lw $at, (STACK_SIZE+16)($sp)
 
 	/* BYTES > 0, it has no full block. */
-	bltz BYTES, .Lchacha20_mips_no_full_block_unaligned
+	bltz BYTES, .Lchacha_mips_no_full_block_unaligned
 
 	FOR_EACH_WORD_REV(STORE_UNALIGNED)
 
 	/* BYTES > 0? Loop again. */
-	bgtz BYTES, .Loop_chacha20_rounds
+	bgtz BYTES, .Loop_chacha_rounds
 
 	/* Write NONCE_0 back to right location in state */
 	sw NONCE_0, 48(STATE)
 
 	.set noreorder
 	/* Fall through to byte handling */
-	bgez BYTES, .Lchacha20_mips_xor_done
-.Lchacha20_mips_xor_unaligned_0_b:
-.Lchacha20_mips_xor_aligned_0_b:
+	bgez BYTES, .Lchacha_mips_xor_done
+.Lchacha_mips_xor_unaligned_0_b:
+.Lchacha_mips_xor_aligned_0_b:
 	/* Place this here to fill delay slot */
 	addiu NONCE_0, 1
 	.set reorder
 
-.Lchacha20_mips_xor_bytes:
+.Lchacha_mips_xor_bytes:
 	addu IN, $at
 	addu OUT, $at
 	/* First byte */
@@ -376,22 +376,22 @@ chacha20_mips:
 	ROTR(SAVED_X)
 	xor T1, SAVED_X
 	sb T1, 0(OUT)
-	beqz $at, .Lchacha20_mips_xor_done
+	beqz $at, .Lchacha_mips_xor_done
 	/* Second byte */
 	lbu T1, 1(IN)
 	addiu $at, BYTES, 2
 	ROTx SAVED_X, 8
 	xor T1, SAVED_X
 	sb T1, 1(OUT)
-	beqz $at, .Lchacha20_mips_xor_done
+	beqz $at, .Lchacha_mips_xor_done
 	/* Third byte */
 	lbu T1, 2(IN)
 	ROTx SAVED_X, 8
 	xor T1, SAVED_X
 	sb T1, 2(OUT)
-	b .Lchacha20_mips_xor_done
+	b .Lchacha_mips_xor_done
 
-.Lchacha20_mips_no_full_block_unaligned:
+.Lchacha_mips_no_full_block_unaligned:
 	/* Restore the offset on BYTES */
 	addiu BYTES, CHACHA20_BLOCK_SIZE
 
@@ -399,7 +399,7 @@ chacha20_mips:
 	andi $at, BYTES, MASK_U32
 
 	/* Load upper half of jump table addr */
-	lui T0, %hi(.Lchacha20_mips_jmptbl_unaligned_0)
+	lui T0, %hi(.Lchacha_mips_jmptbl_unaligned_0)
 
 	/* Calculate lower half jump table offset */
 	ins T0, $at, 1, 6
 
 	addu T1, STATE, $at
 
 	/* Add lower half jump table addr */
-	addiu T0, %lo(.Lchacha20_mips_jmptbl_unaligned_0)
+	addiu T0, %lo(.Lchacha_mips_jmptbl_unaligned_0)
 
 	/* Read value from STATE */
 	lw SAVED_CA, 0(T1)
@@ -420,5 +420,78 @@ chacha20_mips:
 
 	/* Jump table */
 	FOR_EACH_WORD(JMPTBL_UNALIGNED)
-.end chacha20_mips
+.end chacha_crypt_arch
+.set at
+
+/* Input arguments
+ * STATE	$a0
+ * OUT		$a1
+ * NROUND	$a2
+ */
+
+#undef X12
+#undef X13
+#undef X14
+#undef X15
+
+#define X12 $a3
+#define X13 $at
+#define X14 $v0
+#define X15 STATE
+
+.set noat
+.globl hchacha_block_arch
+.ent hchacha_block_arch
+hchacha_block_arch:
+	.frame $sp, STACK_SIZE, $ra
+
+	addiu $sp, -STACK_SIZE
+
+	/* Save X11(s6) */
+	sw X11, 0($sp)
+
+	lw X0, 0(STATE)
+	lw X1, 4(STATE)
+	lw X2, 8(STATE)
+	lw X3, 12(STATE)
+	lw X4, 16(STATE)
+	lw X5, 20(STATE)
+	lw X6, 24(STATE)
+	lw X7, 28(STATE)
+	lw X8, 32(STATE)
+	lw X9, 36(STATE)
+	lw X10, 40(STATE)
+	lw X11, 44(STATE)
+	lw X12, 48(STATE)
+	lw X13, 52(STATE)
+	lw X14, 56(STATE)
+	lw X15, 60(STATE)
+
+.Loop_hchacha_xor_rounds:
+	addiu $a2, -2
+	AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16);
+	AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12);
+	AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8);
+	AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7);
+	AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16);
+	AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12);
+	AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8);
+	AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7);
+	bnez $a2, .Loop_hchacha_xor_rounds
+
+	/* Restore used register */
+	lw X11, 0($sp)
+
+	sw X0, 0(OUT)
+	sw X1, 4(OUT)
+	sw X2, 8(OUT)
+	sw X3, 12(OUT)
+	sw X12, 16(OUT)
+	sw X13, 20(OUT)
+	sw X14, 24(OUT)
+	sw X15, 28(OUT)
+
+	addiu $sp, STACK_SIZE
+	jr $ra
+.end hchacha_block_arch
 .set at
--- /dev/null
+++ b/arch/mips/crypto/chacha-glue.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MIPS accelerated ChaCha and XChaCha stream ciphers,
+ * including ChaCha20 (RFC7539)
+ *
+ * Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <asm/byteorder.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+asmlinkage void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
+				  unsigned int bytes, int nrounds);
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds);
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
+{
+	chacha_init_generic(state, key, iv);
+}
+EXPORT_SYMBOL(chacha_init_arch);
+
+static int chacha_mips_stream_xor(struct skcipher_request *req,
+				  const struct chacha_ctx *ctx, const u8 *iv)
+{
+	struct skcipher_walk walk;
+	u32 state[16];
+	int err;
+
+	err = skcipher_walk_virt(&walk, req, false);
+
+	chacha_init_generic(state, ctx->key, iv);
+
+	while (walk.nbytes > 0) {
+		unsigned int nbytes = walk.nbytes;
+
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
+
+		chacha_crypt(state, walk.dst.virt.addr, walk.src.virt.addr,
+			     nbytes, ctx->nrounds);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+	}
+
+	return err;
+}
+
+static int chacha_mips(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+	return chacha_mips_stream_xor(req, ctx, req->iv);
+}
+
+static int xchacha_mips(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct chacha_ctx subctx;
+	u32 state[16];
+	u8 real_iv[16];
+
+	chacha_init_generic(state, ctx->key, req->iv);
+
+	hchacha_block(state, subctx.key, ctx->nrounds);
+	subctx.nrounds = ctx->nrounds;
+
+	memcpy(&real_iv[0], req->iv + 24, 8);
+	memcpy(&real_iv[8], req->iv + 16, 8);
+	return chacha_mips_stream_xor(req, &subctx, real_iv);
+}
+
+static struct skcipher_alg algs[] = {
+	{
+		.base.cra_name		= "chacha20",
+		.base.cra_driver_name	= "chacha20-mips",
+		.base.cra_priority	= 200,
+		.base.cra_blocksize	= 1,
+		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
+		.base.cra_module	= THIS_MODULE,
+
+		.min_keysize		= CHACHA_KEY_SIZE,
+		.max_keysize		= CHACHA_KEY_SIZE,
+		.ivsize			= CHACHA_IV_SIZE,
+		.chunksize		= CHACHA_BLOCK_SIZE,
+		.setkey			= chacha20_setkey,
+		.encrypt		= chacha_mips,
+		.decrypt		= chacha_mips,
+	}, {
+		.base.cra_name		= "xchacha20",
+		.base.cra_driver_name	= "xchacha20-mips",
+		.base.cra_priority	= 200,
+		.base.cra_blocksize	= 1,
+		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
+		.base.cra_module	= THIS_MODULE,
+
+		.min_keysize		= CHACHA_KEY_SIZE,
+		.max_keysize		= CHACHA_KEY_SIZE,
+		.ivsize			= XCHACHA_IV_SIZE,
+		.chunksize		= CHACHA_BLOCK_SIZE,
+		.setkey			= chacha20_setkey,
+		.encrypt		= xchacha_mips,
+		.decrypt		= xchacha_mips,
+	}, {
+		.base.cra_name		= "xchacha12",
+		.base.cra_driver_name	= "xchacha12-mips",
+		.base.cra_priority	= 200,
+		.base.cra_blocksize	= 1,
+		.base.cra_ctxsize	= sizeof(struct chacha_ctx),
+		.base.cra_module	= THIS_MODULE,
+
+		.min_keysize		= CHACHA_KEY_SIZE,
+		.max_keysize		= CHACHA_KEY_SIZE,
+		.ivsize			= XCHACHA_IV_SIZE,
+		.chunksize		= CHACHA_BLOCK_SIZE,
+		.setkey			= chacha12_setkey,
+		.encrypt		= xchacha_mips,
+		.decrypt		= xchacha_mips,
+	}
+};
+
+static int __init chacha_simd_mod_init(void)
+{
+	return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit chacha_simd_mod_fini(void)
+{
+	crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+module_init(chacha_simd_mod_init);
+module_exit(chacha_simd_mod_fini);
+
+MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (MIPS accelerated)");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-mips");
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-mips");
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-mips");
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1423,6 +1423,12 @@ config CRYPTO_CHACHA20_X86_64
 	  SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20,
 	  XChaCha20, and XChaCha12 stream ciphers.
 
+config CRYPTO_CHACHA_MIPS
+	tristate "ChaCha stream cipher algorithms (MIPS 32r2 optimized)"
+	depends on CPU_MIPS32_R2
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_ARCH_HAVE_LIB_CHACHA
+
 config CRYPTO_SEED
 	tristate "SEED cipher algorithm"
 	select CRYPTO_ALGAPI
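
On a 5.4 target this driver is enabled like any other crypto option. A
minimal sketch of a kernel .config fragment for a MIPS 32r2 board
(assumed values; in practice CPU_MIPS32_R2 is selected by the board's
platform code rather than set by hand):

    # Assumed: the board already runs a 32r2 core
    CONFIG_CPU_MIPS32_R2=y
    # Build the accelerated ChaCha driver as a module
    CONFIG_CRYPTO_CHACHA_MIPS=m

With this set, library users of CRYPTO_LIB_CHACHA and crypto API users
of "chacha20", "xchacha20", or "xchacha12" resolve to the chacha-mips
implementation registered above.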