kernel: 5.4: import wireguard backport
[openwrt/openwrt.git] target/linux/generic/backport-5.4/080-wireguard-0011-crypto-mips-chacha-wire-up-accelerated-32r2-code-fro.patch
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ardb@kernel.org>
Date: Fri, 8 Nov 2019 13:22:17 +0100
Subject: [PATCH] crypto: mips/chacha - wire up accelerated 32r2 code from Zinc
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 3a2f58f3ba4f6f44e33d1a48240d5eadb882cb59 upstream.

This integrates the accelerated MIPS 32r2 implementation of ChaCha
into both the API and library interfaces of the kernel crypto stack.

The significance of this is that, in addition to becoming available
as an accelerated library implementation, it can also be used by
existing crypto API code such as Adiantum (for block encryption on
ultra low performance cores) or IPsec using chacha20poly1305. These
are use cases that have already opted into using the abstract crypto
API. In order to support Adiantum, the core assembler routine has
been adapted to take the round count as a function argument rather
than hardcoding it to 20.
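
As an illustration, the round count now travels with each call. The
prototypes below are the ones added in chacha-glue.c by this patch; the
two calls are a hypothetical caller, not code from the patch itself:

  asmlinkage void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
                                    unsigned int bytes, int nrounds);
  asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream,
                                     int nrounds);

  /* One assembler core serves both variants: */
  chacha_crypt_arch(state, dst, src, bytes, 20); /* ChaCha20 */
  chacha_crypt_arch(state, dst, src, bytes, 12); /* ChaCha12, as used by Adiantum */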
22
23 Co-developed-by: René van Dorst <opensource@vdorst.com>
24 Signed-off-by: René van Dorst <opensource@vdorst.com>
25 Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
26 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
27 Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
28 ---
29 arch/mips/Makefile | 2 +-
30 arch/mips/crypto/Makefile | 4 +
31 arch/mips/crypto/chacha-core.S | 159 ++++++++++++++++++++++++---------
32 arch/mips/crypto/chacha-glue.c | 150 +++++++++++++++++++++++++++++++
33 crypto/Kconfig | 6 ++
34 5 files changed, 277 insertions(+), 44 deletions(-)
35 create mode 100644 arch/mips/crypto/chacha-glue.c
36
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -334,7 +334,7 @@ libs-$(CONFIG_MIPS_FP_SUPPORT) += arch/m
# See arch/mips/Kbuild for content of core part of the kernel
core-y += arch/mips/

-drivers-$(CONFIG_MIPS_CRC_SUPPORT) += arch/mips/crypto/
+drivers-y += arch/mips/crypto/
drivers-$(CONFIG_OPROFILE) += arch/mips/oprofile/

# suspend and hibernation support
--- a/arch/mips/crypto/Makefile
+++ b/arch/mips/crypto/Makefile
@@ -4,3 +4,7 @@
#

obj-$(CONFIG_CRYPTO_CRC32_MIPS) += crc32-mips.o
+
+obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o
+chacha-mips-y := chacha-core.o chacha-glue.o
+AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
--- a/arch/mips/crypto/chacha-core.S
+++ b/arch/mips/crypto/chacha-core.S
@@ -125,7 +125,7 @@
#define CONCAT3(a,b,c) _CONCAT3(a,b,c)

#define STORE_UNALIGNED(x) \
-CONCAT3(.Lchacha20_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \
+CONCAT3(.Lchacha_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \
.if (x != 12); \
lw T0, (x*4)(STATE); \
.endif; \
@@ -142,7 +142,7 @@ CONCAT3(.Lchacha20_mips_xor_unaligned_,
swr X ## x, (x*4)+LSB ## (OUT);

#define STORE_ALIGNED(x) \
-CONCAT3(.Lchacha20_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
+CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
.if (x != 12); \
lw T0, (x*4)(STATE); \
.endif; \
@@ -162,9 +162,9 @@ CONCAT3(.Lchacha20_mips_xor_aligned_, PL
* Every jumptable entry must be equal in size.
*/
#define JMPTBL_ALIGNED(x) \
-.Lchacha20_mips_jmptbl_aligned_ ## x: ; \
+.Lchacha_mips_jmptbl_aligned_ ## x: ; \
.set noreorder; \
- b .Lchacha20_mips_xor_aligned_ ## x ## _b; \
+ b .Lchacha_mips_xor_aligned_ ## x ## _b; \
.if (x == 12); \
addu SAVED_X, X ## x, NONCE_0; \
.else; \
@@ -173,9 +173,9 @@ CONCAT3(.Lchacha20_mips_xor_aligned_, PL
.set reorder

#define JMPTBL_UNALIGNED(x) \
-.Lchacha20_mips_jmptbl_unaligned_ ## x: ; \
+.Lchacha_mips_jmptbl_unaligned_ ## x: ; \
.set noreorder; \
- b .Lchacha20_mips_xor_unaligned_ ## x ## _b; \
+ b .Lchacha_mips_xor_unaligned_ ## x ## _b; \
.if (x == 12); \
addu SAVED_X, X ## x, NONCE_0; \
.else; \
@@ -200,15 +200,18 @@ CONCAT3(.Lchacha20_mips_xor_aligned_, PL
.text
.set reorder
.set noat
-.globl chacha20_mips
-.ent chacha20_mips
-chacha20_mips:
+.globl chacha_crypt_arch
+.ent chacha_crypt_arch
+chacha_crypt_arch:
.frame $sp, STACK_SIZE, $ra

+ /* Load number of rounds */
+ lw $at, 16($sp)
+
addiu $sp, -STACK_SIZE

/* Return bytes = 0. */
- beqz BYTES, .Lchacha20_mips_end
+ beqz BYTES, .Lchacha_mips_end

lw NONCE_0, 48(STATE)

@@ -228,18 +231,15 @@ chacha20_mips:
or IS_UNALIGNED, IN, OUT
andi IS_UNALIGNED, 0x3

- /* Set number of rounds */
- li $at, 20
-
- b .Lchacha20_rounds_start
+ b .Lchacha_rounds_start

.align 4
-.Loop_chacha20_rounds:
+.Loop_chacha_rounds:
addiu IN, CHACHA20_BLOCK_SIZE
addiu OUT, CHACHA20_BLOCK_SIZE
addiu NONCE_0, 1

-.Lchacha20_rounds_start:
+.Lchacha_rounds_start:
lw X0, 0(STATE)
lw X1, 4(STATE)
lw X2, 8(STATE)
@@ -259,7 +259,7 @@ chacha20_mips:
lw X14, 56(STATE)
lw X15, 60(STATE)

-.Loop_chacha20_xor_rounds:
+.Loop_chacha_xor_rounds:
addiu $at, -2
AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16);
AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12);
@@ -269,31 +269,31 @@ chacha20_mips:
AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12);
AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8);
AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7);
- bnez $at, .Loop_chacha20_xor_rounds
+ bnez $at, .Loop_chacha_xor_rounds

addiu BYTES, -(CHACHA20_BLOCK_SIZE)

/* Is data src/dst unaligned? Jump */
- bnez IS_UNALIGNED, .Loop_chacha20_unaligned
+ bnez IS_UNALIGNED, .Loop_chacha_unaligned

/* Set number rounds here to fill delayslot. */
- li $at, 20
+ lw $at, (STACK_SIZE+16)($sp)

/* BYTES < 0, it has no full block. */
- bltz BYTES, .Lchacha20_mips_no_full_block_aligned
+ bltz BYTES, .Lchacha_mips_no_full_block_aligned

FOR_EACH_WORD_REV(STORE_ALIGNED)

/* BYTES > 0? Loop again. */
- bgtz BYTES, .Loop_chacha20_rounds
+ bgtz BYTES, .Loop_chacha_rounds

/* Place this here to fill delay slot */
addiu NONCE_0, 1

/* BYTES < 0? Handle last bytes */
- bltz BYTES, .Lchacha20_mips_xor_bytes
+ bltz BYTES, .Lchacha_mips_xor_bytes

-.Lchacha20_mips_xor_done:
+.Lchacha_mips_xor_done:
/* Restore used registers */
lw $s0, 0($sp)
lw $s1, 4($sp)
@@ -307,11 +307,11 @@ chacha20_mips:
/* Write NONCE_0 back to right location in state */
sw NONCE_0, 48(STATE)

-.Lchacha20_mips_end:
+.Lchacha_mips_end:
addiu $sp, STACK_SIZE
jr $ra

-.Lchacha20_mips_no_full_block_aligned:
+.Lchacha_mips_no_full_block_aligned:
/* Restore the offset on BYTES */
addiu BYTES, CHACHA20_BLOCK_SIZE

@@ -319,7 +319,7 @@ chacha20_mips:
andi $at, BYTES, MASK_U32

/* Load upper half of jump table addr */
- lui T0, %hi(.Lchacha20_mips_jmptbl_aligned_0)
+ lui T0, %hi(.Lchacha_mips_jmptbl_aligned_0)

/* Calculate lower half jump table offset */
ins T0, $at, 1, 6
@@ -328,7 +328,7 @@
addu T1, STATE, $at

/* Add lower half jump table addr */
- addiu T0, %lo(.Lchacha20_mips_jmptbl_aligned_0)
+ addiu T0, %lo(.Lchacha_mips_jmptbl_aligned_0)

/* Read value from STATE */
lw SAVED_CA, 0(T1)
@@ -342,31 +342,31 @@
FOR_EACH_WORD(JMPTBL_ALIGNED)


-.Loop_chacha20_unaligned:
+.Loop_chacha_unaligned:
/* Set number rounds here to fill delayslot. */
- li $at, 20
+ lw $at, (STACK_SIZE+16)($sp)

/* BYTES > 0, it has no full block. */
- bltz BYTES, .Lchacha20_mips_no_full_block_unaligned
+ bltz BYTES, .Lchacha_mips_no_full_block_unaligned

FOR_EACH_WORD_REV(STORE_UNALIGNED)

/* BYTES > 0? Loop again. */
- bgtz BYTES, .Loop_chacha20_rounds
+ bgtz BYTES, .Loop_chacha_rounds

/* Write NONCE_0 back to right location in state */
sw NONCE_0, 48(STATE)

.set noreorder
/* Fall through to byte handling */
- bgez BYTES, .Lchacha20_mips_xor_done
-.Lchacha20_mips_xor_unaligned_0_b:
-.Lchacha20_mips_xor_aligned_0_b:
+ bgez BYTES, .Lchacha_mips_xor_done
+.Lchacha_mips_xor_unaligned_0_b:
+.Lchacha_mips_xor_aligned_0_b:
/* Place this here to fill delay slot */
addiu NONCE_0, 1
.set reorder

-.Lchacha20_mips_xor_bytes:
+.Lchacha_mips_xor_bytes:
addu IN, $at
addu OUT, $at
/* First byte */
@@ -376,22 +376,22 @@ chacha20_mips:
ROTR(SAVED_X)
xor T1, SAVED_X
sb T1, 0(OUT)
- beqz $at, .Lchacha20_mips_xor_done
+ beqz $at, .Lchacha_mips_xor_done
/* Second byte */
lbu T1, 1(IN)
addiu $at, BYTES, 2
ROTx SAVED_X, 8
xor T1, SAVED_X
sb T1, 1(OUT)
- beqz $at, .Lchacha20_mips_xor_done
+ beqz $at, .Lchacha_mips_xor_done
/* Third byte */
lbu T1, 2(IN)
ROTx SAVED_X, 8
xor T1, SAVED_X
sb T1, 2(OUT)
- b .Lchacha20_mips_xor_done
+ b .Lchacha_mips_xor_done

-.Lchacha20_mips_no_full_block_unaligned:
+.Lchacha_mips_no_full_block_unaligned:
/* Restore the offset on BYTES */
addiu BYTES, CHACHA20_BLOCK_SIZE

@@ -399,7 +399,7 @@ chacha20_mips:
andi $at, BYTES, MASK_U32

/* Load upper half of jump table addr */
- lui T0, %hi(.Lchacha20_mips_jmptbl_unaligned_0)
+ lui T0, %hi(.Lchacha_mips_jmptbl_unaligned_0)

/* Calculate lower half jump table offset */
ins T0, $at, 1, 6
@@ -408,7 +408,7 @@ chacha20_mips:
addu T1, STATE, $at

/* Add lower half jump table addr */
- addiu T0, %lo(.Lchacha20_mips_jmptbl_unaligned_0)
+ addiu T0, %lo(.Lchacha_mips_jmptbl_unaligned_0)

/* Read value from STATE */
lw SAVED_CA, 0(T1)
@@ -420,5 +420,78 @@ chacha20_mips:

/* Jump table */
FOR_EACH_WORD(JMPTBL_UNALIGNED)
-.end chacha20_mips
+.end chacha_crypt_arch
+.set at
+
+/* Input arguments
+ * STATE $a0
+ * OUT $a1
+ * NROUND $a2
+ */
+
+#undef X12
+#undef X13
+#undef X14
+#undef X15
+
+#define X12 $a3
+#define X13 $at
+#define X14 $v0
+#define X15 STATE
+
+.set noat
+.globl hchacha_block_arch
+.ent hchacha_block_arch
+hchacha_block_arch:
+ .frame $sp, STACK_SIZE, $ra
+
+ addiu $sp, -STACK_SIZE
+
+ /* Save X11(s6) */
+ sw X11, 0($sp)
+
+ lw X0, 0(STATE)
+ lw X1, 4(STATE)
+ lw X2, 8(STATE)
+ lw X3, 12(STATE)
+ lw X4, 16(STATE)
+ lw X5, 20(STATE)
+ lw X6, 24(STATE)
+ lw X7, 28(STATE)
+ lw X8, 32(STATE)
+ lw X9, 36(STATE)
+ lw X10, 40(STATE)
+ lw X11, 44(STATE)
+ lw X12, 48(STATE)
+ lw X13, 52(STATE)
+ lw X14, 56(STATE)
+ lw X15, 60(STATE)
+
+.Loop_hchacha_xor_rounds:
+ addiu $a2, -2
+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16);
+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12);
+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8);
+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7);
+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16);
+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12);
+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8);
+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7);
+ bnez $a2, .Loop_hchacha_xor_rounds
+
+ /* Restore used register */
+ lw X11, 0($sp)
+
+ sw X0, 0(OUT)
+ sw X1, 4(OUT)
+ sw X2, 8(OUT)
+ sw X3, 12(OUT)
+ sw X12, 16(OUT)
+ sw X13, 20(OUT)
+ sw X14, 24(OUT)
+ sw X15, 28(OUT)
+
+ addiu $sp, STACK_SIZE
+ jr $ra
+.end hchacha_block_arch
.set at
--- /dev/null
+++ b/arch/mips/crypto/chacha-glue.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MIPS accelerated ChaCha and XChaCha stream ciphers,
+ * including ChaCha20 (RFC7539)
+ *
+ * Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+#include <asm/byteorder.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+asmlinkage void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds);
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds);
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
+{
+ chacha_init_generic(state, key, iv);
+}
+EXPORT_SYMBOL(chacha_init_arch);
+
+static int chacha_mips_stream_xor(struct skcipher_request *req,
+ const struct chacha_ctx *ctx, const u8 *iv)
+{
+ struct skcipher_walk walk;
+ u32 state[16];
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ chacha_init_generic(state, ctx->key, iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
+ chacha_crypt(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes, ctx->nrounds);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+}
+
+static int chacha_mips(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_mips_stream_xor(req, ctx, req->iv);
+}
+
+static int xchacha_mips(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha_ctx subctx;
+ u32 state[16];
+ u8 real_iv[16];
+
+ chacha_init_generic(state, ctx->key, req->iv);
+
+ hchacha_block(state, subctx.key, ctx->nrounds);
+ subctx.nrounds = ctx->nrounds;
+
+ memcpy(&real_iv[0], req->iv + 24, 8);
+ memcpy(&real_iv[8], req->iv + 16, 8);
+ return chacha_mips_stream_xor(req, &subctx, real_iv);
+}
+
+static struct skcipher_alg algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-mips",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = chacha_mips,
+ .decrypt = chacha_mips,
+ }, {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-mips",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha20_setkey,
+ .encrypt = xchacha_mips,
+ .decrypt = xchacha_mips,
+ }, {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-mips",
+ .base.cra_priority = 200,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = chacha12_setkey,
+ .encrypt = xchacha_mips,
+ .decrypt = xchacha_mips,
+ }
+};
+
+static int __init chacha_simd_mod_init(void)
+{
+ return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit chacha_simd_mod_fini(void)
+{
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+module_init(chacha_simd_mod_init);
+module_exit(chacha_simd_mod_fini);
+
+MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (MIPS accelerated)");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-mips");
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-mips");
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-mips");
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1423,6 +1423,12 @@ config CRYPTO_CHACHA20_X86_64
SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20,
XChaCha20, and XChaCha12 stream ciphers.

+config CRYPTO_CHACHA_MIPS
+ tristate "ChaCha stream cipher algorithms (MIPS 32r2 optimized)"
+ depends on CPU_MIPS32_R2
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_ARCH_HAVE_LIB_CHACHA
+
config CRYPTO_SEED
tristate "SEED cipher algorithm"
select CRYPTO_ALGAPI
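
Usage note (not part of the upstream patch): on a MIPS 32r2 target the
accelerated driver is selected through the new Kconfig symbol, e.g. in a
kernel .config fragment:

  # hypothetical configuration, built into the kernel rather than modular
  CONFIG_CRYPTO_CHACHA_MIPS=y

Because the skciphers register with cra_priority 200, the crypto API then
prefers "chacha20-mips" over the lower-priority generic C implementation.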