kernel: 5.4: import wireguard backport
target/linux/generic/backport-5.4/080-wireguard-0010-crypto-mips-chacha-import-32r2-ChaCha-code-from-Zinc.patch
From f9b4c68865fdb7f3327f7d82fbc82c76c8773d53 Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld" <Jason@zx2c4.com>
Date: Fri, 8 Nov 2019 13:22:16 +0100
Subject: [PATCH 010/124] crypto: mips/chacha - import 32r2 ChaCha code from
 Zinc
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 49aa7c00eddf8d8f462b0256bd82e81762d7b0c6 upstream.

This imports the accelerated MIPS 32r2 ChaCha20 implementation from the
Zinc patch set.

Co-developed-by: René van Dorst <opensource@vdorst.com>
Signed-off-by: René van Dorst <opensource@vdorst.com>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 arch/mips/crypto/chacha-core.S | 424 +++++++++++++++++++++++++++++++++
 1 file changed, 424 insertions(+)
 create mode 100644 arch/mips/crypto/chacha-core.S

--- /dev/null
+++ b/arch/mips/crypto/chacha-core.S
@@ -0,0 +1,424 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2016-2018 René van Dorst <opensource@vdorst.com>. All Rights Reserved.
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#define MASK_U32 0x3c
+#define CHACHA20_BLOCK_SIZE 64
+#define STACK_SIZE 32
+
+#define X0 $t0
+#define X1 $t1
+#define X2 $t2
+#define X3 $t3
+#define X4 $t4
+#define X5 $t5
+#define X6 $t6
+#define X7 $t7
+#define X8 $t8
+#define X9 $t9
+#define X10 $v1
+#define X11 $s6
+#define X12 $s5
+#define X13 $s4
+#define X14 $s3
+#define X15 $s2
+/* Use regs which are overwritten on exit for Tx so we don't leak clear data. */
+#define T0 $s1
+#define T1 $s0
+#define T(n) T ## n
+#define X(n) X ## n
+
+/* Input arguments */
+#define STATE $a0
+#define OUT $a1
+#define IN $a2
+#define BYTES $a3
+
+/* Output argument */
+/* NONCE[0] is kept in a register and not in memory.
+ * We don't want to touch the original value in memory.
+ * It must be incremented every loop iteration.
+ */
+#define NONCE_0 $v0
+
+/* SAVED_X and SAVED_CA are set in the jump table.
+ * Use regs which are overwritten on exit so we don't leak clear data.
+ * They are used for handling the last bytes, which are not a multiple of 4.
+ */
+#define SAVED_X X15
+#define SAVED_CA $s7
+
+#define IS_UNALIGNED $s7
+
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define MSB 0
+#define LSB 3
+#define ROTx rotl
+#define ROTR(n) rotr n, 24
+#define CPU_TO_LE32(n) \
+ wsbh n; \
+ rotr n, 16;
+#else
+#define MSB 3
+#define LSB 0
+#define ROTx rotr
+#define CPU_TO_LE32(n)
+#define ROTR(n)
+#endif
+
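On big-endian cores, CPU_TO_LE32 byte-swaps each 32-bit word with wsbh (swap the bytes within each halfword) followed by a 16-bit rotate; together the two instructions amount to a full 32-bit byte swap, and on little-endian cores the macro expands to nothing. A minimal C sketch of the same transformation, for illustration only:

    #include <stdint.h>

    /* Equivalent of the big-endian wsbh + rotr 16 pair: a 32-bit byte swap. */
    static uint32_t cpu_to_le32_equiv(uint32_t n)
    {
        uint32_t h = ((n & 0x00ff00ffu) << 8) | ((n & 0xff00ff00u) >> 8); /* wsbh */
        return (h >> 16) | (h << 16);                                     /* rotr 16 */
    }
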
+#define FOR_EACH_WORD(x) \
+ x( 0); \
+ x( 1); \
+ x( 2); \
+ x( 3); \
+ x( 4); \
+ x( 5); \
+ x( 6); \
+ x( 7); \
+ x( 8); \
+ x( 9); \
+ x(10); \
+ x(11); \
+ x(12); \
+ x(13); \
+ x(14); \
+ x(15);
+
+#define FOR_EACH_WORD_REV(x) \
+ x(15); \
+ x(14); \
+ x(13); \
+ x(12); \
+ x(11); \
+ x(10); \
+ x( 9); \
+ x( 8); \
+ x( 7); \
+ x( 6); \
+ x( 5); \
+ x( 4); \
+ x( 3); \
+ x( 2); \
+ x( 1); \
+ x( 0);
+
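FOR_EACH_WORD and FOR_EACH_WORD_REV are X-macros: they take another macro as an argument and apply it to every word index, unrolling the per-word code at preprocessing time instead of looping at run time. A small self-contained C illustration of the pattern, shortened to four words:

    #include <stdio.h>

    /* Same X-macro idea as FOR_EACH_WORD, shortened to 4 words. */
    #define FOR_EACH_WORD(x) x(0); x(1); x(2); x(3)
    #define PRINT_WORD(i) printf("word %d = %u\n", (i), state[(i)])

    int main(void)
    {
        unsigned state[4] = { 1, 2, 3, 4 };
        FOR_EACH_WORD(PRINT_WORD); /* unrolls into four printf calls */
        return 0;
    }
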
+#define PLUS_ONE_0 1
+#define PLUS_ONE_1 2
+#define PLUS_ONE_2 3
+#define PLUS_ONE_3 4
+#define PLUS_ONE_4 5
+#define PLUS_ONE_5 6
+#define PLUS_ONE_6 7
+#define PLUS_ONE_7 8
+#define PLUS_ONE_8 9
+#define PLUS_ONE_9 10
+#define PLUS_ONE_10 11
+#define PLUS_ONE_11 12
+#define PLUS_ONE_12 13
+#define PLUS_ONE_13 14
+#define PLUS_ONE_14 15
+#define PLUS_ONE_15 16
+#define PLUS_ONE(x) PLUS_ONE_ ## x
+#define _CONCAT3(a,b,c) a ## b ## c
+#define CONCAT3(a,b,c) _CONCAT3(a,b,c)
+
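CONCAT3 pastes through the _CONCAT3 helper so that an argument such as PLUS_ONE(x) is macro-expanded before the ## operator glues the tokens together; pasting directly would concatenate the unexpanded text. The PLUS_ONE_n table exists because the preprocessor cannot do arithmetic during pasting. The same standard C idiom in miniature:

    #include <stdio.h>

    #define PLUS_ONE_0 1
    #define PLUS_ONE(x) PLUS_ONE_ ## x
    #define _CONCAT3(a,b,c) a ## b ## c
    #define CONCAT3(a,b,c) _CONCAT3(a,b,c)

    int main(void)
    {
        int label_1_end = 42;
        /* PLUS_ONE(0) expands to 1 first, so this names label_1_end. */
        printf("%d\n", CONCAT3(label_, PLUS_ONE(0), _end));
        return 0;
    }
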
+#define STORE_UNALIGNED(x) \
+CONCAT3(.Lchacha20_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \
+ .if (x != 12); \
+ lw T0, (x*4)(STATE); \
+ .endif; \
+ lwl T1, (x*4)+MSB ## (IN); \
+ lwr T1, (x*4)+LSB ## (IN); \
+ .if (x == 12); \
+ addu X ## x, NONCE_0; \
+ .else; \
+ addu X ## x, T0; \
+ .endif; \
+ CPU_TO_LE32(X ## x); \
+ xor X ## x, T1; \
+ swl X ## x, (x*4)+MSB ## (OUT); \
+ swr X ## x, (x*4)+LSB ## (OUT);
+
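STORE_UNALIGNED relies on the MIPS lwl/lwr and swl/swr pairs, which together access a 32-bit word at any byte alignment; MSB and LSB pick the correct end of the word for the build's endianness. In portable C the usual equivalent is memcpy, which compilers lower to comparable sequences; a sketch of one word of the xor, assuming ks already holds the little-endian keystream word:

    #include <stdint.h>
    #include <string.h>

    /* One keystream word applied to possibly unaligned in/out pointers. */
    static void xor_word_unaligned(uint8_t *out, const uint8_t *in, uint32_t ks)
    {
        uint32_t w;
        memcpy(&w, in, sizeof(w));  /* like lwl/lwr: alignment-safe load */
        w ^= ks;
        memcpy(out, &w, sizeof(w)); /* like swl/swr: alignment-safe store */
    }
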
+#define STORE_ALIGNED(x) \
+CONCAT3(.Lchacha20_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \
+ .if (x != 12); \
+ lw T0, (x*4)(STATE); \
+ .endif; \
+ lw T1, (x*4) ## (IN); \
+ .if (x == 12); \
+ addu X ## x, NONCE_0; \
+ .else; \
+ addu X ## x, T0; \
+ .endif; \
+ CPU_TO_LE32(X ## x); \
+ xor X ## x, T1; \
+ sw X ## x, (x*4) ## (OUT);
+
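Each STORE_ALIGNED(x) step is the ChaCha finalization for one word: add the original state word back in (word 12 adds the register-held counter NONCE_0 instead of reloading memory), convert the sum to little endian, then xor it with the input. A hedged C rendering of a single step:

    #include <stdint.h>

    /* Finalize word i of the block and xor it into the output
     * (aligned buffers; the little-endian conversion is a no-op here). */
    static void store_aligned_word(uint32_t *out, const uint32_t *in,
                                   const uint32_t *state, const uint32_t *x,
                                   uint32_t nonce0, int i)
    {
        uint32_t ks = x[i] + (i == 12 ? nonce0 : state[i]);
        out[i] = in[i] ^ ks; /* CPU_TO_LE32 would byte-swap ks on big endian */
    }
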
+/* Jump table macro.
+ * Used for setup and for handling the last bytes, which are not a multiple of 4.
+ * X15 is free to store Xn.
+ * Every jump table entry must be equal in size.
+ */
+#define JMPTBL_ALIGNED(x) \
+.Lchacha20_mips_jmptbl_aligned_ ## x: ; \
+ .set noreorder; \
+ b .Lchacha20_mips_xor_aligned_ ## x ## _b; \
+ .if (x == 12); \
+ addu SAVED_X, X ## x, NONCE_0; \
+ .else; \
+ addu SAVED_X, X ## x, SAVED_CA; \
+ .endif; \
+ .set reorder
+
+#define JMPTBL_UNALIGNED(x) \
+.Lchacha20_mips_jmptbl_unaligned_ ## x: ; \
+ .set noreorder; \
+ b .Lchacha20_mips_xor_unaligned_ ## x ## _b; \
+ .if (x == 12); \
+ addu SAVED_X, X ## x, NONCE_0; \
+ .else; \
+ addu SAVED_X, X ## x, SAVED_CA; \
+ .endif; \
+ .set reorder
+
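Every JMPTBL_ALIGNED/JMPTBL_UNALIGNED entry is exactly two instructions, a branch plus a delay-slot addu that precomputes SAVED_X, so an entry's address can be derived from the word count alone and the store sequence is entered midway, Duff's-device style. A hypothetical C analogue using GCC's labels-as-values extension, shortened to four words:

    /* Fall-through dispatch on the number of full tail words (GCC extension). */
    static void xor_tail_words(unsigned *out, const unsigned *in,
                               const unsigned *ks, unsigned words)
    {
        static void *const tbl[] = { &&w0, &&w1, &&w2, &&w3, &&w4 };
        goto *tbl[words];
    w4: out[3] = in[3] ^ ks[3]; /* entries fall through, like the asm stores */
    w3: out[2] = in[2] ^ ks[2];
    w2: out[1] = in[1] ^ ks[1];
    w1: out[0] = in[0] ^ ks[0];
    w0: return;
    }
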
+#define AXR(A, B, C, D, K, L, M, N, V, W, Y, Z, S) \
+ addu X(A), X(K); \
+ addu X(B), X(L); \
+ addu X(C), X(M); \
+ addu X(D), X(N); \
+ xor X(V), X(A); \
+ xor X(W), X(B); \
+ xor X(Y), X(C); \
+ xor X(Z), X(D); \
+ rotl X(V), S; \
+ rotl X(W), S; \
+ rotl X(Y), S; \
+ rotl X(Z), S;
+
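AXR is one add/xor/rotate step applied to four quarter-rounds in parallel: each invocation issues four adds, four xors, and four rotates, and four consecutive AXR lines with shift amounts 16, 12, 8, 7 complete a full set of column or diagonal quarter-rounds. For comparison, a single ChaCha quarter-round in C:

    #include <stdint.h>

    static uint32_t rotl32(uint32_t v, int n)
    {
        return (v << n) | (v >> (32 - n));
    }

    /* One ChaCha quarter-round; each AXR line interleaves one of these
     * four steps across four (a, b, c, d) column or diagonal groups. */
    static void quarter_round(uint32_t x[16], int a, int b, int c, int d)
    {
        x[a] += x[b]; x[d] ^= x[a]; x[d] = rotl32(x[d], 16);
        x[c] += x[d]; x[b] ^= x[c]; x[b] = rotl32(x[b], 12);
        x[a] += x[b]; x[d] ^= x[a]; x[d] = rotl32(x[d], 8);
        x[c] += x[d]; x[b] ^= x[c]; x[b] = rotl32(x[b], 7);
    }
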
+.text
+.set reorder
+.set noat
+.globl chacha20_mips
+.ent chacha20_mips
+chacha20_mips:
+ .frame $sp, STACK_SIZE, $ra
+
+ addiu $sp, -STACK_SIZE
+
+ /* Return if BYTES = 0. */
+ beqz BYTES, .Lchacha20_mips_end
+
+ lw NONCE_0, 48(STATE)
+
+ /* Save s0-s7 */
+ sw $s0, 0($sp)
+ sw $s1, 4($sp)
+ sw $s2, 8($sp)
+ sw $s3, 12($sp)
+ sw $s4, 16($sp)
+ sw $s5, 20($sp)
+ sw $s6, 24($sp)
+ sw $s7, 28($sp)
+
+ /* Test whether IN or OUT is unaligned.
+  * IS_UNALIGNED = ( IN | OUT ) & 0x00000003
+  */
+ or IS_UNALIGNED, IN, OUT
+ andi IS_UNALIGNED, 0x3
+
+ /* Set number of rounds */
+ li $at, 20
+
+ b .Lchacha20_rounds_start
+
+.align 4
+.Loop_chacha20_rounds:
+ addiu IN, CHACHA20_BLOCK_SIZE
+ addiu OUT, CHACHA20_BLOCK_SIZE
+ addiu NONCE_0, 1
+
+.Lchacha20_rounds_start:
+ lw X0, 0(STATE)
+ lw X1, 4(STATE)
+ lw X2, 8(STATE)
+ lw X3, 12(STATE)
+
+ lw X4, 16(STATE)
+ lw X5, 20(STATE)
+ lw X6, 24(STATE)
+ lw X7, 28(STATE)
+ lw X8, 32(STATE)
+ lw X9, 36(STATE)
+ lw X10, 40(STATE)
+ lw X11, 44(STATE)
+
+ move X12, NONCE_0
+ lw X13, 52(STATE)
+ lw X14, 56(STATE)
+ lw X15, 60(STATE)
+
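The loads follow the standard ChaCha state layout: words 0-3 are the constants, words 4-11 the key, word 12 the block counter (kept in NONCE_0 and bumped per block rather than reloaded), and words 13-15 the nonce. Sketched as a hypothetical C struct for orientation; the code itself only ever indexes byte offsets:

    #include <stdint.h>

    /* ChaCha state as this code indexes it; word 12 sits at offset 48. */
    struct chacha_state {
        uint32_t constant[4]; /* words  0-3: "expa" "nd 3" "2-by" "te k" */
        uint32_t key[8];      /* words 4-11: 256-bit key */
        uint32_t counter;     /* word    12: held in NONCE_0 */
        uint32_t nonce[3];    /* words 13-15: loaded fresh each block */
    };
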
+.Loop_chacha20_xor_rounds:
+ addiu $at, -2
+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16);
+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12);
+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8);
+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7);
+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16);
+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12);
+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8);
+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7);
+ bnez $at, .Loop_chacha20_xor_rounds
+
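The counter $at starts at 20 and drops by 2 per pass, so the eight AXR lines, one column round followed by one diagonal round, run ten times for ChaCha20's 20 rounds. The same structure in C, reusing the quarter_round sketch shown earlier:

    #include <stdint.h>

    /* Ten double rounds = 20 ChaCha rounds, mirroring the $at countdown.
     * quarter_round() is the sketch from the AXR section above. */
    static void chacha20_rounds(uint32_t x[16])
    {
        for (int at = 20; at > 0; at -= 2) {
            quarter_round(x, 0, 4,  8, 12); /* column round */
            quarter_round(x, 1, 5,  9, 13);
            quarter_round(x, 2, 6, 10, 14);
            quarter_round(x, 3, 7, 11, 15);
            quarter_round(x, 0, 5, 10, 15); /* diagonal round */
            quarter_round(x, 1, 6, 11, 12);
            quarter_round(x, 2, 7,  8, 13);
            quarter_round(x, 3, 4,  9, 14);
        }
    }
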
+ addiu BYTES, -(CHACHA20_BLOCK_SIZE)
+
+ /* Jump to the unaligned path if src/dst is unaligned. */
+ bnez IS_UNALIGNED, .Loop_chacha20_unaligned
+
+ /* Set the number of rounds here to fill the delay slot. */
+ li $at, 20
+
+ /* BYTES < 0: no full block remains. */
+ bltz BYTES, .Lchacha20_mips_no_full_block_aligned
+
+ FOR_EACH_WORD_REV(STORE_ALIGNED)
+
+ /* BYTES > 0? Loop again. */
+ bgtz BYTES, .Loop_chacha20_rounds
+
+ /* Place this here to fill the delay slot */
+ addiu NONCE_0, 1
+
+ /* BYTES < 0? Handle the last bytes */
+ bltz BYTES, .Lchacha20_mips_xor_bytes
+
+.Lchacha20_mips_xor_done:
+ /* Restore used registers */
+ lw $s0, 0($sp)
+ lw $s1, 4($sp)
+ lw $s2, 8($sp)
+ lw $s3, 12($sp)
+ lw $s4, 16($sp)
+ lw $s5, 20($sp)
+ lw $s6, 24($sp)
+ lw $s7, 28($sp)
+
+ /* Write NONCE_0 back to the right location in the state */
+ sw NONCE_0, 48(STATE)
+
+.Lchacha20_mips_end:
+ addiu $sp, STACK_SIZE
+ jr $ra
+
+.Lchacha20_mips_no_full_block_aligned:
+ /* Restore the offset on BYTES */
+ addiu BYTES, CHACHA20_BLOCK_SIZE
+
+ /* Get the number of full WORDS */
+ andi $at, BYTES, MASK_U32
+
+ /* Load the upper half of the jump table address */
+ lui T0, %hi(.Lchacha20_mips_jmptbl_aligned_0)
+
+ /* Calculate the lower-half jump table offset */
+ ins T0, $at, 1, 6
+
+ /* Add the offset to STATE */
+ addu T1, STATE, $at
+
+ /* Add the lower half of the jump table address */
+ addiu T0, %lo(.Lchacha20_mips_jmptbl_aligned_0)
+
+ /* Read the value from STATE */
+ lw SAVED_CA, 0(T1)
+
+ /* Store the remaining byte counter as a negative value */
+ subu BYTES, $at, BYTES
+
+ jr T0
+
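The dispatch arithmetic depends on each jump-table entry being eight bytes: $at holds BYTES & 0x3c, i.e. the number of full words times four, and `ins T0, $at, 1, 6` merges that value into the table base shifted left by one, doubling it to words * 8. A hedged C model of the address computation:

    #include <stdint.h>

    /* Each table entry is two 4-byte instructions, so entry n lives at
     * base + n * 8. 'at' arrives as words * 4 (BYTES & 0x3c). */
    static uintptr_t jmptbl_entry(uintptr_t table_base, uint32_t at)
    {
        return table_base + ((uintptr_t)at << 1); /* like ins T0, $at, 1, 6 */
    }
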
+ /* Jump table */
+ FOR_EACH_WORD(JMPTBL_ALIGNED)
+
+
+.Loop_chacha20_unaligned:
+ /* Set the number of rounds here to fill the delay slot. */
+ li $at, 20
+
+ /* BYTES < 0: no full block remains. */
+ bltz BYTES, .Lchacha20_mips_no_full_block_unaligned
+
+ FOR_EACH_WORD_REV(STORE_UNALIGNED)
+
+ /* BYTES > 0? Loop again. */
+ bgtz BYTES, .Loop_chacha20_rounds
+
+ /* Write NONCE_0 back to the right location in the state */
+ sw NONCE_0, 48(STATE)
+
+ .set noreorder
+ /* Fall through to byte handling */
+ bgez BYTES, .Lchacha20_mips_xor_done
+.Lchacha20_mips_xor_unaligned_0_b:
+.Lchacha20_mips_xor_aligned_0_b:
+ /* Place this here to fill the delay slot */
+ addiu NONCE_0, 1
+ .set reorder
+
+.Lchacha20_mips_xor_bytes:
+ addu IN, $at
+ addu OUT, $at
+ /* First byte */
+ lbu T1, 0(IN)
+ addiu $at, BYTES, 1
+ CPU_TO_LE32(SAVED_X)
+ ROTR(SAVED_X)
+ xor T1, SAVED_X
+ sb T1, 0(OUT)
+ beqz $at, .Lchacha20_mips_xor_done
+ /* Second byte */
+ lbu T1, 1(IN)
+ addiu $at, BYTES, 2
+ ROTx SAVED_X, 8
+ xor T1, SAVED_X
+ sb T1, 1(OUT)
+ beqz $at, .Lchacha20_mips_xor_done
+ /* Third byte */
+ lbu T1, 2(IN)
+ ROTx SAVED_X, 8
+ xor T1, SAVED_X
+ sb T1, 2(OUT)
+ b .Lchacha20_mips_xor_done
+
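For a tail shorter than a word, SAVED_X carries the last keystream word; after the endianness fixup it is applied byte by byte, with ROTx rotating the next keystream byte into place and $at counting up from the negative BYTES value toward zero. A simplified C sketch of the byte loop:

    #include <stdint.h>
    #include <stddef.h>

    /* Xor up to 3 trailing bytes with the saved keystream word. */
    static void xor_tail_bytes(uint8_t *out, const uint8_t *in,
                               uint32_t saved_x, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            out[i] = in[i] ^ (uint8_t)saved_x; /* low byte of the keystream */
            saved_x >>= 8;                     /* like ROTx SAVED_X, 8 */
        }
    }
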
+.Lchacha20_mips_no_full_block_unaligned:
+ /* Restore the offset on BYTES */
+ addiu BYTES, CHACHA20_BLOCK_SIZE
+
+ /* Get the number of full WORDS */
+ andi $at, BYTES, MASK_U32
+
+ /* Load the upper half of the jump table address */
+ lui T0, %hi(.Lchacha20_mips_jmptbl_unaligned_0)
+
+ /* Calculate the lower-half jump table offset */
+ ins T0, $at, 1, 6
+
+ /* Add the offset to STATE */
+ addu T1, STATE, $at
+
+ /* Add the lower half of the jump table address */
+ addiu T0, %lo(.Lchacha20_mips_jmptbl_unaligned_0)
+
+ /* Read the value from STATE */
+ lw SAVED_CA, 0(T1)
+
+ /* Store the remaining byte counter as a negative value */
+ subu BYTES, $at, BYTES
+
+ jr T0
+
+ /* Jump table */
+ FOR_EACH_WORD(JMPTBL_UNALIGNED)
+.end chacha20_mips
+.set at