fix typo in 910-cryptodev_backport.patch (#3933)
[openwrt/openwrt.git] / target / linux / generic-2.6 / patches-2.6.26 / 910-cryptodev_backport.patch
1 --- a/crypto/Kconfig
2 +++ b/crypto/Kconfig
3 @@ -65,6 +65,7 @@ config CRYPTO_NULL
4 config CRYPTO_CRYPTD
5 tristate "Software async crypto daemon"
6 select CRYPTO_BLKCIPHER
7 + select CRYPTO_HASH
8 select CRYPTO_MANAGER
9 help
10 This is a generic software asynchronous crypto daemon that
11 @@ -212,7 +213,7 @@ comment "Digest"
12
13 config CRYPTO_CRC32C
14 tristate "CRC32c CRC algorithm"
15 - select CRYPTO_ALGAPI
16 + select CRYPTO_HASH
17 select LIBCRC32C
18 help
19 Castagnoli, et al Cyclic Redundancy-Check Algorithm. Used
20 @@ -241,6 +242,57 @@ config CRYPTO_MICHAEL_MIC
21 should not be used for other purposes because of the weakness
22 of the algorithm.
23
24 +config CRYPTO_RMD128
25 + tristate "RIPEMD-128 digest algorithm"
26 + select CRYPTO_ALGAPI
27 + help
28 + RIPEMD-128 (ISO/IEC 10118-3:2004).
29 +
30 + RIPEMD-128 is a 128-bit cryptographic hash function. It should only
31 + to be used as a secure replacement for RIPEMD. For other use cases
32 + RIPEMD-160 should be used.
33 +
34 + Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
35 + See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
36 +
37 +config CRYPTO_RMD160
38 + tristate "RIPEMD-160 digest algorithm"
39 + select CRYPTO_ALGAPI
40 + help
41 + RIPEMD-160 (ISO/IEC 10118-3:2004).
42 +
43 + RIPEMD-160 is a 160-bit cryptographic hash function. It is intended
44 + to be used as a secure replacement for the 128-bit hash functions
45 + MD4, MD5 and its predecessor RIPEMD (not to be confused with RIPEMD-128).
46 +
47 + Its speed is comparable to SHA1 and there are no known attacks against
48 + RIPEMD-160.
49 +
50 + Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
51 + See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
52 +
53 +config CRYPTO_RMD256
54 + tristate "RIPEMD-256 digest algorithm"
55 + select CRYPTO_ALGAPI
56 + help
57 + RIPEMD-256 is an optional extension of RIPEMD-128 with a 256 bit hash.
58 + It is intended for applications that require longer hash-results, without
59 + needing a larger security level (than RIPEMD-128).
60 +
61 + Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
62 + See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
63 +
64 +config CRYPTO_RMD320
65 + tristate "RIPEMD-320 digest algorithm"
66 + select CRYPTO_ALGAPI
67 + help
68 + RIPEMD-320 is an optional extension of RIPEMD-160 with a 320 bit hash.
69 + It is intended for applications that require longer hash-results, without
70 + needing a larger security level (than RIPEMD-160).
71 +
72 + Developed by Hans Dobbertin, Antoon Bosselaers and Bart Preneel.
73 + See <http://home.esat.kuleuven.be/~bosselae/ripemd160.html>
74 +
75 config CRYPTO_SHA1
76 tristate "SHA1 digest algorithm"
77 select CRYPTO_ALGAPI
78 @@ -614,6 +666,15 @@ config CRYPTO_LZO
79 help
80 This is the LZO algorithm.
81
82 +comment "Random Number Generation"
83 +
84 +config CRYPTO_PRNG
85 + tristate "Pseudo Random Number Generation for Cryptographic modules"
86 + help
87 + This option enables the generic pseudo random number generator
88 + for cryptographic modules. Uses the Algorithm specified in
89 + ANSI X9.31 A.2.4
90 +
91 source "drivers/crypto/Kconfig"
92
93 endif # if CRYPTO
94 --- a/crypto/Makefile
95 +++ b/crypto/Makefile
96 @@ -19,6 +19,7 @@ obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto
97 obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
98
99 crypto_hash-objs := hash.o
100 +crypto_hash-objs += ahash.o
101 obj-$(CONFIG_CRYPTO_HASH) += crypto_hash.o
102
103 obj-$(CONFIG_CRYPTO_MANAGER) += cryptomgr.o
104 @@ -27,6 +28,10 @@ obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
105 obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
106 obj-$(CONFIG_CRYPTO_MD4) += md4.o
107 obj-$(CONFIG_CRYPTO_MD5) += md5.o
108 +obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o
109 +obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
110 +obj-$(CONFIG_CRYPTO_RMD256) += rmd256.o
111 +obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
112 obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
113 obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
114 obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
115 @@ -64,7 +69,7 @@ obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += mich
116 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c.o
117 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o
118 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
119 -
120 +obj-$(CONFIG_CRYPTO_PRNG) += prng.o
121 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
122
123 #
124 --- /dev/null
125 +++ b/crypto/ahash.c
126 @@ -0,0 +1,194 @@
127 +/*
128 + * Asynchronous Cryptographic Hash operations.
129 + *
130 + * This is the asynchronous version of hash.c with notification of
131 + * completion via a callback.
132 + *
133 + * Copyright (c) 2008 Loc Ho <lho@amcc.com>
134 + *
135 + * This program is free software; you can redistribute it and/or modify it
136 + * under the terms of the GNU General Public License as published by the Free
137 + * Software Foundation; either version 2 of the License, or (at your option)
138 + * any later version.
139 + *
140 + */
141 +
142 +#include <crypto/internal/hash.h>
143 +#include <crypto/scatterwalk.h>
144 +#include <linux/err.h>
145 +#include <linux/kernel.h>
146 +#include <linux/module.h>
147 +#include <linux/sched.h>
148 +#include <linux/slab.h>
149 +#include <linux/seq_file.h>
150 +
151 +#include "internal.h"
152 +
153 +static int hash_walk_next(struct crypto_hash_walk *walk)
154 +{
155 + unsigned int alignmask = walk->alignmask;
156 + unsigned int offset = walk->offset;
157 + unsigned int nbytes = min(walk->entrylen,
158 + ((unsigned int)(PAGE_SIZE)) - offset);
159 +
160 + walk->data = crypto_kmap(walk->pg, 0);
161 + walk->data += offset;
162 +
163 + if (offset & alignmask)
164 + nbytes = alignmask + 1 - (offset & alignmask);
165 +
166 + walk->entrylen -= nbytes;
167 + return nbytes;
168 +}
169 +
170 +static int hash_walk_new_entry(struct crypto_hash_walk *walk)
171 +{
172 + struct scatterlist *sg;
173 +
174 + sg = walk->sg;
175 + walk->pg = sg_page(sg);
176 + walk->offset = sg->offset;
177 + walk->entrylen = sg->length;
178 +
179 + if (walk->entrylen > walk->total)
180 + walk->entrylen = walk->total;
181 + walk->total -= walk->entrylen;
182 +
183 + return hash_walk_next(walk);
184 +}
185 +
186 +int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
187 +{
188 + unsigned int alignmask = walk->alignmask;
189 + unsigned int nbytes = walk->entrylen;
190 +
191 + walk->data -= walk->offset;
192 +
193 + if (nbytes && walk->offset & alignmask && !err) {
194 + walk->offset += alignmask - 1;
195 + walk->offset = ALIGN(walk->offset, alignmask + 1);
196 + walk->data += walk->offset;
197 +
198 + nbytes = min(nbytes,
199 + ((unsigned int)(PAGE_SIZE)) - walk->offset);
200 + walk->entrylen -= nbytes;
201 +
202 + return nbytes;
203 + }
204 +
205 + crypto_kunmap(walk->data, 0);
206 + crypto_yield(walk->flags);
207 +
208 + if (err)
209 + return err;
210 +
211 + walk->offset = 0;
212 +
213 + if (nbytes)
214 + return hash_walk_next(walk);
215 +
216 + if (!walk->total)
217 + return 0;
218 +
219 + walk->sg = scatterwalk_sg_next(walk->sg);
220 +
221 + return hash_walk_new_entry(walk);
222 +}
223 +EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
224 +
225 +int crypto_hash_walk_first(struct ahash_request *req,
226 + struct crypto_hash_walk *walk)
227 +{
228 + walk->total = req->nbytes;
229 +
230 + if (!walk->total)
231 + return 0;
232 +
233 + walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
234 + walk->sg = req->src;
235 + walk->flags = req->base.flags;
236 +
237 + return hash_walk_new_entry(walk);
238 +}
239 +EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
240 +
241 +static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
242 + unsigned int keylen)
243 +{
244 + struct ahash_alg *ahash = crypto_ahash_alg(tfm);
245 + unsigned long alignmask = crypto_ahash_alignmask(tfm);
246 + int ret;
247 + u8 *buffer, *alignbuffer;
248 + unsigned long absize;
249 +
250 + absize = keylen + alignmask;
251 + buffer = kmalloc(absize, GFP_ATOMIC);
252 + if (!buffer)
253 + return -ENOMEM;
254 +
255 + alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
256 + memcpy(alignbuffer, key, keylen);
257 + ret = ahash->setkey(tfm, alignbuffer, keylen);
258 + memset(alignbuffer, 0, keylen);
259 + kfree(buffer);
260 + return ret;
261 +}
262 +
263 +static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
264 + unsigned int keylen)
265 +{
266 + struct ahash_alg *ahash = crypto_ahash_alg(tfm);
267 + unsigned long alignmask = crypto_ahash_alignmask(tfm);
268 +
269 + if ((unsigned long)key & alignmask)
270 + return ahash_setkey_unaligned(tfm, key, keylen);
271 +
272 + return ahash->setkey(tfm, key, keylen);
273 +}
274 +
275 +static unsigned int crypto_ahash_ctxsize(struct crypto_alg *alg, u32 type,
276 + u32 mask)
277 +{
278 + return alg->cra_ctxsize;
279 +}
280 +
281 +static int crypto_init_ahash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
282 +{
283 + struct ahash_alg *alg = &tfm->__crt_alg->cra_ahash;
284 + struct ahash_tfm *crt = &tfm->crt_ahash;
285 +
286 + if (alg->digestsize > PAGE_SIZE / 8)
287 + return -EINVAL;
288 +
289 + crt->init = alg->init;
290 + crt->update = alg->update;
291 + crt->final = alg->final;
292 + crt->digest = alg->digest;
293 + crt->setkey = ahash_setkey;
294 + crt->digestsize = alg->digestsize;
295 +
296 + return 0;
297 +}
298 +
299 +static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
300 + __attribute__ ((unused));
301 +static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
302 +{
303 + seq_printf(m, "type : ahash\n");
304 + seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
305 + "yes" : "no");
306 + seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
307 + seq_printf(m, "digestsize : %u\n", alg->cra_hash.digestsize);
308 +}
309 +
310 +const struct crypto_type crypto_ahash_type = {
311 + .ctxsize = crypto_ahash_ctxsize,
312 + .init = crypto_init_ahash_ops,
313 +#ifdef CONFIG_PROC_FS
314 + .show = crypto_ahash_show,
315 +#endif
316 +};
317 +EXPORT_SYMBOL_GPL(crypto_ahash_type);
318 +
319 +MODULE_LICENSE("GPL");
320 +MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
321 --- a/crypto/api.c
322 +++ b/crypto/api.c
323 @@ -235,8 +235,12 @@ static int crypto_init_ops(struct crypto
324 return crypto_init_cipher_ops(tfm);
325
326 case CRYPTO_ALG_TYPE_DIGEST:
327 - return crypto_init_digest_ops(tfm);
328 -
329 + if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) !=
330 + CRYPTO_ALG_TYPE_HASH_MASK)
331 + return crypto_init_digest_ops_async(tfm);
332 + else
333 + return crypto_init_digest_ops(tfm);
334 +
335 case CRYPTO_ALG_TYPE_COMPRESS:
336 return crypto_init_compress_ops(tfm);
337
338 --- a/crypto/camellia.c
339 +++ b/crypto/camellia.c
340 @@ -35,6 +35,8 @@
341 #include <linux/init.h>
342 #include <linux/kernel.h>
343 #include <linux/module.h>
344 +#include <linux/bitops.h>
345 +#include <asm/unaligned.h>
346
347 static const u32 camellia_sp1110[256] = {
348 0x70707000,0x82828200,0x2c2c2c00,0xececec00,
349 @@ -335,20 +337,6 @@ static const u32 camellia_sp4404[256] =
350 /*
351 * macros
352 */
353 -#define GETU32(v, pt) \
354 - do { \
355 - /* latest breed of gcc is clever enough to use move */ \
356 - memcpy(&(v), (pt), 4); \
357 - (v) = be32_to_cpu(v); \
358 - } while(0)
359 -
360 -/* rotation right shift 1byte */
361 -#define ROR8(x) (((x) >> 8) + ((x) << 24))
362 -/* rotation left shift 1bit */
363 -#define ROL1(x) (((x) << 1) + ((x) >> 31))
364 -/* rotation left shift 1byte */
365 -#define ROL8(x) (((x) << 8) + ((x) >> 24))
366 -
367 #define ROLDQ(ll, lr, rl, rr, w0, w1, bits) \
368 do { \
369 w0 = ll; \
370 @@ -383,7 +371,7 @@ static const u32 camellia_sp4404[256] =
371 ^ camellia_sp3033[(u8)(il >> 8)] \
372 ^ camellia_sp4404[(u8)(il )]; \
373 yl ^= yr; \
374 - yr = ROR8(yr); \
375 + yr = ror32(yr, 8); \
376 yr ^= yl; \
377 } while(0)
378
379 @@ -405,7 +393,7 @@ static void camellia_setup_tail(u32 *sub
380 subL[7] ^= subL[1]; subR[7] ^= subR[1];
381 subL[1] ^= subR[1] & ~subR[9];
382 dw = subL[1] & subL[9],
383 - subR[1] ^= ROL1(dw); /* modified for FLinv(kl2) */
384 + subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl2) */
385 /* round 8 */
386 subL[11] ^= subL[1]; subR[11] ^= subR[1];
387 /* round 10 */
388 @@ -414,7 +402,7 @@ static void camellia_setup_tail(u32 *sub
389 subL[15] ^= subL[1]; subR[15] ^= subR[1];
390 subL[1] ^= subR[1] & ~subR[17];
391 dw = subL[1] & subL[17],
392 - subR[1] ^= ROL1(dw); /* modified for FLinv(kl4) */
393 + subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl4) */
394 /* round 14 */
395 subL[19] ^= subL[1]; subR[19] ^= subR[1];
396 /* round 16 */
397 @@ -430,7 +418,7 @@ static void camellia_setup_tail(u32 *sub
398 } else {
399 subL[1] ^= subR[1] & ~subR[25];
400 dw = subL[1] & subL[25],
401 - subR[1] ^= ROL1(dw); /* modified for FLinv(kl6) */
402 + subR[1] ^= rol32(dw, 1); /* modified for FLinv(kl6) */
403 /* round 20 */
404 subL[27] ^= subL[1]; subR[27] ^= subR[1];
405 /* round 22 */
406 @@ -450,7 +438,7 @@ static void camellia_setup_tail(u32 *sub
407 subL[26] ^= kw4l; subR[26] ^= kw4r;
408 kw4l ^= kw4r & ~subR[24];
409 dw = kw4l & subL[24],
410 - kw4r ^= ROL1(dw); /* modified for FL(kl5) */
411 + kw4r ^= rol32(dw, 1); /* modified for FL(kl5) */
412 }
413 /* round 17 */
414 subL[22] ^= kw4l; subR[22] ^= kw4r;
415 @@ -460,7 +448,7 @@ static void camellia_setup_tail(u32 *sub
416 subL[18] ^= kw4l; subR[18] ^= kw4r;
417 kw4l ^= kw4r & ~subR[16];
418 dw = kw4l & subL[16],
419 - kw4r ^= ROL1(dw); /* modified for FL(kl3) */
420 + kw4r ^= rol32(dw, 1); /* modified for FL(kl3) */
421 /* round 11 */
422 subL[14] ^= kw4l; subR[14] ^= kw4r;
423 /* round 9 */
424 @@ -469,7 +457,7 @@ static void camellia_setup_tail(u32 *sub
425 subL[10] ^= kw4l; subR[10] ^= kw4r;
426 kw4l ^= kw4r & ~subR[8];
427 dw = kw4l & subL[8],
428 - kw4r ^= ROL1(dw); /* modified for FL(kl1) */
429 + kw4r ^= rol32(dw, 1); /* modified for FL(kl1) */
430 /* round 5 */
431 subL[6] ^= kw4l; subR[6] ^= kw4r;
432 /* round 3 */
433 @@ -494,7 +482,7 @@ static void camellia_setup_tail(u32 *sub
434 SUBKEY_R(6) = subR[5] ^ subR[7];
435 tl = subL[10] ^ (subR[10] & ~subR[8]);
436 dw = tl & subL[8], /* FL(kl1) */
437 - tr = subR[10] ^ ROL1(dw);
438 + tr = subR[10] ^ rol32(dw, 1);
439 SUBKEY_L(7) = subL[6] ^ tl; /* round 6 */
440 SUBKEY_R(7) = subR[6] ^ tr;
441 SUBKEY_L(8) = subL[8]; /* FL(kl1) */
442 @@ -503,7 +491,7 @@ static void camellia_setup_tail(u32 *sub
443 SUBKEY_R(9) = subR[9];
444 tl = subL[7] ^ (subR[7] & ~subR[9]);
445 dw = tl & subL[9], /* FLinv(kl2) */
446 - tr = subR[7] ^ ROL1(dw);
447 + tr = subR[7] ^ rol32(dw, 1);
448 SUBKEY_L(10) = tl ^ subL[11]; /* round 7 */
449 SUBKEY_R(10) = tr ^ subR[11];
450 SUBKEY_L(11) = subL[10] ^ subL[12]; /* round 8 */
451 @@ -516,7 +504,7 @@ static void camellia_setup_tail(u32 *sub
452 SUBKEY_R(14) = subR[13] ^ subR[15];
453 tl = subL[18] ^ (subR[18] & ~subR[16]);
454 dw = tl & subL[16], /* FL(kl3) */
455 - tr = subR[18] ^ ROL1(dw);
456 + tr = subR[18] ^ rol32(dw, 1);
457 SUBKEY_L(15) = subL[14] ^ tl; /* round 12 */
458 SUBKEY_R(15) = subR[14] ^ tr;
459 SUBKEY_L(16) = subL[16]; /* FL(kl3) */
460 @@ -525,7 +513,7 @@ static void camellia_setup_tail(u32 *sub
461 SUBKEY_R(17) = subR[17];
462 tl = subL[15] ^ (subR[15] & ~subR[17]);
463 dw = tl & subL[17], /* FLinv(kl4) */
464 - tr = subR[15] ^ ROL1(dw);
465 + tr = subR[15] ^ rol32(dw, 1);
466 SUBKEY_L(18) = tl ^ subL[19]; /* round 13 */
467 SUBKEY_R(18) = tr ^ subR[19];
468 SUBKEY_L(19) = subL[18] ^ subL[20]; /* round 14 */
469 @@ -544,7 +532,7 @@ static void camellia_setup_tail(u32 *sub
470 } else {
471 tl = subL[26] ^ (subR[26] & ~subR[24]);
472 dw = tl & subL[24], /* FL(kl5) */
473 - tr = subR[26] ^ ROL1(dw);
474 + tr = subR[26] ^ rol32(dw, 1);
475 SUBKEY_L(23) = subL[22] ^ tl; /* round 18 */
476 SUBKEY_R(23) = subR[22] ^ tr;
477 SUBKEY_L(24) = subL[24]; /* FL(kl5) */
478 @@ -553,7 +541,7 @@ static void camellia_setup_tail(u32 *sub
479 SUBKEY_R(25) = subR[25];
480 tl = subL[23] ^ (subR[23] & ~subR[25]);
481 dw = tl & subL[25], /* FLinv(kl6) */
482 - tr = subR[23] ^ ROL1(dw);
483 + tr = subR[23] ^ rol32(dw, 1);
484 SUBKEY_L(26) = tl ^ subL[27]; /* round 19 */
485 SUBKEY_R(26) = tr ^ subR[27];
486 SUBKEY_L(27) = subL[26] ^ subL[28]; /* round 20 */
487 @@ -573,17 +561,17 @@ static void camellia_setup_tail(u32 *sub
488 /* apply the inverse of the last half of P-function */
489 i = 2;
490 do {
491 - dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = ROL8(dw);/* round 1 */
492 + dw = SUBKEY_L(i + 0) ^ SUBKEY_R(i + 0); dw = rol32(dw, 8);/* round 1 */
493 SUBKEY_R(i + 0) = SUBKEY_L(i + 0) ^ dw; SUBKEY_L(i + 0) = dw;
494 - dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = ROL8(dw);/* round 2 */
495 + dw = SUBKEY_L(i + 1) ^ SUBKEY_R(i + 1); dw = rol32(dw, 8);/* round 2 */
496 SUBKEY_R(i + 1) = SUBKEY_L(i + 1) ^ dw; SUBKEY_L(i + 1) = dw;
497 - dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = ROL8(dw);/* round 3 */
498 + dw = SUBKEY_L(i + 2) ^ SUBKEY_R(i + 2); dw = rol32(dw, 8);/* round 3 */
499 SUBKEY_R(i + 2) = SUBKEY_L(i + 2) ^ dw; SUBKEY_L(i + 2) = dw;
500 - dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = ROL8(dw);/* round 4 */
501 + dw = SUBKEY_L(i + 3) ^ SUBKEY_R(i + 3); dw = rol32(dw, 8);/* round 4 */
502 SUBKEY_R(i + 3) = SUBKEY_L(i + 3) ^ dw; SUBKEY_L(i + 3) = dw;
503 - dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = ROL8(dw);/* round 5 */
504 + dw = SUBKEY_L(i + 4) ^ SUBKEY_R(i + 4); dw = rol32(dw, 8);/* round 5 */
505 SUBKEY_R(i + 4) = SUBKEY_L(i + 4) ^ dw; SUBKEY_L(i + 4) = dw;
506 - dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = ROL8(dw);/* round 6 */
507 + dw = SUBKEY_L(i + 5) ^ SUBKEY_R(i + 5); dw = rol32(dw, 8);/* round 6 */
508 SUBKEY_R(i + 5) = SUBKEY_L(i + 5) ^ dw; SUBKEY_L(i + 5) = dw;
509 i += 8;
510 } while (i < max);
511 @@ -599,10 +587,10 @@ static void camellia_setup128(const unsi
512 /**
513 * k == kll || klr || krl || krr (|| is concatenation)
514 */
515 - GETU32(kll, key );
516 - GETU32(klr, key + 4);
517 - GETU32(krl, key + 8);
518 - GETU32(krr, key + 12);
519 + kll = get_unaligned_be32(key);
520 + klr = get_unaligned_be32(key + 4);
521 + krl = get_unaligned_be32(key + 8);
522 + krr = get_unaligned_be32(key + 12);
523
524 /* generate KL dependent subkeys */
525 /* kw1 */
526 @@ -707,14 +695,14 @@ static void camellia_setup256(const unsi
527 * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
528 * (|| is concatenation)
529 */
530 - GETU32(kll, key );
531 - GETU32(klr, key + 4);
532 - GETU32(krl, key + 8);
533 - GETU32(krr, key + 12);
534 - GETU32(krll, key + 16);
535 - GETU32(krlr, key + 20);
536 - GETU32(krrl, key + 24);
537 - GETU32(krrr, key + 28);
538 + kll = get_unaligned_be32(key);
539 + klr = get_unaligned_be32(key + 4);
540 + krl = get_unaligned_be32(key + 8);
541 + krr = get_unaligned_be32(key + 12);
542 + krll = get_unaligned_be32(key + 16);
543 + krlr = get_unaligned_be32(key + 20);
544 + krrl = get_unaligned_be32(key + 24);
545 + krrr = get_unaligned_be32(key + 28);
546
547 /* generate KL dependent subkeys */
548 /* kw1 */
549 @@ -870,13 +858,13 @@ static void camellia_setup192(const unsi
550 t0 &= ll; \
551 t2 |= rr; \
552 rl ^= t2; \
553 - lr ^= ROL1(t0); \
554 + lr ^= rol32(t0, 1); \
555 t3 = krl; \
556 t1 = klr; \
557 t3 &= rl; \
558 t1 |= lr; \
559 ll ^= t1; \
560 - rr ^= ROL1(t3); \
561 + rr ^= rol32(t3, 1); \
562 } while(0)
563
564 #define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir) \
565 @@ -892,7 +880,7 @@ static void camellia_setup192(const unsi
566 il ^= kl; \
567 ir ^= il ^ kr; \
568 yl ^= ir; \
569 - yr ^= ROR8(il) ^ ir; \
570 + yr ^= ror32(il, 8) ^ ir; \
571 } while(0)
572
573 /* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
574 --- a/crypto/crc32c.c
575 +++ b/crypto/crc32c.c
576 @@ -1,24 +1,27 @@
577 -/*
578 +/*
579 * Cryptographic API.
580 *
581 * CRC32C chksum
582 *
583 * This module file is a wrapper to invoke the lib/crc32c routines.
584 *
585 + * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
586 + *
587 * This program is free software; you can redistribute it and/or modify it
588 * under the terms of the GNU General Public License as published by the Free
589 - * Software Foundation; either version 2 of the License, or (at your option)
590 + * Software Foundation; either version 2 of the License, or (at your option)
591 * any later version.
592 *
593 */
594 +
595 +#include <crypto/internal/hash.h>
596 #include <linux/init.h>
597 #include <linux/module.h>
598 #include <linux/string.h>
599 -#include <linux/crypto.h>
600 #include <linux/crc32c.h>
601 #include <linux/kernel.h>
602
603 -#define CHKSUM_BLOCK_SIZE 32
604 +#define CHKSUM_BLOCK_SIZE 1
605 #define CHKSUM_DIGEST_SIZE 4
606
607 struct chksum_ctx {
608 @@ -27,7 +30,7 @@ struct chksum_ctx {
609 };
610
611 /*
612 - * Steps through buffer one byte at at time, calculates reflected
613 + * Steps through buffer one byte at a time, calculates reflected
614 * crc using table.
615 */
616
617 @@ -67,11 +70,11 @@ static void chksum_update(struct crypto_
618 static void chksum_final(struct crypto_tfm *tfm, u8 *out)
619 {
620 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
621 -
622 +
623 *(__le32 *)out = ~cpu_to_le32(mctx->crc);
624 }
625
626 -static int crc32c_cra_init(struct crypto_tfm *tfm)
627 +static int crc32c_cra_init_old(struct crypto_tfm *tfm)
628 {
629 struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
630
631 @@ -79,14 +82,14 @@ static int crc32c_cra_init(struct crypto
632 return 0;
633 }
634
635 -static struct crypto_alg alg = {
636 +static struct crypto_alg old_alg = {
637 .cra_name = "crc32c",
638 .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
639 .cra_blocksize = CHKSUM_BLOCK_SIZE,
640 .cra_ctxsize = sizeof(struct chksum_ctx),
641 .cra_module = THIS_MODULE,
642 - .cra_list = LIST_HEAD_INIT(alg.cra_list),
643 - .cra_init = crc32c_cra_init,
644 + .cra_list = LIST_HEAD_INIT(old_alg.cra_list),
645 + .cra_init = crc32c_cra_init_old,
646 .cra_u = {
647 .digest = {
648 .dia_digestsize= CHKSUM_DIGEST_SIZE,
649 @@ -98,14 +101,125 @@ static struct crypto_alg alg = {
650 }
651 };
652
653 +/*
654 + * Setting the seed allows arbitrary accumulators and flexible XOR policy
655 + * If your algorithm starts with ~0, then XOR with ~0 before you set
656 + * the seed.
657 + */
658 +static int crc32c_setkey(struct crypto_ahash *hash, const u8 *key,
659 + unsigned int keylen)
660 +{
661 + u32 *mctx = crypto_ahash_ctx(hash);
662 +
663 + if (keylen != sizeof(u32)) {
664 + crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
665 + return -EINVAL;
666 + }
667 + *mctx = le32_to_cpup((__le32 *)key);
668 + return 0;
669 +}
670 +
671 +static int crc32c_init(struct ahash_request *req)
672 +{
673 + u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
674 + u32 *crcp = ahash_request_ctx(req);
675 +
676 + *crcp = *mctx;
677 + return 0;
678 +}
679 +
680 +static int crc32c_update(struct ahash_request *req)
681 +{
682 + struct crypto_hash_walk walk;
683 + u32 *crcp = ahash_request_ctx(req);
684 + u32 crc = *crcp;
685 + int nbytes;
686 +
687 + for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
688 + nbytes = crypto_hash_walk_done(&walk, 0))
689 + crc = crc32c(crc, walk.data, nbytes);
690 +
691 + *crcp = crc;
692 + return 0;
693 +}
694 +
695 +static int crc32c_final(struct ahash_request *req)
696 +{
697 + u32 *crcp = ahash_request_ctx(req);
698 +
699 + *(__le32 *)req->result = ~cpu_to_le32p(crcp);
700 + return 0;
701 +}
702 +
703 +static int crc32c_digest(struct ahash_request *req)
704 +{
705 + struct crypto_hash_walk walk;
706 + u32 *mctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
707 + u32 crc = *mctx;
708 + int nbytes;
709 +
710 + for (nbytes = crypto_hash_walk_first(req, &walk); nbytes;
711 + nbytes = crypto_hash_walk_done(&walk, 0))
712 + crc = crc32c(crc, walk.data, nbytes);
713 +
714 + *(__le32 *)req->result = ~cpu_to_le32(crc);
715 + return 0;
716 +}
717 +
718 +static int crc32c_cra_init(struct crypto_tfm *tfm)
719 +{
720 + u32 *key = crypto_tfm_ctx(tfm);
721 +
722 + *key = ~0;
723 +
724 + tfm->crt_ahash.reqsize = sizeof(u32);
725 +
726 + return 0;
727 +}
728 +
729 +static struct crypto_alg alg = {
730 + .cra_name = "crc32c",
731 + .cra_driver_name = "crc32c-generic",
732 + .cra_priority = 100,
733 + .cra_flags = CRYPTO_ALG_TYPE_AHASH,
734 + .cra_blocksize = CHKSUM_BLOCK_SIZE,
735 + .cra_alignmask = 3,
736 + .cra_ctxsize = sizeof(u32),
737 + .cra_module = THIS_MODULE,
738 + .cra_list = LIST_HEAD_INIT(alg.cra_list),
739 + .cra_init = crc32c_cra_init,
740 + .cra_type = &crypto_ahash_type,
741 + .cra_u = {
742 + .ahash = {
743 + .digestsize = CHKSUM_DIGEST_SIZE,
744 + .setkey = crc32c_setkey,
745 + .init = crc32c_init,
746 + .update = crc32c_update,
747 + .final = crc32c_final,
748 + .digest = crc32c_digest,
749 + }
750 + }
751 +};
752 +
753 static int __init crc32c_mod_init(void)
754 {
755 - return crypto_register_alg(&alg);
756 + int err;
757 +
758 + err = crypto_register_alg(&old_alg);
759 + if (err)
760 + return err;
761 +
762 + err = crypto_register_alg(&alg);
763 + if (err)
764 + crypto_unregister_alg(&old_alg);
765 +
766 + return err;
767 }
768
769 static void __exit crc32c_mod_fini(void)
770 {
771 crypto_unregister_alg(&alg);
772 + crypto_unregister_alg(&old_alg);
773 }
774
775 module_init(crc32c_mod_init);
776 --- a/crypto/cryptd.c
777 +++ b/crypto/cryptd.c
778 @@ -11,6 +11,7 @@
779 */
780
781 #include <crypto/algapi.h>
782 +#include <crypto/internal/hash.h>
783 #include <linux/err.h>
784 #include <linux/init.h>
785 #include <linux/kernel.h>
786 @@ -45,6 +46,13 @@ struct cryptd_blkcipher_request_ctx {
787 crypto_completion_t complete;
788 };
789
790 +struct cryptd_hash_ctx {
791 + struct crypto_hash *child;
792 +};
793 +
794 +struct cryptd_hash_request_ctx {
795 + crypto_completion_t complete;
796 +};
797
798 static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
799 {
800 @@ -82,10 +90,8 @@ static void cryptd_blkcipher_crypt(struc
801
802 rctx = ablkcipher_request_ctx(req);
803
804 - if (unlikely(err == -EINPROGRESS)) {
805 - rctx->complete(&req->base, err);
806 - return;
807 - }
808 + if (unlikely(err == -EINPROGRESS))
809 + goto out;
810
811 desc.tfm = child;
812 desc.info = req->info;
813 @@ -95,8 +101,9 @@ static void cryptd_blkcipher_crypt(struc
814
815 req->base.complete = rctx->complete;
816
817 +out:
818 local_bh_disable();
819 - req->base.complete(&req->base, err);
820 + rctx->complete(&req->base, err);
821 local_bh_enable();
822 }
823
824 @@ -261,6 +268,240 @@ out_put_alg:
825 return inst;
826 }
827
828 +static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
829 +{
830 + struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
831 + struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
832 + struct crypto_spawn *spawn = &ictx->spawn;
833 + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
834 + struct crypto_hash *cipher;
835 +
836 + cipher = crypto_spawn_hash(spawn);
837 + if (IS_ERR(cipher))
838 + return PTR_ERR(cipher);
839 +
840 + ctx->child = cipher;
841 + tfm->crt_ahash.reqsize =
842 + sizeof(struct cryptd_hash_request_ctx);
843 + return 0;
844 +}
845 +
846 +static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
847 +{
848 + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
849 + struct cryptd_state *state = cryptd_get_state(tfm);
850 + int active;
851 +
852 + mutex_lock(&state->mutex);
853 + active = ahash_tfm_in_queue(&state->queue,
854 + __crypto_ahash_cast(tfm));
855 + mutex_unlock(&state->mutex);
856 +
857 + BUG_ON(active);
858 +
859 + crypto_free_hash(ctx->child);
860 +}
861 +
862 +static int cryptd_hash_setkey(struct crypto_ahash *parent,
863 + const u8 *key, unsigned int keylen)
864 +{
865 + struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
866 + struct crypto_hash *child = ctx->child;
867 + int err;
868 +
869 + crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
870 + crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
871 + CRYPTO_TFM_REQ_MASK);
872 + err = crypto_hash_setkey(child, key, keylen);
873 + crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
874 + CRYPTO_TFM_RES_MASK);
875 + return err;
876 +}
877 +
878 +static int cryptd_hash_enqueue(struct ahash_request *req,
879 + crypto_completion_t complete)
880 +{
881 + struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
882 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
883 + struct cryptd_state *state =
884 + cryptd_get_state(crypto_ahash_tfm(tfm));
885 + int err;
886 +
887 + rctx->complete = req->base.complete;
888 + req->base.complete = complete;
889 +
890 + spin_lock_bh(&state->lock);
891 + err = ahash_enqueue_request(&state->queue, req);
892 + spin_unlock_bh(&state->lock);
893 +
894 + wake_up_process(state->task);
895 + return err;
896 +}
897 +
898 +static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
899 +{
900 + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
901 + struct crypto_hash *child = ctx->child;
902 + struct ahash_request *req = ahash_request_cast(req_async);
903 + struct cryptd_hash_request_ctx *rctx;
904 + struct hash_desc desc;
905 +
906 + rctx = ahash_request_ctx(req);
907 +
908 + if (unlikely(err == -EINPROGRESS))
909 + goto out;
910 +
911 + desc.tfm = child;
912 + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
913 +
914 + err = crypto_hash_crt(child)->init(&desc);
915 +
916 + req->base.complete = rctx->complete;
917 +
918 +out:
919 + local_bh_disable();
920 + rctx->complete(&req->base, err);
921 + local_bh_enable();
922 +}
923 +
924 +static int cryptd_hash_init_enqueue(struct ahash_request *req)
925 +{
926 + return cryptd_hash_enqueue(req, cryptd_hash_init);
927 +}
928 +
929 +static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
930 +{
931 + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
932 + struct crypto_hash *child = ctx->child;
933 + struct ahash_request *req = ahash_request_cast(req_async);
934 + struct cryptd_hash_request_ctx *rctx;
935 + struct hash_desc desc;
936 +
937 + rctx = ahash_request_ctx(req);
938 +
939 + if (unlikely(err == -EINPROGRESS))
940 + goto out;
941 +
942 + desc.tfm = child;
943 + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
944 +
945 + err = crypto_hash_crt(child)->update(&desc,
946 + req->src,
947 + req->nbytes);
948 +
949 + req->base.complete = rctx->complete;
950 +
951 +out:
952 + local_bh_disable();
953 + rctx->complete(&req->base, err);
954 + local_bh_enable();
955 +}
956 +
957 +static int cryptd_hash_update_enqueue(struct ahash_request *req)
958 +{
959 + return cryptd_hash_enqueue(req, cryptd_hash_update);
960 +}
961 +
962 +static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
963 +{
964 + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
965 + struct crypto_hash *child = ctx->child;
966 + struct ahash_request *req = ahash_request_cast(req_async);
967 + struct cryptd_hash_request_ctx *rctx;
968 + struct hash_desc desc;
969 +
970 + rctx = ahash_request_ctx(req);
971 +
972 + if (unlikely(err == -EINPROGRESS))
973 + goto out;
974 +
975 + desc.tfm = child;
976 + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
977 +
978 + err = crypto_hash_crt(child)->final(&desc, req->result);
979 +
980 + req->base.complete = rctx->complete;
981 +
982 +out:
983 + local_bh_disable();
984 + rctx->complete(&req->base, err);
985 + local_bh_enable();
986 +}
987 +
988 +static int cryptd_hash_final_enqueue(struct ahash_request *req)
989 +{
990 + return cryptd_hash_enqueue(req, cryptd_hash_final);
991 +}
992 +
993 +static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
994 +{
995 + struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
996 + struct crypto_hash *child = ctx->child;
997 + struct ahash_request *req = ahash_request_cast(req_async);
998 + struct cryptd_hash_request_ctx *rctx;
999 + struct hash_desc desc;
1000 +
1001 + rctx = ahash_request_ctx(req);
1002 +
1003 + if (unlikely(err == -EINPROGRESS))
1004 + goto out;
1005 +
1006 + desc.tfm = child;
1007 + desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
1008 +
1009 + err = crypto_hash_crt(child)->digest(&desc,
1010 + req->src,
1011 + req->nbytes,
1012 + req->result);
1013 +
1014 + req->base.complete = rctx->complete;
1015 +
1016 +out:
1017 + local_bh_disable();
1018 + rctx->complete(&req->base, err);
1019 + local_bh_enable();
1020 +}
1021 +
1022 +static int cryptd_hash_digest_enqueue(struct ahash_request *req)
1023 +{
1024 + return cryptd_hash_enqueue(req, cryptd_hash_digest);
1025 +}
1026 +
1027 +static struct crypto_instance *cryptd_alloc_hash(
1028 + struct rtattr **tb, struct cryptd_state *state)
1029 +{
1030 + struct crypto_instance *inst;
1031 + struct crypto_alg *alg;
1032 +
1033 + alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
1034 + CRYPTO_ALG_TYPE_HASH_MASK);
1035 + if (IS_ERR(alg))
1036 + return ERR_PTR(PTR_ERR(alg));
1037 +
1038 + inst = cryptd_alloc_instance(alg, state);
1039 + if (IS_ERR(inst))
1040 + goto out_put_alg;
1041 +
1042 + inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
1043 + inst->alg.cra_type = &crypto_ahash_type;
1044 +
1045 + inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
1046 + inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
1047 +
1048 + inst->alg.cra_init = cryptd_hash_init_tfm;
1049 + inst->alg.cra_exit = cryptd_hash_exit_tfm;
1050 +
1051 + inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
1052 + inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
1053 + inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
1054 + inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
1055 + inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;
1056 +
1057 +out_put_alg:
1058 + crypto_mod_put(alg);
1059 + return inst;
1060 +}
1061 +
1062 static struct cryptd_state state;
1063
1064 static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
1065 @@ -274,6 +515,8 @@ static struct crypto_instance *cryptd_al
1066 switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
1067 case CRYPTO_ALG_TYPE_BLKCIPHER:
1068 return cryptd_alloc_blkcipher(tb, &state);
1069 + case CRYPTO_ALG_TYPE_DIGEST:
1070 + return cryptd_alloc_hash(tb, &state);
1071 }
1072
1073 return ERR_PTR(-EINVAL);
1074 --- a/crypto/digest.c
1075 +++ b/crypto/digest.c
1076 @@ -12,6 +12,7 @@
1077 *
1078 */
1079
1080 +#include <crypto/internal/hash.h>
1081 #include <crypto/scatterwalk.h>
1082 #include <linux/mm.h>
1083 #include <linux/errno.h>
1084 @@ -141,7 +142,7 @@ int crypto_init_digest_ops(struct crypto
1085 struct hash_tfm *ops = &tfm->crt_hash;
1086 struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
1087
1088 - if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
1089 + if (dalg->dia_digestsize > PAGE_SIZE / 8)
1090 return -EINVAL;
1091
1092 ops->init = init;
1093 @@ -157,3 +158,83 @@ int crypto_init_digest_ops(struct crypto
1094 void crypto_exit_digest_ops(struct crypto_tfm *tfm)
1095 {
1096 }
1097 +
1098 +static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key,
1099 + unsigned int keylen)
1100 +{
1101 + crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
1102 + return -ENOSYS;
1103 +}
1104 +
1105 +static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
1106 + unsigned int keylen)
1107 +{
1108 + struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
1109 + struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
1110 +
1111 + crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
1112 + return dalg->dia_setkey(tfm, key, keylen);
1113 +}
1114 +
1115 +static int digest_async_init(struct ahash_request *req)
1116 +{
1117 + struct crypto_tfm *tfm = req->base.tfm;
1118 + struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
1119 +
1120 + dalg->dia_init(tfm);
1121 + return 0;
1122 +}
1123 +
1124 +static int digest_async_update(struct ahash_request *req)
1125 +{
1126 + struct crypto_tfm *tfm = req->base.tfm;
1127 + struct hash_desc desc = {
1128 + .tfm = __crypto_hash_cast(tfm),
1129 + .flags = req->base.flags,
1130 + };
1131 +
1132 + update(&desc, req->src, req->nbytes);
1133 + return 0;
1134 +}
1135 +
1136 +static int digest_async_final(struct ahash_request *req)
1137 +{
1138 + struct crypto_tfm *tfm = req->base.tfm;
1139 + struct hash_desc desc = {
1140 + .tfm = __crypto_hash_cast(tfm),
1141 + .flags = req->base.flags,
1142 + };
1143 +
1144 + final(&desc, req->result);
1145 + return 0;
1146 +}
1147 +
1148 +static int digest_async_digest(struct ahash_request *req)
1149 +{
1150 + struct crypto_tfm *tfm = req->base.tfm;
1151 + struct hash_desc desc = {
1152 + .tfm = __crypto_hash_cast(tfm),
1153 + .flags = req->base.flags,
1154 + };
1155 +
1156 + return digest(&desc, req->src, req->nbytes, req->result);
1157 +}
1158 +
1159 +int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
1160 +{
1161 + struct ahash_tfm *crt = &tfm->crt_ahash;
1162 + struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;
1163 +
1164 + if (dalg->dia_digestsize > crypto_tfm_alg_blocksize(tfm))
1165 + return -EINVAL;
1166 +
1167 + crt->init = digest_async_init;
1168 + crt->update = digest_async_update;
1169 + crt->final = digest_async_final;
1170 + crt->digest = digest_async_digest;
1171 + crt->setkey = dalg->dia_setkey ? digest_async_setkey :
1172 + digest_async_nosetkey;
1173 + crt->digestsize = dalg->dia_digestsize;
1174 +
1175 + return 0;
1176 +}
1177 --- a/crypto/hash.c
1178 +++ b/crypto/hash.c
1179 @@ -9,6 +9,7 @@
1180 * any later version.
1181 */
1182
1183 +#include <crypto/internal/hash.h>
1184 #include <linux/errno.h>
1185 #include <linux/kernel.h>
1186 #include <linux/module.h>
1187 @@ -59,24 +60,107 @@ static int hash_setkey(struct crypto_has
1188 return alg->setkey(crt, key, keylen);
1189 }
1190
1191 -static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
1192 +static int hash_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
1193 + unsigned int keylen)
1194 +{
1195 + struct crypto_tfm *tfm = crypto_ahash_tfm(tfm_async);
1196 + struct crypto_hash *tfm_hash = __crypto_hash_cast(tfm);
1197 + struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
1198 +
1199 + return alg->setkey(tfm_hash, key, keylen);
1200 +}
1201 +
1202 +static int hash_async_init(struct ahash_request *req)
1203 +{
1204 + struct crypto_tfm *tfm = req->base.tfm;
1205 + struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
1206 + struct hash_desc desc = {
1207 + .tfm = __crypto_hash_cast(tfm),
1208 + .flags = req->base.flags,
1209 + };
1210 +
1211 + return alg->init(&desc);
1212 +}
1213 +
1214 +static int hash_async_update(struct ahash_request *req)
1215 +{
1216 + struct crypto_tfm *tfm = req->base.tfm;
1217 + struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
1218 + struct hash_desc desc = {
1219 + .tfm = __crypto_hash_cast(tfm),
1220 + .flags = req->base.flags,
1221 + };
1222 +
1223 + return alg->update(&desc, req->src, req->nbytes);
1224 +}
1225 +
1226 +static int hash_async_final(struct ahash_request *req)
1227 +{
1228 + struct crypto_tfm *tfm = req->base.tfm;
1229 + struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
1230 + struct hash_desc desc = {
1231 + .tfm = __crypto_hash_cast(tfm),
1232 + .flags = req->base.flags,
1233 + };
1234 +
1235 + return alg->final(&desc, req->result);
1236 +}
1237 +
1238 +static int hash_async_digest(struct ahash_request *req)
1239 +{
1240 + struct crypto_tfm *tfm = req->base.tfm;
1241 + struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
1242 + struct hash_desc desc = {
1243 + .tfm = __crypto_hash_cast(tfm),
1244 + .flags = req->base.flags,
1245 + };
1246 +
1247 + return alg->digest(&desc, req->src, req->nbytes, req->result);
1248 +}
1249 +
1250 +static int crypto_init_hash_ops_async(struct crypto_tfm *tfm)
1251 +{
1252 + struct ahash_tfm *crt = &tfm->crt_ahash;
1253 + struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
1254 +
1255 + crt->init = hash_async_init;
1256 + crt->update = hash_async_update;
1257 + crt->final = hash_async_final;
1258 + crt->digest = hash_async_digest;
1259 + crt->setkey = hash_async_setkey;
1260 + crt->digestsize = alg->digestsize;
1261 +
1262 + return 0;
1263 +}
1264 +
1265 +static int crypto_init_hash_ops_sync(struct crypto_tfm *tfm)
1266 {
1267 struct hash_tfm *crt = &tfm->crt_hash;
1268 struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
1269
1270 - if (alg->digestsize > crypto_tfm_alg_blocksize(tfm))
1271 - return -EINVAL;
1272 -
1273 - crt->init = alg->init;
1274 - crt->update = alg->update;
1275 - crt->final = alg->final;
1276 - crt->digest = alg->digest;
1277 - crt->setkey = hash_setkey;
1278 + crt->init = alg->init;
1279 + crt->update = alg->update;
1280 + crt->final = alg->final;
1281 + crt->digest = alg->digest;
1282 + crt->setkey = hash_setkey;
1283 crt->digestsize = alg->digestsize;
1284
1285 return 0;
1286 }
1287
1288 +static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
1289 +{
1290 + struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
1291 +
1292 + if (alg->digestsize > PAGE_SIZE / 8)
1293 + return -EINVAL;
1294 +
1295 + if ((mask & CRYPTO_ALG_TYPE_HASH_MASK) != CRYPTO_ALG_TYPE_HASH_MASK)
1296 + return crypto_init_hash_ops_async(tfm);
1297 + else
1298 + return crypto_init_hash_ops_sync(tfm);
1299 +}
1300 +
1301 static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
1302 __attribute__ ((unused));
1303 static void crypto_hash_show(struct seq_file *m, struct crypto_alg *alg)
1304 --- a/crypto/hmac.c
1305 +++ b/crypto/hmac.c
1306 @@ -226,6 +226,7 @@ static struct crypto_instance *hmac_allo
1307 struct crypto_instance *inst;
1308 struct crypto_alg *alg;
1309 int err;
1310 + int ds;
1311
1312 err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_HASH);
1313 if (err)
1314 @@ -236,6 +237,13 @@ static struct crypto_instance *hmac_allo
1315 if (IS_ERR(alg))
1316 return ERR_CAST(alg);
1317
1318 + inst = ERR_PTR(-EINVAL);
1319 + ds = (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
1320 + CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
1321 + alg->cra_digest.dia_digestsize;
1322 + if (ds > alg->cra_blocksize)
1323 + goto out_put_alg;
1324 +
1325 inst = crypto_alloc_instance("hmac", alg);
1326 if (IS_ERR(inst))
1327 goto out_put_alg;
1328 @@ -246,14 +254,10 @@ static struct crypto_instance *hmac_allo
1329 inst->alg.cra_alignmask = alg->cra_alignmask;
1330 inst->alg.cra_type = &crypto_hash_type;
1331
1332 - inst->alg.cra_hash.digestsize =
1333 - (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
1334 - CRYPTO_ALG_TYPE_HASH ? alg->cra_hash.digestsize :
1335 - alg->cra_digest.dia_digestsize;
1336 + inst->alg.cra_hash.digestsize = ds;
1337
1338 inst->alg.cra_ctxsize = sizeof(struct hmac_ctx) +
1339 - ALIGN(inst->alg.cra_blocksize * 2 +
1340 - inst->alg.cra_hash.digestsize,
1341 + ALIGN(inst->alg.cra_blocksize * 2 + ds,
1342 sizeof(void *));
1343
1344 inst->alg.cra_init = hmac_init_tfm;
1345 --- a/crypto/internal.h
1346 +++ b/crypto/internal.h
1347 @@ -86,6 +86,7 @@ struct crypto_alg *__crypto_alg_lookup(c
1348 struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
1349
1350 int crypto_init_digest_ops(struct crypto_tfm *tfm);
1351 +int crypto_init_digest_ops_async(struct crypto_tfm *tfm);
1352 int crypto_init_cipher_ops(struct crypto_tfm *tfm);
1353 int crypto_init_compress_ops(struct crypto_tfm *tfm);
1354
1355 --- /dev/null
1356 +++ b/crypto/prng.c
1357 @@ -0,0 +1,410 @@
1358 +/*
1359 + * PRNG: Pseudo Random Number Generator
1360 + * Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using
1361 + * AES 128 cipher in RFC3686 ctr mode
1362 + *
1363 + * (C) Neil Horman <nhorman@tuxdriver.com>
1364 + *
1365 + * This program is free software; you can redistribute it and/or modify it
1366 + * under the terms of the GNU General Public License as published by the
1367 + * Free Software Foundation; either version 2 of the License, or (at your
1368 + * option) any later version.
1369 + *
1370 + *
1371 + */
1372 +
1373 +#include <linux/err.h>
1374 +#include <linux/init.h>
1375 +#include <linux/module.h>
1376 +#include <linux/mm.h>
1377 +#include <linux/slab.h>
1378 +#include <linux/fs.h>
1379 +#include <linux/scatterlist.h>
1380 +#include <linux/string.h>
1381 +#include <linux/crypto.h>
1382 +#include <linux/highmem.h>
1383 +#include <linux/moduleparam.h>
1384 +#include <linux/jiffies.h>
1385 +#include <linux/timex.h>
1386 +#include <linux/interrupt.h>
1387 +#include <linux/miscdevice.h>
1388 +#include "prng.h"
1389 +
1390 +#define TEST_PRNG_ON_START 0
1391 +
1392 +#define DEFAULT_PRNG_KEY "0123456789abcdef1011"
1393 +#define DEFAULT_PRNG_KSZ 20
1394 +#define DEFAULT_PRNG_IV "defaultv"
1395 +#define DEFAULT_PRNG_IVSZ 8
1396 +#define DEFAULT_BLK_SZ 16
1397 +#define DEFAULT_V_SEED "zaybxcwdveuftgsh"
1398 +
1399 +/*
1400 + * Flags for the prng_context flags field
1401 + */
1402 +
1403 +#define PRNG_FIXED_SIZE 0x1
1404 +#define PRNG_NEED_RESET 0x2
1405 +
1406 +/*
1407 + * Note: DT is our counter value
1408 + * I is our intermediate value
1409 + * V is our seed vector
1410 + * See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf
1411 + * for implementation details
1412 + */
1413 +
1414 +
1415 +struct prng_context {
1416 + char *prng_key;
1417 + char *prng_iv;
1418 + spinlock_t prng_lock;
1419 + unsigned char rand_data[DEFAULT_BLK_SZ];
1420 + unsigned char last_rand_data[DEFAULT_BLK_SZ];
1421 + unsigned char DT[DEFAULT_BLK_SZ];
1422 + unsigned char I[DEFAULT_BLK_SZ];
1423 + unsigned char V[DEFAULT_BLK_SZ];
1424 + u32 rand_data_valid;
1425 + struct crypto_blkcipher *tfm;
1426 + u32 flags;
1427 +};
1428 +
1429 +static int dbg;
1430 +
1431 +static void hexdump(char *note, unsigned char *buf, unsigned int len)
1432 +{
1433 + if (dbg) {
1434 + printk(KERN_CRIT "%s", note);
1435 + print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
1436 + 16, 1,
1437 + buf, len, false);
1438 + }
1439 +}
1440 +
1441 +#define dbgprint(format, args...) do {if(dbg) printk(format, ##args);} while(0)
1442 +
1443 +static void xor_vectors(unsigned char *in1, unsigned char *in2,
1444 + unsigned char *out, unsigned int size)
1445 +{
1446 + int i;
1447 +
1448 + for (i=0;i<size;i++)
1449 + out[i] = in1[i] ^ in2[i];
1450 +
1451 +}
1452 +/*
1453 + * Returns DEFAULT_BLK_SZ bytes of random data per call
1454 + * returns 0 if generation succeded, <0 if something went wrong
1455 + */
1456 +static int _get_more_prng_bytes(struct prng_context *ctx)
1457 +{
1458 + int i;
1459 + struct blkcipher_desc desc;
1460 + struct scatterlist sg_in, sg_out;
1461 + int ret;
1462 + unsigned char tmp[DEFAULT_BLK_SZ];
1463 +
1464 + desc.tfm = ctx->tfm;
1465 + desc.flags = 0;
1466 +
1467 +
1468 + dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n",ctx);
1469 +
1470 + hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ);
1471 + hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ);
1472 + hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ);
1473 +
1474 + /*
1475 + * This algorithm is a 3 stage state machine
1476 + */
1477 + for (i=0;i<3;i++) {
1478 +
1479 + desc.tfm = ctx->tfm;
1480 + desc.flags = 0;
1481 + switch (i) {
1482 + case 0:
1483 + /*
1484 + * Start by encrypting the counter value
1485 + * This gives us an intermediate value I
1486 + */
1487 + memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ);
1488 + sg_init_one(&sg_out, &ctx->I[0], DEFAULT_BLK_SZ);
1489 + hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ);
1490 + break;
1491 + case 1:
1492 +
1493 + /*
1494 + * Next xor I with our secret vector V
1495 + * encrypt that result to obtain our
1496 + * pseudo random data which we output
1497 + */
1498 + xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ);
1499 + sg_init_one(&sg_out, &ctx->rand_data[0], DEFAULT_BLK_SZ);
1500 + hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ);
1501 + break;
1502 + case 2:
1503 + /*
1504 + * First check that we didn't produce the same random data
1505 + * that we did last time around through this
1506 + */
1507 + if (!memcmp(ctx->rand_data, ctx->last_rand_data, DEFAULT_BLK_SZ)) {
1508 + printk(KERN_ERR "ctx %p Failed repetition check!\n",
1509 + ctx);
1510 + ctx->flags |= PRNG_NEED_RESET;
1511 + return -1;
1512 + }
1513 + memcpy(ctx->last_rand_data, ctx->rand_data, DEFAULT_BLK_SZ);
1514 +
1515 + /*
1516 + * Lastly xor the random data with I
1517 + * and encrypt that to obtain a new secret vector V
1518 + */
1519 + xor_vectors(ctx->rand_data, ctx->I, tmp, DEFAULT_BLK_SZ);
1520 + sg_init_one(&sg_out, &ctx->V[0], DEFAULT_BLK_SZ);
1521 + hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ);
1522 + break;
1523 + }
1524 +
1525 + /* Initialize our input buffer */
1526 + sg_init_one(&sg_in, &tmp[0], DEFAULT_BLK_SZ);
1527 +
1528 + /* do the encryption */
1529 + ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, DEFAULT_BLK_SZ);
1530 +
1531 + /* And check the result */
1532 + if (ret) {
1533 + dbgprint(KERN_CRIT "Encryption of new block failed for context %p\n",ctx);
1534 + ctx->rand_data_valid = DEFAULT_BLK_SZ;
1535 + return -1;
1536 + }
1537 +
1538 + }
1539 +
1540 + /*
1541 + * Now update our DT value
1542 + */
1543 + for (i=DEFAULT_BLK_SZ-1;i>0;i--) {
1544 + ctx->DT[i] = ctx->DT[i-1];
1545 + }
1546 + ctx->DT[0] += 1;
1547 +
1548 + dbgprint("Returning new block for context %p\n",ctx);
1549 + ctx->rand_data_valid = 0;
1550 +
1551 + hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ);
1552 + hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ);
1553 + hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ);
1554 + hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ);
1555 +
1556 + return 0;
1557 +}
1558 +
1559 +/* Our exported functions */
1560 +int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx)
1561 +{
1562 + unsigned long flags;
1563 + unsigned char *ptr = buf;
1564 + unsigned int byte_count = (unsigned int)nbytes;
1565 + int err;
1566 +
1567 +
1568 + if (nbytes < 0)
1569 + return -EINVAL;
1570 +
1571 + spin_lock_irqsave(&ctx->prng_lock, flags);
1572 +
1573 + err = -EFAULT;
1574 + if (ctx->flags & PRNG_NEED_RESET)
1575 + goto done;
1576 +
1577 + /*
1578 + * If the FIXED_SIZE flag is on, only return whole blocks of
1579 + * pseudo random data
1580 + */
1581 + err = -EINVAL;
1582 + if (ctx->flags & PRNG_FIXED_SIZE) {
1583 + if (nbytes < DEFAULT_BLK_SZ)
1584 + goto done;
1585 + byte_count = DEFAULT_BLK_SZ;
1586 + }
1587 +
1588 + err = byte_count;
1589 +
1590 + dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",byte_count, ctx);
1591 +
1592 +
1593 +remainder:
1594 + if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
1595 + if (_get_more_prng_bytes(ctx) < 0) {
1596 + memset(buf, 0, nbytes);
1597 + err = -EFAULT;
1598 + goto done;
1599 + }
1600 + }
1601 +
1602 + /*
1603 + * Copy up to the next whole block size
1604 + */
1605 + if (byte_count < DEFAULT_BLK_SZ) {
1606 + for (;ctx->rand_data_valid < DEFAULT_BLK_SZ; ctx->rand_data_valid++) {
1607 + *ptr = ctx->rand_data[ctx->rand_data_valid];
1608 + ptr++;
1609 + byte_count--;
1610 + if (byte_count == 0)
1611 + goto done;
1612 + }
1613 + }
1614 +
1615 + /*
1616 + * Now copy whole blocks
1617 + */
1618 + for(;byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
1619 + if (_get_more_prng_bytes(ctx) < 0) {
1620 + memset(buf, 0, nbytes);
1621 + err = -1;
1622 + goto done;
1623 + }
1624 + memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
1625 + ctx->rand_data_valid += DEFAULT_BLK_SZ;
1626 + ptr += DEFAULT_BLK_SZ;
1627 + }
1628 +
1629 + /*
1630 + * Now copy any extra partial data
1631 + */
1632 + if (byte_count)
1633 + goto remainder;
1634 +
1635 +done:
1636 + spin_unlock_irqrestore(&ctx->prng_lock, flags);
1637 + dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",err, ctx);
1638 + return err;
1639 +}
1640 +EXPORT_SYMBOL_GPL(get_prng_bytes);
1641 +
1642 +struct prng_context *alloc_prng_context(void)
1643 +{
1644 + struct prng_context *ctx=kzalloc(sizeof(struct prng_context), GFP_KERNEL);
1645 +
1646 + spin_lock_init(&ctx->prng_lock);
1647 +
1648 + if (reset_prng_context(ctx, NULL, NULL, NULL, NULL)) {
1649 + kfree(ctx);
1650 + ctx = NULL;
1651 + }
1652 +
1653 + dbgprint(KERN_CRIT "returning context %p\n",ctx);
1654 + return ctx;
1655 +}
1656 +
1657 +EXPORT_SYMBOL_GPL(alloc_prng_context);
1658 +
1659 +void free_prng_context(struct prng_context *ctx)
1660 +{
1661 + crypto_free_blkcipher(ctx->tfm);
1662 + kfree(ctx);
1663 +}
1664 +EXPORT_SYMBOL_GPL(free_prng_context);
1665 +
1666 +int reset_prng_context(struct prng_context *ctx,
1667 + unsigned char *key, unsigned char *iv,
1668 + unsigned char *V, unsigned char *DT)
1669 +{
1670 + int ret;
1671 + int iv_len;
1672 + int rc = -EFAULT;
1673 +
1674 + spin_lock(&ctx->prng_lock);
1675 + ctx->flags |= PRNG_NEED_RESET;
1676 +
1677 + if (key)
1678 + memcpy(ctx->prng_key,key,strlen(ctx->prng_key));
1679 + else
1680 + ctx->prng_key = DEFAULT_PRNG_KEY;
1681 +
1682 + if (iv)
1683 + memcpy(ctx->prng_iv,iv, strlen(ctx->prng_iv));
1684 + else
1685 + ctx->prng_iv = DEFAULT_PRNG_IV;
1686 +
1687 + if (V)
1688 + memcpy(ctx->V,V,DEFAULT_BLK_SZ);
1689 + else
1690 + memcpy(ctx->V,DEFAULT_V_SEED,DEFAULT_BLK_SZ);
1691 +
1692 + if (DT)
1693 + memcpy(ctx->DT, DT, DEFAULT_BLK_SZ);
1694 + else
1695 + memset(ctx->DT, 0, DEFAULT_BLK_SZ);
1696 +
1697 + memset(ctx->rand_data,0,DEFAULT_BLK_SZ);
1698 + memset(ctx->last_rand_data,0,DEFAULT_BLK_SZ);
1699 +
1700 + if (ctx->tfm)
1701 + crypto_free_blkcipher(ctx->tfm);
1702 +
1703 + ctx->tfm = crypto_alloc_blkcipher("rfc3686(ctr(aes))",0,0);
1704 + if (!ctx->tfm) {
1705 + dbgprint(KERN_CRIT "Failed to alloc crypto tfm for context %p\n",ctx->tfm);
1706 + goto out;
1707 + }
1708 +
1709 + ctx->rand_data_valid = DEFAULT_BLK_SZ;
1710 +
1711 + ret = crypto_blkcipher_setkey(ctx->tfm, ctx->prng_key, strlen(ctx->prng_key));
1712 + if (ret) {
1713 + dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n",
1714 + crypto_blkcipher_get_flags(ctx->tfm));
1715 + crypto_free_blkcipher(ctx->tfm);
1716 + goto out;
1717 + }
1718 +
1719 + iv_len = crypto_blkcipher_ivsize(ctx->tfm);
1720 + if (iv_len) {
1721 + crypto_blkcipher_set_iv(ctx->tfm, ctx->prng_iv, iv_len);
1722 + }
1723 + rc = 0;
1724 + ctx->flags &= ~PRNG_NEED_RESET;
1725 +out:
1726 + spin_unlock(&ctx->prng_lock);
1727 +
1728 + return rc;
1729 +
1730 +}
1731 +EXPORT_SYMBOL_GPL(reset_prng_context);
1732 +
1733 +/* Module initialization */
1734 +static int __init prng_mod_init(void)
1735 +{
1736 +
1737 +#ifdef TEST_PRNG_ON_START
1738 + int i;
1739 + unsigned char tmpbuf[DEFAULT_BLK_SZ];
1740 +
1741 + struct prng_context *ctx = alloc_prng_context();
1742 + if (ctx == NULL)
1743 + return -EFAULT;
1744 + for (i=0;i<16;i++) {
1745 + if (get_prng_bytes(tmpbuf, DEFAULT_BLK_SZ, ctx) < 0) {
1746 + free_prng_context(ctx);
1747 + return -EFAULT;
1748 + }
1749 + }
1750 + free_prng_context(ctx);
1751 +#endif
1752 +
1753 + return 0;
1754 +}
1755 +
1756 +static void __exit prng_mod_fini(void)
1757 +{
1758 + return;
1759 +}
1760 +
1761 +MODULE_LICENSE("GPL");
1762 +MODULE_DESCRIPTION("Software Pseudo Random Number Generator");
1763 +MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
1764 +module_param(dbg, int, 0);
1765 +MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
1766 +module_init(prng_mod_init);
1767 +module_exit(prng_mod_fini);
1768 --- /dev/null
1769 +++ b/crypto/prng.h
1770 @@ -0,0 +1,27 @@
1771 +/*
1772 + * PRNG: Pseudo Random Number Generator
1773 + *
1774 + * (C) Neil Horman <nhorman@tuxdriver.com>
1775 + *
1776 + * This program is free software; you can redistribute it and/or modify it
1777 + * under the terms of the GNU General Public License as published by the
1778 + * Free Software Foundation; either version 2 of the License, or (at your
1779 + * option) any later version.
1780 + *
1781 + *
1782 + */
1783 +
1784 +#ifndef _PRNG_H_
1785 +#define _PRNG_H_
1786 +struct prng_context;
1787 +
1788 +int get_prng_bytes(char *buf, int nbytes, struct prng_context *ctx);
1789 +struct prng_context *alloc_prng_context(void);
1790 +int reset_prng_context(struct prng_context *ctx,
1791 + unsigned char *key, unsigned char *iv,
1792 + unsigned char *V,
1793 + unsigned char *DT);
1794 +void free_prng_context(struct prng_context *ctx);
1795 +
1796 +#endif
1797 +
1798 --- /dev/null
1799 +++ b/crypto/ripemd.h
1800 @@ -0,0 +1,43 @@
1801 +/*
1802 + * Common values for RIPEMD algorithms
1803 + */
1804 +
1805 +#ifndef _CRYPTO_RMD_H
1806 +#define _CRYPTO_RMD_H
1807 +
1808 +#define RMD128_DIGEST_SIZE 16
1809 +#define RMD128_BLOCK_SIZE 64
1810 +
1811 +#define RMD160_DIGEST_SIZE 20
1812 +#define RMD160_BLOCK_SIZE 64
1813 +
1814 +#define RMD256_DIGEST_SIZE 32
1815 +#define RMD256_BLOCK_SIZE 64
1816 +
1817 +#define RMD320_DIGEST_SIZE 40
1818 +#define RMD320_BLOCK_SIZE 64
1819 +
1820 +/* initial values */
1821 +#define RMD_H0 0x67452301UL
1822 +#define RMD_H1 0xefcdab89UL
1823 +#define RMD_H2 0x98badcfeUL
1824 +#define RMD_H3 0x10325476UL
1825 +#define RMD_H4 0xc3d2e1f0UL
1826 +#define RMD_H5 0x76543210UL
1827 +#define RMD_H6 0xfedcba98UL
1828 +#define RMD_H7 0x89abcdefUL
1829 +#define RMD_H8 0x01234567UL
1830 +#define RMD_H9 0x3c2d1e0fUL
1831 +
1832 +/* constants */
1833 +#define RMD_K1 0x00000000UL
1834 +#define RMD_K2 0x5a827999UL
1835 +#define RMD_K3 0x6ed9eba1UL
1836 +#define RMD_K4 0x8f1bbcdcUL
1837 +#define RMD_K5 0xa953fd4eUL
1838 +#define RMD_K6 0x50a28be6UL
1839 +#define RMD_K7 0x5c4dd124UL
1840 +#define RMD_K8 0x6d703ef3UL
1841 +#define RMD_K9 0x7a6d76e9UL
1842 +
1843 +#endif
1844 --- /dev/null
1845 +++ b/crypto/rmd128.c
1846 @@ -0,0 +1,325 @@
1847 +/*
1848 + * Cryptographic API.
1849 + *
1850 + * RIPEMD-128 - RACE Integrity Primitives Evaluation Message Digest.
1851 + *
1852 + * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
1853 + *
1854 + * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
1855 + *
1856 + * This program is free software; you can redistribute it and/or modify it
1857 + * under the terms of the GNU General Public License as published by the Free
1858 + * Software Foundation; either version 2 of the License, or (at your option)
1859 + * any later version.
1860 + *
1861 + */
1862 +#include <linux/init.h>
1863 +#include <linux/module.h>
1864 +#include <linux/mm.h>
1865 +#include <linux/crypto.h>
1866 +#include <linux/cryptohash.h>
1867 +#include <linux/types.h>
1868 +#include <asm/byteorder.h>
1869 +
1870 +#include "ripemd.h"
1871 +
1872 +struct rmd128_ctx {
1873 + u64 byte_count;
1874 + u32 state[4];
1875 + __le32 buffer[16];
1876 +};
1877 +
1878 +#define K1 RMD_K1
1879 +#define K2 RMD_K2
1880 +#define K3 RMD_K3
1881 +#define K4 RMD_K4
1882 +#define KK1 RMD_K6
1883 +#define KK2 RMD_K7
1884 +#define KK3 RMD_K8
1885 +#define KK4 RMD_K1
1886 +
1887 +#define F1(x, y, z) (x ^ y ^ z) /* XOR */
1888 +#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
1889 +#define F3(x, y, z) ((x | ~y) ^ z)
1890 +#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
1891 +
1892 +#define ROUND(a, b, c, d, f, k, x, s) { \
1893 + (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
1894 + (a) = rol32((a), (s)); \
1895 +}
1896 +
1897 +static void rmd128_transform(u32 *state, const __le32 *in)
1898 +{
1899 + u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd;
1900 +
1901 + /* Initialize left lane */
1902 + aa = state[0];
1903 + bb = state[1];
1904 + cc = state[2];
1905 + dd = state[3];
1906 +
1907 + /* Initialize right lane */
1908 + aaa = state[0];
1909 + bbb = state[1];
1910 + ccc = state[2];
1911 + ddd = state[3];
1912 +
1913 + /* round 1: left lane */
1914 + ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
1915 + ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
1916 + ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
1917 + ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
1918 + ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
1919 + ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
1920 + ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
1921 + ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
1922 + ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
1923 + ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
1924 + ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
1925 + ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
1926 + ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
1927 + ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
1928 + ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
1929 + ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);
1930 +
1931 + /* round 2: left lane */
1932 + ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
1933 + ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
1934 + ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
1935 + ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
1936 + ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
1937 + ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
1938 + ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
1939 + ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
1940 + ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
1941 + ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
1942 + ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
1943 + ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
1944 + ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
1945 + ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
1946 + ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
1947 + ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);
1948 +
1949 + /* round 3: left lane */
1950 + ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
1951 + ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
1952 + ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
1953 + ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
1954 + ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
1955 + ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
1956 + ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
1957 + ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
1958 + ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
1959 + ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
1960 + ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
1961 + ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
1962 + ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
1963 + ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
1964 + ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
1965 + ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);
1966 +
1967 + /* round 4: left lane */
1968 + ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
1969 + ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
1970 + ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
1971 + ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
1972 + ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
1973 + ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
1974 + ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
1975 + ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
1976 + ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
1977 + ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
1978 + ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
1979 + ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
1980 + ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
1981 + ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
1982 + ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
1983 + ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);
1984 +
1985 + /* round 1: right lane */
1986 + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
1987 + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
1988 + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
1989 + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
1990 + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
1991 + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
1992 + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
1993 + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
1994 + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
1995 + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
1996 + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
1997 + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
1998 + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
1999 + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
2000 + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
2001 + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
2002 +
2003 + /* round 2: right lane */
2004 + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
2005 + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
2006 + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
2007 + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
2008 + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
2009 + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
2010 + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
2011 + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
2012 + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
2013 + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
2014 + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
2015 + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
2016 + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
2017 + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
2018 + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
2019 + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
2020 +
2021 + /* round 3: right lane */
2022 + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
2023 + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
2024 + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
2025 + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
2026 + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
2027 + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
2028 + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
2029 + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
2030 + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
2031 + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
2032 + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
2033 + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
2034 + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
2035 + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
2036 + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
2037 + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
2038 +
2039 + /* round 4: right lane */
2040 + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
2041 + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
2042 + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
2043 + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
2044 + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
2045 + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
2046 + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
2047 + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
2048 + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
2049 + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
2050 + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
2051 + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
2052 + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
2053 + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
2054 + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
2055 + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
2056 +
2057 + /* combine results */
2058 + ddd += cc + state[1]; /* final result for state[0] */
2059 + state[1] = state[2] + dd + aaa;
2060 + state[2] = state[3] + aa + bbb;
2061 + state[3] = state[0] + bb + ccc;
2062 + state[0] = ddd;
2063 +
2064 + return;
2065 +}
2066 +
2067 +static void rmd128_init(struct crypto_tfm *tfm)
2068 +{
2069 + struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
2070 +
2071 + rctx->byte_count = 0;
2072 +
2073 + rctx->state[0] = RMD_H0;
2074 + rctx->state[1] = RMD_H1;
2075 + rctx->state[2] = RMD_H2;
2076 + rctx->state[3] = RMD_H3;
2077 +
2078 + memset(rctx->buffer, 0, sizeof(rctx->buffer));
2079 +}
2080 +
2081 +static void rmd128_update(struct crypto_tfm *tfm, const u8 *data,
2082 + unsigned int len)
2083 +{
2084 + struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
2085 + const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
2086 +
2087 + rctx->byte_count += len;
2088 +
2089 + /* Enough space in buffer? If so copy and we're done */
2090 + if (avail > len) {
2091 + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
2092 + data, len);
2093 + return;
2094 + }
2095 +
2096 + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
2097 + data, avail);
2098 +
2099 + rmd128_transform(rctx->state, rctx->buffer);
2100 + data += avail;
2101 + len -= avail;
2102 +
2103 + while (len >= sizeof(rctx->buffer)) {
2104 + memcpy(rctx->buffer, data, sizeof(rctx->buffer));
2105 + rmd128_transform(rctx->state, rctx->buffer);
2106 + data += sizeof(rctx->buffer);
2107 + len -= sizeof(rctx->buffer);
2108 + }
2109 +
2110 + memcpy(rctx->buffer, data, len);
2111 +}
2112 +
2113 +/* Add padding and return the message digest. */
2114 +static void rmd128_final(struct crypto_tfm *tfm, u8 *out)
2115 +{
2116 + struct rmd128_ctx *rctx = crypto_tfm_ctx(tfm);
2117 + u32 i, index, padlen;
2118 + __le64 bits;
2119 + __le32 *dst = (__le32 *)out;
2120 + static const u8 padding[64] = { 0x80, };
2121 +
2122 + bits = cpu_to_le64(rctx->byte_count << 3);
2123 +
2124 + /* Pad out to 56 mod 64 */
2125 + index = rctx->byte_count & 0x3f;
2126 + padlen = (index < 56) ? (56 - index) : ((64+56) - index);
2127 + rmd128_update(tfm, padding, padlen);
2128 +
2129 + /* Append length */
2130 + rmd128_update(tfm, (const u8 *)&bits, sizeof(bits));
2131 +
2132 + /* Store state in digest */
2133 + for (i = 0; i < 4; i++)
2134 + dst[i] = cpu_to_le32p(&rctx->state[i]);
2135 +
2136 + /* Wipe context */
2137 + memset(rctx, 0, sizeof(*rctx));
2138 +}
2139 +
2140 +static struct crypto_alg alg = {
2141 + .cra_name = "rmd128",
2142 + .cra_driver_name = "rmd128",
2143 + .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
2144 + .cra_blocksize = RMD128_BLOCK_SIZE,
2145 + .cra_ctxsize = sizeof(struct rmd128_ctx),
2146 + .cra_module = THIS_MODULE,
2147 + .cra_list = LIST_HEAD_INIT(alg.cra_list),
2148 + .cra_u = { .digest = {
2149 + .dia_digestsize = RMD128_DIGEST_SIZE,
2150 + .dia_init = rmd128_init,
2151 + .dia_update = rmd128_update,
2152 + .dia_final = rmd128_final } }
2153 +};
2154 +
2155 +static int __init rmd128_mod_init(void)
2156 +{
2157 + return crypto_register_alg(&alg);
2158 +}
2159 +
2160 +static void __exit rmd128_mod_fini(void)
2161 +{
2162 + crypto_unregister_alg(&alg);
2163 +}
2164 +
2165 +module_init(rmd128_mod_init);
2166 +module_exit(rmd128_mod_fini);
2167 +
2168 +MODULE_LICENSE("GPL");
2169 +MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
2170 +
2171 +MODULE_ALIAS("rmd128");
2172 --- /dev/null
2173 +++ b/crypto/rmd160.c
2174 @@ -0,0 +1,369 @@
2175 +/*
2176 + * Cryptographic API.
2177 + *
2178 + * RIPEMD-160 - RACE Integrity Primitives Evaluation Message Digest.
2179 + *
2180 + * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
2181 + *
2182 + * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
2183 + *
2184 + * This program is free software; you can redistribute it and/or modify it
2185 + * under the terms of the GNU General Public License as published by the Free
2186 + * Software Foundation; either version 2 of the License, or (at your option)
2187 + * any later version.
2188 + *
2189 + */
2190 +#include <linux/init.h>
2191 +#include <linux/module.h>
2192 +#include <linux/mm.h>
2193 +#include <linux/crypto.h>
2194 +#include <linux/cryptohash.h>
2195 +#include <linux/types.h>
2196 +#include <asm/byteorder.h>
2197 +
2198 +#include "ripemd.h"
2199 +
2200 +struct rmd160_ctx {
2201 + u64 byte_count;
2202 + u32 state[5];
2203 + __le32 buffer[16];
2204 +};
2205 +
2206 +#define K1 RMD_K1
2207 +#define K2 RMD_K2
2208 +#define K3 RMD_K3
2209 +#define K4 RMD_K4
2210 +#define K5 RMD_K5
2211 +#define KK1 RMD_K6
2212 +#define KK2 RMD_K7
2213 +#define KK3 RMD_K8
2214 +#define KK4 RMD_K9
2215 +#define KK5 RMD_K1
2216 +
2217 +#define F1(x, y, z) (x ^ y ^ z) /* XOR */
2218 +#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
2219 +#define F3(x, y, z) ((x | ~y) ^ z)
2220 +#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
2221 +#define F5(x, y, z) (x ^ (y | ~z))
2222 +
2223 +#define ROUND(a, b, c, d, e, f, k, x, s) { \
2224 + (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
2225 + (a) = rol32((a), (s)) + (e); \
2226 + (c) = rol32((c), 10); \
2227 +}
2228 +
2229 +static void rmd160_transform(u32 *state, const __le32 *in)
2230 +{
2231 + u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee;
2232 +
2233 + /* Initialize left lane */
2234 + aa = state[0];
2235 + bb = state[1];
2236 + cc = state[2];
2237 + dd = state[3];
2238 + ee = state[4];
2239 +
2240 + /* Initialize right lane */
2241 + aaa = state[0];
2242 + bbb = state[1];
2243 + ccc = state[2];
2244 + ddd = state[3];
2245 + eee = state[4];
2246 +
2247 + /* round 1: left lane */
2248 + ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11);
2249 + ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14);
2250 + ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15);
2251 + ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12);
2252 + ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5);
2253 + ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8);
2254 + ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7);
2255 + ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9);
2256 + ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11);
2257 + ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13);
2258 + ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
2259 + ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
2260 + ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6);
2261 + ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7);
2262 + ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9);
2263 + ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8);
2264 +
2265 +	/* round 2: left lane */
2266 + ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
2267 + ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6);
2268 + ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8);
2269 + ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13);
2270 + ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
2271 + ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9);
2272 + ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7);
2273 + ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15);
2274 + ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7);
2275 + ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12);
2276 + ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15);
2277 + ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9);
2278 + ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11);
2279 + ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7);
2280 + ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
2281 + ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12);
2282 +
2283 +	/* round 3: left lane */
2284 + ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
2285 + ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
2286 + ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6);
2287 + ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7);
2288 + ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14);
2289 + ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9);
2290 + ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13);
2291 + ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15);
2292 + ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14);
2293 + ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8);
2294 + ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13);
2295 + ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6);
2296 + ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5);
2297 + ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
2298 + ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7);
2299 + ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5);
2300 +
2301 +	/* round 4: left lane */
2302 + ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
2303 + ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12);
2304 + ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
2305 + ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
2306 + ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14);
2307 + ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15);
2308 + ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9);
2309 + ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8);
2310 + ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9);
2311 + ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14);
2312 + ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5);
2313 + ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6);
2314 + ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8);
2315 + ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6);
2316 + ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5);
2317 + ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12);
2318 +
2319 +	/* round 5: left lane */
2320 + ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
2321 + ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15);
2322 + ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5);
2323 + ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11);
2324 + ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6);
2325 + ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8);
2326 + ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13);
2327 + ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
2328 + ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5);
2329 + ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12);
2330 + ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13);
2331 + ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14);
2332 + ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
2333 + ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8);
2334 + ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5);
2335 + ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6);
2336 +
2337 + /* round 1: right lane */
2338 + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8);
2339 + ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9);
2340 + ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9);
2341 + ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11);
2342 + ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13);
2343 + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15);
2344 + ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15);
2345 + ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5);
2346 + ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7);
2347 + ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7);
2348 + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8);
2349 + ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11);
2350 + ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14);
2351 + ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14);
2352 + ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12);
2353 + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
2354 +
2355 + /* round 2: right lane */
2356 + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9);
2357 + ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13);
2358 + ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15);
2359 + ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7);
2360 + ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12);
2361 + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8);
2362 + ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9);
2363 + ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11);
2364 + ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7);
2365 + ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7);
2366 + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12);
2367 + ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7);
2368 + ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6);
2369 + ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15);
2370 + ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13);
2371 + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
2372 +
2373 + /* round 3: right lane */
2374 + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9);
2375 + ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7);
2376 + ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15);
2377 + ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11);
2378 + ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8);
2379 + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6);
2380 + ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6);
2381 + ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14);
2382 + ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12);
2383 + ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13);
2384 + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5);
2385 + ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14);
2386 + ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13);
2387 + ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13);
2388 + ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7);
2389 + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
2390 +
2391 + /* round 4: right lane */
2392 + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15);
2393 + ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5);
2394 + ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8);
2395 + ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11);
2396 + ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14);
2397 + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14);
2398 + ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6);
2399 + ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14);
2400 + ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6);
2401 + ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9);
2402 + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12);
2403 + ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9);
2404 + ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12);
2405 + ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5);
2406 + ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15);
2407 + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
2408 +
2409 + /* round 5: right lane */
2410 + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8);
2411 + ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5);
2412 + ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12);
2413 + ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9);
2414 + ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12);
2415 + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5);
2416 + ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14);
2417 + ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6);
2418 + ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8);
2419 + ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13);
2420 + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6);
2421 + ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5);
2422 + ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15);
2423 + ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13);
2424 + ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11);
2425 + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
2426 +
2427 + /* combine results */
2428 + ddd += cc + state[1]; /* final result for state[0] */
2429 + state[1] = state[2] + dd + eee;
2430 + state[2] = state[3] + ee + aaa;
2431 + state[3] = state[4] + aa + bbb;
2432 + state[4] = state[0] + bb + ccc;
2433 + state[0] = ddd;
2434 +
2435 + return;
2436 +}
2437 +
2438 +static void rmd160_init(struct crypto_tfm *tfm)
2439 +{
2440 + struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
2441 +
2442 + rctx->byte_count = 0;
2443 +
2444 + rctx->state[0] = RMD_H0;
2445 + rctx->state[1] = RMD_H1;
2446 + rctx->state[2] = RMD_H2;
2447 + rctx->state[3] = RMD_H3;
2448 + rctx->state[4] = RMD_H4;
2449 +
2450 + memset(rctx->buffer, 0, sizeof(rctx->buffer));
2451 +}
2452 +
2453 +static void rmd160_update(struct crypto_tfm *tfm, const u8 *data,
2454 + unsigned int len)
2455 +{
2456 + struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
2457 + const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
2458 +
2459 + rctx->byte_count += len;
2460 +
2461 + /* Enough space in buffer? If so copy and we're done */
2462 + if (avail > len) {
2463 + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
2464 + data, len);
2465 + return;
2466 + }
2467 +
2468 + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
2469 + data, avail);
2470 +
2471 + rmd160_transform(rctx->state, rctx->buffer);
2472 + data += avail;
2473 + len -= avail;
2474 +
2475 + while (len >= sizeof(rctx->buffer)) {
2476 + memcpy(rctx->buffer, data, sizeof(rctx->buffer));
2477 + rmd160_transform(rctx->state, rctx->buffer);
2478 + data += sizeof(rctx->buffer);
2479 + len -= sizeof(rctx->buffer);
2480 + }
2481 +
2482 + memcpy(rctx->buffer, data, len);
2483 +}
2484 +
2485 +/* Add padding and return the message digest. */
2486 +static void rmd160_final(struct crypto_tfm *tfm, u8 *out)
2487 +{
2488 + struct rmd160_ctx *rctx = crypto_tfm_ctx(tfm);
2489 + u32 i, index, padlen;
2490 + __le64 bits;
2491 + __le32 *dst = (__le32 *)out;
2492 + static const u8 padding[64] = { 0x80, };
2493 +
2494 + bits = cpu_to_le64(rctx->byte_count << 3);
2495 +
2496 + /* Pad out to 56 mod 64 */
2497 + index = rctx->byte_count & 0x3f;
2498 + padlen = (index < 56) ? (56 - index) : ((64+56) - index);
2499 + rmd160_update(tfm, padding, padlen);
2500 +
2501 + /* Append length */
2502 + rmd160_update(tfm, (const u8 *)&bits, sizeof(bits));
2503 +
2504 + /* Store state in digest */
2505 + for (i = 0; i < 5; i++)
2506 + dst[i] = cpu_to_le32p(&rctx->state[i]);
2507 +
2508 + /* Wipe context */
2509 + memset(rctx, 0, sizeof(*rctx));
2510 +}
2511 +
2512 +static struct crypto_alg alg = {
2513 + .cra_name = "rmd160",
2514 + .cra_driver_name = "rmd160",
2515 + .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
2516 + .cra_blocksize = RMD160_BLOCK_SIZE,
2517 + .cra_ctxsize = sizeof(struct rmd160_ctx),
2518 + .cra_module = THIS_MODULE,
2519 + .cra_list = LIST_HEAD_INIT(alg.cra_list),
2520 + .cra_u = { .digest = {
2521 + .dia_digestsize = RMD160_DIGEST_SIZE,
2522 + .dia_init = rmd160_init,
2523 + .dia_update = rmd160_update,
2524 + .dia_final = rmd160_final } }
2525 +};
2526 +
2527 +static int __init rmd160_mod_init(void)
2528 +{
2529 + return crypto_register_alg(&alg);
2530 +}
2531 +
2532 +static void __exit rmd160_mod_fini(void)
2533 +{
2534 + crypto_unregister_alg(&alg);
2535 +}
2536 +
2537 +module_init(rmd160_mod_init);
2538 +module_exit(rmd160_mod_fini);
2539 +
2540 +MODULE_LICENSE("GPL");
2541 +MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
2542 +
2543 +MODULE_ALIAS("rmd160");
2544 --- /dev/null
2545 +++ b/crypto/rmd256.c
2546 @@ -0,0 +1,344 @@
2547 +/*
2548 + * Cryptographic API.
2549 + *
2550 + * RIPEMD-256 - RACE Integrity Primitives Evaluation Message Digest.
2551 + *
2552 + * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
2553 + *
2554 + * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
2555 + *
2556 + * This program is free software; you can redistribute it and/or modify it
2557 + * under the terms of the GNU General Public License as published by the Free
2558 + * Software Foundation; either version 2 of the License, or (at your option)
2559 + * any later version.
2560 + *
2561 + */
2562 +#include <linux/init.h>
2563 +#include <linux/module.h>
2564 +#include <linux/mm.h>
2565 +#include <linux/crypto.h>
2566 +#include <linux/cryptohash.h>
2567 +#include <linux/types.h>
2568 +#include <asm/byteorder.h>
2569 +
2570 +#include "ripemd.h"
2571 +
2572 +struct rmd256_ctx {
2573 + u64 byte_count;
2574 + u32 state[8];
2575 + __le32 buffer[16];
2576 +};
2577 +
2578 +#define K1 RMD_K1
2579 +#define K2 RMD_K2
2580 +#define K3 RMD_K3
2581 +#define K4 RMD_K4
2582 +#define KK1 RMD_K6
2583 +#define KK2 RMD_K7
2584 +#define KK3 RMD_K8
2585 +#define KK4 RMD_K1
2586 +
2587 +#define F1(x, y, z) (x ^ y ^ z) /* XOR */
2588 +#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
2589 +#define F3(x, y, z) ((x | ~y) ^ z)
2590 +#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
2591 +
2592 +#define ROUND(a, b, c, d, f, k, x, s) { \
2593 + (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
2594 + (a) = rol32((a), (s)); \
2595 +}
2596 +
2597 +static void rmd256_transform(u32 *state, const __le32 *in)
2598 +{
2599 + u32 aa, bb, cc, dd, aaa, bbb, ccc, ddd, tmp;
2600 +
2601 + /* Initialize left lane */
2602 + aa = state[0];
2603 + bb = state[1];
2604 + cc = state[2];
2605 + dd = state[3];
2606 +
2607 + /* Initialize right lane */
2608 + aaa = state[4];
2609 + bbb = state[5];
2610 + ccc = state[6];
2611 + ddd = state[7];
2612 +
2613 + /* round 1: left lane */
2614 + ROUND(aa, bb, cc, dd, F1, K1, in[0], 11);
2615 + ROUND(dd, aa, bb, cc, F1, K1, in[1], 14);
2616 + ROUND(cc, dd, aa, bb, F1, K1, in[2], 15);
2617 + ROUND(bb, cc, dd, aa, F1, K1, in[3], 12);
2618 + ROUND(aa, bb, cc, dd, F1, K1, in[4], 5);
2619 + ROUND(dd, aa, bb, cc, F1, K1, in[5], 8);
2620 + ROUND(cc, dd, aa, bb, F1, K1, in[6], 7);
2621 + ROUND(bb, cc, dd, aa, F1, K1, in[7], 9);
2622 + ROUND(aa, bb, cc, dd, F1, K1, in[8], 11);
2623 + ROUND(dd, aa, bb, cc, F1, K1, in[9], 13);
2624 + ROUND(cc, dd, aa, bb, F1, K1, in[10], 14);
2625 + ROUND(bb, cc, dd, aa, F1, K1, in[11], 15);
2626 + ROUND(aa, bb, cc, dd, F1, K1, in[12], 6);
2627 + ROUND(dd, aa, bb, cc, F1, K1, in[13], 7);
2628 + ROUND(cc, dd, aa, bb, F1, K1, in[14], 9);
2629 + ROUND(bb, cc, dd, aa, F1, K1, in[15], 8);
2630 +
2631 + /* round 1: right lane */
2632 + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[5], 8);
2633 + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[14], 9);
2634 + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[7], 9);
2635 + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[0], 11);
2636 + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[9], 13);
2637 + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[2], 15);
2638 + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[11], 15);
2639 + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[4], 5);
2640 + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[13], 7);
2641 + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[6], 7);
2642 + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[15], 8);
2643 + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[8], 11);
2644 + ROUND(aaa, bbb, ccc, ddd, F4, KK1, in[1], 14);
2645 + ROUND(ddd, aaa, bbb, ccc, F4, KK1, in[10], 14);
2646 + ROUND(ccc, ddd, aaa, bbb, F4, KK1, in[3], 12);
2647 + ROUND(bbb, ccc, ddd, aaa, F4, KK1, in[12], 6);
2648 +
2649 + /* Swap contents of "a" registers */
2650 + tmp = aa; aa = aaa; aaa = tmp;
2651 +
2652 + /* round 2: left lane */
2653 + ROUND(aa, bb, cc, dd, F2, K2, in[7], 7);
2654 + ROUND(dd, aa, bb, cc, F2, K2, in[4], 6);
2655 + ROUND(cc, dd, aa, bb, F2, K2, in[13], 8);
2656 + ROUND(bb, cc, dd, aa, F2, K2, in[1], 13);
2657 + ROUND(aa, bb, cc, dd, F2, K2, in[10], 11);
2658 + ROUND(dd, aa, bb, cc, F2, K2, in[6], 9);
2659 + ROUND(cc, dd, aa, bb, F2, K2, in[15], 7);
2660 + ROUND(bb, cc, dd, aa, F2, K2, in[3], 15);
2661 + ROUND(aa, bb, cc, dd, F2, K2, in[12], 7);
2662 + ROUND(dd, aa, bb, cc, F2, K2, in[0], 12);
2663 + ROUND(cc, dd, aa, bb, F2, K2, in[9], 15);
2664 + ROUND(bb, cc, dd, aa, F2, K2, in[5], 9);
2665 + ROUND(aa, bb, cc, dd, F2, K2, in[2], 11);
2666 + ROUND(dd, aa, bb, cc, F2, K2, in[14], 7);
2667 + ROUND(cc, dd, aa, bb, F2, K2, in[11], 13);
2668 + ROUND(bb, cc, dd, aa, F2, K2, in[8], 12);
2669 +
2670 + /* round 2: right lane */
2671 + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[6], 9);
2672 + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[11], 13);
2673 + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[3], 15);
2674 + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[7], 7);
2675 + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[0], 12);
2676 + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[13], 8);
2677 + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[5], 9);
2678 + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[10], 11);
2679 + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[14], 7);
2680 + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[15], 7);
2681 + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[8], 12);
2682 + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[12], 7);
2683 + ROUND(aaa, bbb, ccc, ddd, F3, KK2, in[4], 6);
2684 + ROUND(ddd, aaa, bbb, ccc, F3, KK2, in[9], 15);
2685 + ROUND(ccc, ddd, aaa, bbb, F3, KK2, in[1], 13);
2686 + ROUND(bbb, ccc, ddd, aaa, F3, KK2, in[2], 11);
2687 +
2688 + /* Swap contents of "b" registers */
2689 + tmp = bb; bb = bbb; bbb = tmp;
2690 +
2691 + /* round 3: left lane */
2692 + ROUND(aa, bb, cc, dd, F3, K3, in[3], 11);
2693 + ROUND(dd, aa, bb, cc, F3, K3, in[10], 13);
2694 + ROUND(cc, dd, aa, bb, F3, K3, in[14], 6);
2695 + ROUND(bb, cc, dd, aa, F3, K3, in[4], 7);
2696 + ROUND(aa, bb, cc, dd, F3, K3, in[9], 14);
2697 + ROUND(dd, aa, bb, cc, F3, K3, in[15], 9);
2698 + ROUND(cc, dd, aa, bb, F3, K3, in[8], 13);
2699 + ROUND(bb, cc, dd, aa, F3, K3, in[1], 15);
2700 + ROUND(aa, bb, cc, dd, F3, K3, in[2], 14);
2701 + ROUND(dd, aa, bb, cc, F3, K3, in[7], 8);
2702 + ROUND(cc, dd, aa, bb, F3, K3, in[0], 13);
2703 + ROUND(bb, cc, dd, aa, F3, K3, in[6], 6);
2704 + ROUND(aa, bb, cc, dd, F3, K3, in[13], 5);
2705 + ROUND(dd, aa, bb, cc, F3, K3, in[11], 12);
2706 + ROUND(cc, dd, aa, bb, F3, K3, in[5], 7);
2707 + ROUND(bb, cc, dd, aa, F3, K3, in[12], 5);
2708 +
2709 + /* round 3: right lane */
2710 + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[15], 9);
2711 + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[5], 7);
2712 + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[1], 15);
2713 + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[3], 11);
2714 + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[7], 8);
2715 + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[14], 6);
2716 + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[6], 6);
2717 + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[9], 14);
2718 + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[11], 12);
2719 + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[8], 13);
2720 + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[12], 5);
2721 + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[2], 14);
2722 + ROUND(aaa, bbb, ccc, ddd, F2, KK3, in[10], 13);
2723 + ROUND(ddd, aaa, bbb, ccc, F2, KK3, in[0], 13);
2724 + ROUND(ccc, ddd, aaa, bbb, F2, KK3, in[4], 7);
2725 + ROUND(bbb, ccc, ddd, aaa, F2, KK3, in[13], 5);
2726 +
2727 + /* Swap contents of "c" registers */
2728 + tmp = cc; cc = ccc; ccc = tmp;
2729 +
2730 + /* round 4: left lane */
2731 + ROUND(aa, bb, cc, dd, F4, K4, in[1], 11);
2732 + ROUND(dd, aa, bb, cc, F4, K4, in[9], 12);
2733 + ROUND(cc, dd, aa, bb, F4, K4, in[11], 14);
2734 + ROUND(bb, cc, dd, aa, F4, K4, in[10], 15);
2735 + ROUND(aa, bb, cc, dd, F4, K4, in[0], 14);
2736 + ROUND(dd, aa, bb, cc, F4, K4, in[8], 15);
2737 + ROUND(cc, dd, aa, bb, F4, K4, in[12], 9);
2738 + ROUND(bb, cc, dd, aa, F4, K4, in[4], 8);
2739 + ROUND(aa, bb, cc, dd, F4, K4, in[13], 9);
2740 + ROUND(dd, aa, bb, cc, F4, K4, in[3], 14);
2741 + ROUND(cc, dd, aa, bb, F4, K4, in[7], 5);
2742 + ROUND(bb, cc, dd, aa, F4, K4, in[15], 6);
2743 + ROUND(aa, bb, cc, dd, F4, K4, in[14], 8);
2744 + ROUND(dd, aa, bb, cc, F4, K4, in[5], 6);
2745 + ROUND(cc, dd, aa, bb, F4, K4, in[6], 5);
2746 + ROUND(bb, cc, dd, aa, F4, K4, in[2], 12);
2747 +
2748 + /* round 4: right lane */
2749 + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[8], 15);
2750 + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[6], 5);
2751 + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[4], 8);
2752 + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[1], 11);
2753 + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[3], 14);
2754 + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[11], 14);
2755 + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[15], 6);
2756 + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[0], 14);
2757 + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[5], 6);
2758 + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[12], 9);
2759 + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[2], 12);
2760 + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[13], 9);
2761 + ROUND(aaa, bbb, ccc, ddd, F1, KK4, in[9], 12);
2762 + ROUND(ddd, aaa, bbb, ccc, F1, KK4, in[7], 5);
2763 + ROUND(ccc, ddd, aaa, bbb, F1, KK4, in[10], 15);
2764 + ROUND(bbb, ccc, ddd, aaa, F1, KK4, in[14], 8);
2765 +
2766 + /* Swap contents of "d" registers */
2767 + tmp = dd; dd = ddd; ddd = tmp;
2768 +
2769 + /* combine results */
2770 + state[0] += aa;
2771 + state[1] += bb;
2772 + state[2] += cc;
2773 + state[3] += dd;
2774 + state[4] += aaa;
2775 + state[5] += bbb;
2776 + state[6] += ccc;
2777 + state[7] += ddd;
2778 +
2779 + return;
2780 +}
2781 +
2782 +static void rmd256_init(struct crypto_tfm *tfm)
2783 +{
2784 + struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
2785 +
2786 + rctx->byte_count = 0;
2787 +
2788 + rctx->state[0] = RMD_H0;
2789 + rctx->state[1] = RMD_H1;
2790 + rctx->state[2] = RMD_H2;
2791 + rctx->state[3] = RMD_H3;
2792 + rctx->state[4] = RMD_H5;
2793 + rctx->state[5] = RMD_H6;
2794 + rctx->state[6] = RMD_H7;
2795 + rctx->state[7] = RMD_H8;
2796 +
2797 + memset(rctx->buffer, 0, sizeof(rctx->buffer));
2798 +}
2799 +
2800 +static void rmd256_update(struct crypto_tfm *tfm, const u8 *data,
2801 + unsigned int len)
2802 +{
2803 + struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
2804 + const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
2805 +
2806 + rctx->byte_count += len;
2807 +
2808 + /* Enough space in buffer? If so copy and we're done */
2809 + if (avail > len) {
2810 + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
2811 + data, len);
2812 + return;
2813 + }
2814 +
2815 + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
2816 + data, avail);
2817 +
2818 + rmd256_transform(rctx->state, rctx->buffer);
2819 + data += avail;
2820 + len -= avail;
2821 +
2822 + while (len >= sizeof(rctx->buffer)) {
2823 + memcpy(rctx->buffer, data, sizeof(rctx->buffer));
2824 + rmd256_transform(rctx->state, rctx->buffer);
2825 + data += sizeof(rctx->buffer);
2826 + len -= sizeof(rctx->buffer);
2827 + }
2828 +
2829 + memcpy(rctx->buffer, data, len);
2830 +}
2831 +
2832 +/* Add padding and return the message digest. */
2833 +static void rmd256_final(struct crypto_tfm *tfm, u8 *out)
2834 +{
2835 + struct rmd256_ctx *rctx = crypto_tfm_ctx(tfm);
2836 + u32 i, index, padlen;
2837 + __le64 bits;
2838 + __le32 *dst = (__le32 *)out;
2839 + static const u8 padding[64] = { 0x80, };
2840 +
2841 + bits = cpu_to_le64(rctx->byte_count << 3);
2842 +
2843 + /* Pad out to 56 mod 64 */
2844 + index = rctx->byte_count & 0x3f;
2845 + padlen = (index < 56) ? (56 - index) : ((64+56) - index);
2846 + rmd256_update(tfm, padding, padlen);
2847 +
2848 + /* Append length */
2849 + rmd256_update(tfm, (const u8 *)&bits, sizeof(bits));
2850 +
2851 + /* Store state in digest */
2852 + for (i = 0; i < 8; i++)
2853 + dst[i] = cpu_to_le32p(&rctx->state[i]);
2854 +
2855 + /* Wipe context */
2856 + memset(rctx, 0, sizeof(*rctx));
2857 +}
2858 +
2859 +static struct crypto_alg alg = {
2860 + .cra_name = "rmd256",
2861 + .cra_driver_name = "rmd256",
2862 + .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
2863 + .cra_blocksize = RMD256_BLOCK_SIZE,
2864 + .cra_ctxsize = sizeof(struct rmd256_ctx),
2865 + .cra_module = THIS_MODULE,
2866 + .cra_list = LIST_HEAD_INIT(alg.cra_list),
2867 + .cra_u = { .digest = {
2868 + .dia_digestsize = RMD256_DIGEST_SIZE,
2869 + .dia_init = rmd256_init,
2870 + .dia_update = rmd256_update,
2871 + .dia_final = rmd256_final } }
2872 +};
2873 +
2874 +static int __init rmd256_mod_init(void)
2875 +{
2876 + return crypto_register_alg(&alg);
2877 +}
2878 +
2879 +static void __exit rmd256_mod_fini(void)
2880 +{
2881 + crypto_unregister_alg(&alg);
2882 +}
2883 +
2884 +module_init(rmd256_mod_init);
2885 +module_exit(rmd256_mod_fini);
2886 +
2887 +MODULE_LICENSE("GPL");
2888 +MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
2889 +
2890 +MODULE_ALIAS("rmd256");
2891 --- /dev/null
2892 +++ b/crypto/rmd320.c
2893 @@ -0,0 +1,393 @@
2894 +/*
2895 + * Cryptographic API.
2896 + *
2897 + * RIPEMD-320 - RACE Integrity Primitives Evaluation Message Digest.
2898 + *
2899 + * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
2900 + *
2901 + * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
2902 + *
2903 + * This program is free software; you can redistribute it and/or modify it
2904 + * under the terms of the GNU General Public License as published by the Free
2905 + * Software Foundation; either version 2 of the License, or (at your option)
2906 + * any later version.
2907 + *
2908 + */
2909 +#include <linux/init.h>
2910 +#include <linux/module.h>
2911 +#include <linux/mm.h>
2912 +#include <linux/crypto.h>
2913 +#include <linux/cryptohash.h>
2914 +#include <linux/types.h>
2915 +#include <asm/byteorder.h>
2916 +
2917 +#include "ripemd.h"
2918 +
2919 +struct rmd320_ctx {
2920 + u64 byte_count;
2921 + u32 state[10];
2922 + __le32 buffer[16];
2923 +};
2924 +
2925 +#define K1 RMD_K1
2926 +#define K2 RMD_K2
2927 +#define K3 RMD_K3
2928 +#define K4 RMD_K4
2929 +#define K5 RMD_K5
2930 +#define KK1 RMD_K6
2931 +#define KK2 RMD_K7
2932 +#define KK3 RMD_K8
2933 +#define KK4 RMD_K9
2934 +#define KK5 RMD_K1
2935 +
2936 +#define F1(x, y, z) (x ^ y ^ z) /* XOR */
2937 +#define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */
2938 +#define F3(x, y, z) ((x | ~y) ^ z)
2939 +#define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */
2940 +#define F5(x, y, z) (x ^ (y | ~z))
2941 +
2942 +#define ROUND(a, b, c, d, e, f, k, x, s) { \
2943 + (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \
2944 + (a) = rol32((a), (s)) + (e); \
2945 + (c) = rol32((c), 10); \
2946 +}
2947 +
2948 +static void rmd320_transform(u32 *state, const __le32 *in)
2949 +{
2950 + u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp;
2951 +
2952 + /* Initialize left lane */
2953 + aa = state[0];
2954 + bb = state[1];
2955 + cc = state[2];
2956 + dd = state[3];
2957 + ee = state[4];
2958 +
2959 + /* Initialize right lane */
2960 + aaa = state[5];
2961 + bbb = state[6];
2962 + ccc = state[7];
2963 + ddd = state[8];
2964 + eee = state[9];
2965 +
2966 + /* round 1: left lane */
2967 + ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11);
2968 + ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14);
2969 + ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15);
2970 + ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12);
2971 + ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5);
2972 + ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8);
2973 + ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7);
2974 + ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9);
2975 + ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11);
2976 + ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13);
2977 + ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14);
2978 + ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15);
2979 + ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6);
2980 + ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7);
2981 + ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9);
2982 + ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8);
2983 +
2984 + /* round 1: right lane */
2985 + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8);
2986 + ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9);
2987 + ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9);
2988 + ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11);
2989 + ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13);
2990 + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15);
2991 + ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15);
2992 + ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5);
2993 + ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7);
2994 + ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7);
2995 + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8);
2996 + ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11);
2997 + ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14);
2998 + ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14);
2999 + ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12);
3000 + ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6);
3001 +
3002 + /* Swap contents of "a" registers */
3003 + tmp = aa; aa = aaa; aaa = tmp;
3004 +
3005 + /* round 2: left lane */
3006 + ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7);
3007 + ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6);
3008 + ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8);
3009 + ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13);
3010 + ROUND(aa, bb, cc, dd, ee, F2, K2, in[10], 11);
3011 + ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9);
3012 + ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7);
3013 + ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15);
3014 + ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7);
3015 + ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12);
3016 + ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15);
3017 + ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9);
3018 + ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11);
3019 + ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7);
3020 + ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13);
3021 + ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12);
3022 +
3023 + /* round 2: right lane */
3024 + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9);
3025 + ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13);
3026 + ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15);
3027 + ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7);
3028 + ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12);
3029 + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8);
3030 + ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9);
3031 + ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11);
3032 + ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7);
3033 + ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7);
3034 + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12);
3035 + ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7);
3036 + ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6);
3037 + ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15);
3038 + ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13);
3039 + ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11);
3040 +
3041 + /* Swap contents of "b" registers */
3042 + tmp = bb; bb = bbb; bbb = tmp;
3043 +
3044 + /* round 3: left lane */
3045 + ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11);
3046 + ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13);
3047 + ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6);
3048 + ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7);
3049 + ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14);
3050 + ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9);
3051 + ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13);
3052 + ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15);
3053 + ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14);
3054 + ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8);
3055 + ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13);
3056 + ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6);
3057 + ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5);
3058 + ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12);
3059 + ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7);
3060 + ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5);
3061 +
3062 + /* round 3: right lane */
3063 + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9);
3064 + ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7);
3065 + ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15);
3066 + ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11);
3067 + ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8);
3068 + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6);
3069 + ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6);
3070 + ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14);
3071 + ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12);
3072 + ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13);
3073 + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5);
3074 + ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14);
3075 + ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13);
3076 + ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13);
3077 + ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7);
3078 + ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5);
3079 +
3080 + /* Swap contents of "c" registers */
3081 + tmp = cc; cc = ccc; ccc = tmp;
3082 +
3083 + /* round 4: left lane */
3084 + ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11);
3085 + ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12);
3086 + ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14);
3087 + ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15);
3088 + ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14);
3089 + ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15);
3090 + ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9);
3091 + ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8);
3092 + ROUND(ee, aa, bb, cc, dd, F4, K4, in[13], 9);
3093 + ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14);
3094 + ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5);
3095 + ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6);
3096 + ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8);
3097 + ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6);
3098 + ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5);
3099 + ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12);
3100 +
3101 + /* round 4: right lane */
3102 + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15);
3103 + ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5);
3104 + ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8);
3105 + ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11);
3106 + ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14);
3107 + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14);
3108 + ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6);
3109 + ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14);
3110 + ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6);
3111 + ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9);
3112 + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12);
3113 + ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9);
3114 + ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12);
3115 + ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5);
3116 + ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15);
3117 + ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8);
3118 +
3119 + /* Swap contents of "d" registers */
3120 + tmp = dd; dd = ddd; ddd = tmp;
3121 +
3122 + /* round 5: left lane */
3123 + ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9);
3124 + ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15);
3125 + ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5);
3126 + ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11);
3127 + ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6);
3128 + ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8);
3129 + ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13);
3130 + ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12);
3131 + ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5);
3132 + ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12);
3133 + ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13);
3134 + ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14);
3135 + ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11);
3136 + ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8);
3137 + ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5);
3138 + ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6);
3139 +
3140 + /* round 5: right lane */
3141 + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8);
3142 + ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5);
3143 + ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12);
3144 + ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9);
3145 + ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12);
3146 + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5);
3147 + ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14);
3148 + ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6);
3149 + ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8);
3150 + ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13);
3151 + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6);
3152 + ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5);
3153 + ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15);
3154 + ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13);
3155 + ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11);
3156 + ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11);
3157 +
3158 + /* Swap contents of "e" registers */
3159 + tmp = ee; ee = eee; eee = tmp;
3160 +
3161 + /* combine results */
3162 + state[0] += aa;
3163 + state[1] += bb;
3164 + state[2] += cc;
3165 + state[3] += dd;
3166 + state[4] += ee;
3167 + state[5] += aaa;
3168 + state[6] += bbb;
3169 + state[7] += ccc;
3170 + state[8] += ddd;
3171 + state[9] += eee;
3172 +
3173 + return;
3174 +}
3175 +
3176 +static void rmd320_init(struct crypto_tfm *tfm)
3177 +{
3178 + struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
3179 +
3180 + rctx->byte_count = 0;
3181 +
3182 + rctx->state[0] = RMD_H0;
3183 + rctx->state[1] = RMD_H1;
3184 + rctx->state[2] = RMD_H2;
3185 + rctx->state[3] = RMD_H3;
3186 + rctx->state[4] = RMD_H4;
3187 + rctx->state[5] = RMD_H5;
3188 + rctx->state[6] = RMD_H6;
3189 + rctx->state[7] = RMD_H7;
3190 + rctx->state[8] = RMD_H8;
3191 + rctx->state[9] = RMD_H9;
3192 +
3193 + memset(rctx->buffer, 0, sizeof(rctx->buffer));
3194 +}
3195 +
3196 +static void rmd320_update(struct crypto_tfm *tfm, const u8 *data,
3197 + unsigned int len)
3198 +{
3199 + struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
3200 + const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f);
3201 +
3202 + rctx->byte_count += len;
3203 +
3204 + /* Enough space in buffer? If so copy and we're done */
3205 + if (avail > len) {
3206 + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
3207 + data, len);
3208 + return;
3209 + }
3210 +
3211 + memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail),
3212 + data, avail);
3213 +
3214 + rmd320_transform(rctx->state, rctx->buffer);
3215 + data += avail;
3216 + len -= avail;
3217 +
3218 + while (len >= sizeof(rctx->buffer)) {
3219 + memcpy(rctx->buffer, data, sizeof(rctx->buffer));
3220 + rmd320_transform(rctx->state, rctx->buffer);
3221 + data += sizeof(rctx->buffer);
3222 + len -= sizeof(rctx->buffer);
3223 + }
3224 +
3225 + memcpy(rctx->buffer, data, len);
3226 +}
3227 +
3228 +/* Add padding and return the message digest. */
3229 +static void rmd320_final(struct crypto_tfm *tfm, u8 *out)
3230 +{
3231 + struct rmd320_ctx *rctx = crypto_tfm_ctx(tfm);
3232 + u32 i, index, padlen;
3233 + __le64 bits;
3234 + __le32 *dst = (__le32 *)out;
3235 + static const u8 padding[64] = { 0x80, };
3236 +
3237 + bits = cpu_to_le64(rctx->byte_count << 3);
3238 +
3239 + /* Pad out to 56 mod 64 */
3240 + index = rctx->byte_count & 0x3f;
3241 + padlen = (index < 56) ? (56 - index) : ((64+56) - index);
3242 + rmd320_update(tfm, padding, padlen);
3243 +
3244 + /* Append length */
3245 + rmd320_update(tfm, (const u8 *)&bits, sizeof(bits));
3246 +
3247 + /* Store state in digest */
3248 + for (i = 0; i < 10; i++)
3249 + dst[i] = cpu_to_le32p(&rctx->state[i]);
3250 +
3251 + /* Wipe context */
3252 + memset(rctx, 0, sizeof(*rctx));
3253 +}
3254 +
3255 +static struct crypto_alg alg = {
3256 + .cra_name = "rmd320",
3257 + .cra_driver_name = "rmd320",
3258 + .cra_flags = CRYPTO_ALG_TYPE_DIGEST,
3259 + .cra_blocksize = RMD320_BLOCK_SIZE,
3260 + .cra_ctxsize = sizeof(struct rmd320_ctx),
3261 + .cra_module = THIS_MODULE,
3262 + .cra_list = LIST_HEAD_INIT(alg.cra_list),
3263 + .cra_u = { .digest = {
3264 + .dia_digestsize = RMD320_DIGEST_SIZE,
3265 + .dia_init = rmd320_init,
3266 + .dia_update = rmd320_update,
3267 + .dia_final = rmd320_final } }
3268 +};
3269 +
3270 +static int __init rmd320_mod_init(void)
3271 +{
3272 + return crypto_register_alg(&alg);
3273 +}
3274 +
3275 +static void __exit rmd320_mod_fini(void)
3276 +{
3277 + crypto_unregister_alg(&alg);
3278 +}
3279 +
3280 +module_init(rmd320_mod_init);
3281 +module_exit(rmd320_mod_fini);
3282 +
3283 +MODULE_LICENSE("GPL");
3284 +MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
3285 +
3286 +MODULE_ALIAS("rmd320");
3287 --- a/crypto/tcrypt.c
3288 +++ b/crypto/tcrypt.c
3289 @@ -13,15 +13,9 @@
3290 * Software Foundation; either version 2 of the License, or (at your option)
3291 * any later version.
3292 *
3293 - * 2007-11-13 Added GCM tests
3294 - * 2007-11-13 Added AEAD support
3295 - * 2007-11-06 Added SHA-224 and SHA-224-HMAC tests
3296 - * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
3297 - * 2004-08-09 Added cipher speed tests (Reyk Floeter <reyk@vantronix.net>)
3298 - * 2003-09-14 Rewritten by Kartikey Mahendra Bhatt
3299 - *
3300 */
3301
3302 +#include <crypto/hash.h>
3303 #include <linux/err.h>
3304 #include <linux/init.h>
3305 #include <linux/module.h>
3306 @@ -30,7 +24,6 @@
3307 #include <linux/scatterlist.h>
3308 #include <linux/string.h>
3309 #include <linux/crypto.h>
3310 -#include <linux/highmem.h>
3311 #include <linux/moduleparam.h>
3312 #include <linux/jiffies.h>
3313 #include <linux/timex.h>
3314 @@ -38,7 +31,7 @@
3315 #include "tcrypt.h"
3316
3317 /*
3318 - * Need to kmalloc() memory for testing kmap().
3319 + * Need to kmalloc() memory for testing.
3320 */
3321 #define TVMEMSIZE 16384
3322 #define XBUFSIZE 32768
3323 @@ -46,7 +39,7 @@
3324 /*
3325 * Indexes into the xbuf to simulate cross-page access.
3326 */
3327 -#define IDX1 37
3328 +#define IDX1 32
3329 #define IDX2 32400
3330 #define IDX3 1
3331 #define IDX4 8193
3332 @@ -83,7 +76,8 @@ static char *check[] = {
3333 "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
3334 "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
3335 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
3336 - "camellia", "seed", "salsa20", "lzo", "cts", NULL
3337 + "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
3338 + "lzo", "cts", NULL
3339 };
3340
3341 static void hexdump(unsigned char *buf, unsigned int len)
3342 @@ -110,22 +104,30 @@ static void test_hash(char *algo, struct
3343 unsigned int i, j, k, temp;
3344 struct scatterlist sg[8];
3345 char result[64];
3346 - struct crypto_hash *tfm;
3347 - struct hash_desc desc;
3348 + struct crypto_ahash *tfm;
3349 + struct ahash_request *req;
3350 + struct tcrypt_result tresult;
3351 int ret;
3352 void *hash_buff;
3353
3354 printk("\ntesting %s\n", algo);
3355
3356 - tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
3357 + init_completion(&tresult.completion);
3358 +
3359 + tfm = crypto_alloc_ahash(algo, 0, 0);
3360 if (IS_ERR(tfm)) {
3361 printk("failed to load transform for %s: %ld\n", algo,
3362 PTR_ERR(tfm));
3363 return;
3364 }
3365
3366 - desc.tfm = tfm;
3367 - desc.flags = 0;
3368 + req = ahash_request_alloc(tfm, GFP_KERNEL);
3369 + if (!req) {
3370 + printk(KERN_ERR "failed to allocate request for %s\n", algo);
3371 + goto out_noreq;
3372 + }
3373 + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
3374 + tcrypt_complete, &tresult);
3375
3376 for (i = 0; i < tcount; i++) {
3377 printk("test %u:\n", i + 1);
3378 @@ -139,8 +141,9 @@ static void test_hash(char *algo, struct
3379 sg_init_one(&sg[0], hash_buff, template[i].psize);
3380
3381 if (template[i].ksize) {
3382 - ret = crypto_hash_setkey(tfm, template[i].key,
3383 - template[i].ksize);
3384 + crypto_ahash_clear_flags(tfm, ~0);
3385 + ret = crypto_ahash_setkey(tfm, template[i].key,
3386 + template[i].ksize);
3387 if (ret) {
3388 printk("setkey() failed ret=%d\n", ret);
3389 kfree(hash_buff);
3390 @@ -148,17 +151,30 @@ static void test_hash(char *algo, struct
3391 }
3392 }
3393
3394 - ret = crypto_hash_digest(&desc, sg, template[i].psize, result);
3395 - if (ret) {
3396 + ahash_request_set_crypt(req, sg, result, template[i].psize);
3397 + ret = crypto_ahash_digest(req);
3398 + switch (ret) {
3399 + case 0:
3400 + break;
3401 + case -EINPROGRESS:
3402 + case -EBUSY:
3403 + ret = wait_for_completion_interruptible(
3404 + &tresult.completion);
3405 + if (!ret && !(ret = tresult.err)) {
3406 + INIT_COMPLETION(tresult.completion);
3407 + break;
3408 + }
3409 + /* fall through */
3410 + default:
3411 printk("digest () failed ret=%d\n", ret);
3412 kfree(hash_buff);
3413 goto out;
3414 }
3415
3416 - hexdump(result, crypto_hash_digestsize(tfm));
3417 + hexdump(result, crypto_ahash_digestsize(tfm));
3418 printk("%s\n",
3419 memcmp(result, template[i].digest,
3420 - crypto_hash_digestsize(tfm)) ?
3421 + crypto_ahash_digestsize(tfm)) ?
3422 "fail" : "pass");
3423 kfree(hash_buff);
3424 }
3425 @@ -187,8 +203,9 @@ static void test_hash(char *algo, struct
3426 }
3427
3428 if (template[i].ksize) {
3429 - ret = crypto_hash_setkey(tfm, template[i].key,
3430 - template[i].ksize);
3431 + crypto_ahash_clear_flags(tfm, ~0);
3432 + ret = crypto_ahash_setkey(tfm, template[i].key,
3433 + template[i].ksize);
3434
3435 if (ret) {
3436 printk("setkey() failed ret=%d\n", ret);
3437 @@ -196,29 +213,44 @@ static void test_hash(char *algo, struct
3438 }
3439 }
3440
3441 - ret = crypto_hash_digest(&desc, sg, template[i].psize,
3442 - result);
3443 - if (ret) {
3444 + ahash_request_set_crypt(req, sg, result,
3445 + template[i].psize);
3446 + ret = crypto_ahash_digest(req);
3447 + switch (ret) {
3448 + case 0:
3449 + break;
3450 + case -EINPROGRESS:
3451 + case -EBUSY:
3452 + ret = wait_for_completion_interruptible(
3453 + &tresult.completion);
3454 + if (!ret && !(ret = tresult.err)) {
3455 + INIT_COMPLETION(tresult.completion);
3456 + break;
3457 + }
3458 + /* fall through */
3459 + default:
3460 printk("digest () failed ret=%d\n", ret);
3461 goto out;
3462 }
3463
3464 - hexdump(result, crypto_hash_digestsize(tfm));
3465 + hexdump(result, crypto_ahash_digestsize(tfm));
3466 printk("%s\n",
3467 memcmp(result, template[i].digest,
3468 - crypto_hash_digestsize(tfm)) ?
3469 + crypto_ahash_digestsize(tfm)) ?
3470 "fail" : "pass");
3471 }
3472 }
3473
3474 out:
3475 - crypto_free_hash(tfm);
3476 + ahash_request_free(req);
3477 +out_noreq:
3478 + crypto_free_ahash(tfm);
3479 }
3480
3481 static void test_aead(char *algo, int enc, struct aead_testvec *template,
3482 unsigned int tcount)
3483 {
3484 - unsigned int ret, i, j, k, temp;
3485 + unsigned int ret, i, j, k, n, temp;
3486 char *q;
3487 struct crypto_aead *tfm;
3488 char *key;
3489 @@ -344,13 +376,12 @@ static void test_aead(char *algo, int en
3490 goto next_one;
3491 }