target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch
1 From ba8e92b322a3763880fdc4d19e9c7085f5504be7 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Tue, 23 Apr 2019 17:41:43 +0800
4 Subject: [PATCH] sec: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9     This is an integrated patch adding SEC (CAAM) support for Layerscape platforms
10
11 Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
12 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
13 Signed-off-by: Biwen Li <biwen.li@nxp.com>
14 Signed-off-by: Carmen Iorga <carmen.iorga@nxp.com>
15 Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
16 Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
17 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
18 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
19 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
20 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
23 Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
24 ---
25 crypto/Kconfig | 20 +
26 crypto/Makefile | 1 +
27 crypto/chacha20poly1305.c | 2 -
28 crypto/tcrypt.c | 27 +-
29 crypto/testmgr.c | 244 ++
30 crypto/testmgr.h | 219 ++
31 crypto/tls.c | 607 ++++
32 drivers/crypto/Makefile | 2 +-
33 drivers/crypto/caam/Kconfig | 85 +-
34 drivers/crypto/caam/Makefile | 26 +-
35 drivers/crypto/caam/caamalg.c | 468 +++-
36 drivers/crypto/caam/caamalg_desc.c | 903 +++++-
37 drivers/crypto/caam/caamalg_desc.h | 52 +-
38 drivers/crypto/caam/caamalg_qi.c | 1060 ++++++-
39 drivers/crypto/caam/caamalg_qi2.c | 5843 +++++++++++++++++++++++++++++++++++
40 drivers/crypto/caam/caamalg_qi2.h | 276 ++
41 drivers/crypto/caam/caamhash.c | 192 +-
42 drivers/crypto/caam/caamhash_desc.c | 108 +
43 drivers/crypto/caam/caamhash_desc.h | 49 +
44 drivers/crypto/caam/caampkc.c | 52 +-
45 drivers/crypto/caam/caamrng.c | 52 +-
46 drivers/crypto/caam/compat.h | 4 +
47 drivers/crypto/caam/ctrl.c | 194 +-
48 drivers/crypto/caam/desc.h | 89 +-
49 drivers/crypto/caam/desc_constr.h | 59 +-
50 drivers/crypto/caam/dpseci.c | 865 ++++++
51 drivers/crypto/caam/dpseci.h | 433 +++
52 drivers/crypto/caam/dpseci_cmd.h | 287 ++
53 drivers/crypto/caam/error.c | 81 +-
54 drivers/crypto/caam/error.h | 6 +-
55 drivers/crypto/caam/intern.h | 102 +-
56 drivers/crypto/caam/jr.c | 84 +
57 drivers/crypto/caam/jr.h | 2 +
58 drivers/crypto/caam/key_gen.c | 30 -
59 drivers/crypto/caam/key_gen.h | 30 +
60 drivers/crypto/caam/qi.c | 134 +-
61 drivers/crypto/caam/qi.h | 2 +-
62 drivers/crypto/caam/regs.h | 76 +-
63 drivers/crypto/caam/sg_sw_qm.h | 46 +-
64 drivers/crypto/talitos.c | 8 +
65 include/crypto/chacha20.h | 1 +
66 41 files changed, 12088 insertions(+), 733 deletions(-)
67 create mode 100644 crypto/tls.c
68 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
69 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
70 create mode 100644 drivers/crypto/caam/caamhash_desc.c
71 create mode 100644 drivers/crypto/caam/caamhash_desc.h
72 create mode 100644 drivers/crypto/caam/dpseci.c
73 create mode 100644 drivers/crypto/caam/dpseci.h
74 create mode 100644 drivers/crypto/caam/dpseci_cmd.h
75
76 --- a/crypto/Kconfig
77 +++ b/crypto/Kconfig
78 @@ -312,6 +312,26 @@ config CRYPTO_ECHAINIV
79 a sequence number xored with a salt. This is the default
80 algorithm for CBC.
81
82 +config CRYPTO_TLS
83 + tristate "TLS support"
84 + select CRYPTO_AEAD
85 + select CRYPTO_BLKCIPHER
86 + select CRYPTO_MANAGER
87 + select CRYPTO_HASH
88 + select CRYPTO_NULL
89 + select CRYPTO_AUTHENC
90 + help
91 +	  Support for TLS 1.0 record encryption and decryption.
92 +
93 + This module adds support for encryption/decryption of TLS 1.0 frames
94 + using blockcipher algorithms. The name of the resulting algorithm is
95 + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
96 + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
97 + accelerated versions will be used automatically if available.
98 +
99 + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
100 +	  operations through AF_ALG or cryptodev interfaces.
101 +
102 comment "Block modes"
103
104 config CRYPTO_CBC
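The help text above mentions AF_ALG offload. As a minimal user-space sketch (illustrative only, not part of this patch; it assumes a kernel built with CONFIG_CRYPTO_USER_API_AEAD), the resulting "tls10(hmac(sha1),cbc(aes))" AEAD can be bound through the crypto user API like any other AEAD; the key blob passed to ALG_SET_KEY uses the authenc layout shown by the test vectors added to testmgr.h below:

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    /* Bind a tls10 AEAD instance and return the per-operation socket.
     * Record I/O (ALG_SET_IV / ALG_SET_OP via sendmsg) is omitted for brevity.
     */
    int tls10_open(const unsigned char *key, int klen)
    {
    	struct sockaddr_alg sa = {
    		.salg_family = AF_ALG,
    		.salg_type   = "aead",
    		.salg_name   = "tls10(hmac(sha1),cbc(aes))",
    	};
    	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

    	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)))
    		return -1;
    	/* key: rtattr-wrapped authenc blob (see tls_enc_tv_template below) */
    	if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, klen))
    		return -1;
    	/* ICV length = HMAC-SHA1 digest size */
    	if (setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 20))
    		return -1;
    	return accept(tfmfd, NULL, 0);	/* per-operation socket */
    }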
105 --- a/crypto/Makefile
106 +++ b/crypto/Makefile
107 @@ -118,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
108 obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
109 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
110 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
111 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
112 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
113 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
114 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
115 --- a/crypto/chacha20poly1305.c
116 +++ b/crypto/chacha20poly1305.c
117 @@ -22,8 +22,6 @@
118
119 #include "internal.h"
120
121 -#define CHACHAPOLY_IV_SIZE 12
122 -
123 struct chachapoly_instance_ctx {
124 struct crypto_skcipher_spawn chacha;
125 struct crypto_ahash_spawn poly;
126 --- a/crypto/tcrypt.c
127 +++ b/crypto/tcrypt.c
128 @@ -76,7 +76,7 @@ static char *check[] = {
129 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
130 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
131 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
132 - NULL
133 + "rsa", NULL
134 };
135
136 struct tcrypt_result {
137 @@ -355,11 +355,13 @@ static void test_aead_speed(const char *
138 iv);
139 aead_request_set_ad(req, aad_size);
140
141 - if (secs)
142 + if (secs) {
143 ret = test_aead_jiffies(req, enc, *b_size,
144 secs);
145 - else
146 + cond_resched();
147 + } else {
148 ret = test_aead_cycles(req, enc, *b_size);
149 + }
150
151 if (ret) {
152 pr_err("%s() failed return code=%d\n", e, ret);
153 @@ -736,12 +738,14 @@ static void test_ahash_speed_common(cons
154
155 ahash_request_set_crypt(req, sg, output, speed[i].plen);
156
157 - if (secs)
158 + if (secs) {
159 ret = test_ahash_jiffies(req, speed[i].blen,
160 speed[i].plen, output, secs);
161 - else
162 + cond_resched();
163 + } else {
164 ret = test_ahash_cycles(req, speed[i].blen,
165 speed[i].plen, output);
166 + }
167
168 if (ret) {
169 pr_err("hashing failed ret=%d\n", ret);
170 @@ -959,12 +963,14 @@ static void test_skcipher_speed(const ch
171
172 skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
173
174 - if (secs)
175 + if (secs) {
176 ret = test_acipher_jiffies(req, enc,
177 *b_size, secs);
178 - else
179 + cond_resched();
180 + } else {
181 ret = test_acipher_cycles(req, enc,
182 *b_size);
183 + }
184
185 if (ret) {
186 pr_err("%s() failed flags=%x\n", e,
187 @@ -1336,6 +1342,10 @@ static int do_test(const char *alg, u32
188 ret += tcrypt_test("hmac(sha3-512)");
189 break;
190
191 + case 115:
192 + ret += tcrypt_test("rsa");
193 + break;
194 +
195 case 150:
196 ret += tcrypt_test("ansi_cprng");
197 break;
198 @@ -1397,6 +1407,9 @@ static int do_test(const char *alg, u32
199 case 190:
200 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
201 break;
202 + case 191:
203 + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
204 + break;
205 case 200:
206 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
207 speed_template_16_24_32);
208 --- a/crypto/testmgr.c
209 +++ b/crypto/testmgr.c
210 @@ -117,6 +117,13 @@ struct drbg_test_suite {
211 unsigned int count;
212 };
213
214 +struct tls_test_suite {
215 + struct {
216 + struct tls_testvec *vecs;
217 + unsigned int count;
218 + } enc, dec;
219 +};
220 +
221 struct akcipher_test_suite {
222 const struct akcipher_testvec *vecs;
223 unsigned int count;
224 @@ -140,6 +147,7 @@ struct alg_test_desc {
225 struct hash_test_suite hash;
226 struct cprng_test_suite cprng;
227 struct drbg_test_suite drbg;
228 + struct tls_test_suite tls;
229 struct akcipher_test_suite akcipher;
230 struct kpp_test_suite kpp;
231 } suite;
232 @@ -991,6 +999,233 @@ static int test_aead(struct crypto_aead
233 return 0;
234 }
235
236 +static int __test_tls(struct crypto_aead *tfm, int enc,
237 + struct tls_testvec *template, unsigned int tcount,
238 + const bool diff_dst)
239 +{
240 + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
241 + unsigned int i, k, authsize;
242 + char *q;
243 + struct aead_request *req;
244 + struct scatterlist *sg;
245 + struct scatterlist *sgout;
246 + const char *e, *d;
247 + struct tcrypt_result result;
248 + void *input;
249 + void *output;
250 + void *assoc;
251 + char *iv;
252 + char *key;
253 + char *xbuf[XBUFSIZE];
254 + char *xoutbuf[XBUFSIZE];
255 + char *axbuf[XBUFSIZE];
256 + int ret = -ENOMEM;
257 +
258 + if (testmgr_alloc_buf(xbuf))
259 + goto out_noxbuf;
260 +
261 + if (diff_dst && testmgr_alloc_buf(xoutbuf))
262 + goto out_nooutbuf;
263 +
264 + if (testmgr_alloc_buf(axbuf))
265 + goto out_noaxbuf;
266 +
267 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
268 + if (!iv)
269 + goto out_noiv;
270 +
271 + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
272 + if (!key)
273 + goto out_nokey;
274 +
275 + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
276 + if (!sg)
277 + goto out_nosg;
278 +
279 + sgout = sg + 8;
280 +
281 + d = diff_dst ? "-ddst" : "";
282 + e = enc ? "encryption" : "decryption";
283 +
284 + init_completion(&result.completion);
285 +
286 + req = aead_request_alloc(tfm, GFP_KERNEL);
287 + if (!req) {
288 + pr_err("alg: tls%s: Failed to allocate request for %s\n",
289 + d, algo);
290 + goto out;
291 + }
292 +
293 + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
294 + tcrypt_complete, &result);
295 +
296 + for (i = 0; i < tcount; i++) {
297 + input = xbuf[0];
298 + assoc = axbuf[0];
299 +
300 + ret = -EINVAL;
301 + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
302 + template[i].alen > PAGE_SIZE))
303 + goto out;
304 +
305 + memcpy(assoc, template[i].assoc, template[i].alen);
306 + memcpy(input, template[i].input, template[i].ilen);
307 +
308 + if (template[i].iv)
309 + memcpy(iv, template[i].iv, MAX_IVLEN);
310 + else
311 + memset(iv, 0, MAX_IVLEN);
312 +
313 + crypto_aead_clear_flags(tfm, ~0);
314 +
315 + if (template[i].klen > MAX_KEYLEN) {
316 + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
317 + d, i, algo, template[i].klen, MAX_KEYLEN);
318 + ret = -EINVAL;
319 + goto out;
320 + }
321 + memcpy(key, template[i].key, template[i].klen);
322 +
323 + ret = crypto_aead_setkey(tfm, key, template[i].klen);
324 + if (!ret == template[i].fail) {
325 + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
326 + d, i, algo, crypto_aead_get_flags(tfm));
327 + goto out;
328 + } else if (ret)
329 + continue;
330 +
331 + authsize = 20;
332 + ret = crypto_aead_setauthsize(tfm, authsize);
333 + if (ret) {
334 + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
335 + d, authsize, i, algo);
336 + goto out;
337 + }
338 +
339 + k = !!template[i].alen;
340 + sg_init_table(sg, k + 1);
341 + sg_set_buf(&sg[0], assoc, template[i].alen);
342 + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
343 + template[i].ilen));
344 + output = input;
345 +
346 + if (diff_dst) {
347 + sg_init_table(sgout, k + 1);
348 + sg_set_buf(&sgout[0], assoc, template[i].alen);
349 +
350 + output = xoutbuf[0];
351 + sg_set_buf(&sgout[k], output,
352 + (enc ? template[i].rlen : template[i].ilen));
353 + }
354 +
355 + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
356 + template[i].ilen, iv);
357 +
358 + aead_request_set_ad(req, template[i].alen);
359 +
360 + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
361 +
362 + switch (ret) {
363 + case 0:
364 + if (template[i].novrfy) {
365 + /* verification was supposed to fail */
366 + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
367 + d, e, i, algo);
368 + /* so really, we got a bad message */
369 + ret = -EBADMSG;
370 + goto out;
371 + }
372 + break;
373 + case -EINPROGRESS:
374 + case -EBUSY:
375 + wait_for_completion(&result.completion);
376 + reinit_completion(&result.completion);
377 + ret = result.err;
378 + if (!ret)
379 + break;
380 + case -EBADMSG:
381 + /* verification failure was expected */
382 + if (template[i].novrfy)
383 + continue;
384 + /* fall through */
385 + default:
386 + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
387 + d, e, i, algo, -ret);
388 + goto out;
389 + }
390 +
391 + q = output;
392 + if (memcmp(q, template[i].result, template[i].rlen)) {
393 + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
394 + d, i, e, algo);
395 + hexdump(q, template[i].rlen);
396 + pr_err("should be:\n");
397 + hexdump(template[i].result, template[i].rlen);
398 + ret = -EINVAL;
399 + goto out;
400 + }
401 + }
402 +
403 +out:
404 + aead_request_free(req);
405 +
406 + kfree(sg);
407 +out_nosg:
408 + kfree(key);
409 +out_nokey:
410 + kfree(iv);
411 +out_noiv:
412 + testmgr_free_buf(axbuf);
413 +out_noaxbuf:
414 + if (diff_dst)
415 + testmgr_free_buf(xoutbuf);
416 +out_nooutbuf:
417 + testmgr_free_buf(xbuf);
418 +out_noxbuf:
419 + return ret;
420 +}
421 +
422 +static int test_tls(struct crypto_aead *tfm, int enc,
423 + struct tls_testvec *template, unsigned int tcount)
424 +{
425 + int ret;
426 + /* test 'dst == src' case */
427 + ret = __test_tls(tfm, enc, template, tcount, false);
428 + if (ret)
429 + return ret;
430 + /* test 'dst != src' case */
431 + return __test_tls(tfm, enc, template, tcount, true);
432 +}
433 +
434 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
435 + u32 type, u32 mask)
436 +{
437 + struct crypto_aead *tfm;
438 + int err = 0;
439 +
440 + tfm = crypto_alloc_aead(driver, type, mask);
441 + if (IS_ERR(tfm)) {
442 + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
443 + driver, PTR_ERR(tfm));
444 + return PTR_ERR(tfm);
445 + }
446 +
447 + if (desc->suite.tls.enc.vecs) {
448 + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
449 + desc->suite.tls.enc.count);
450 + if (err)
451 + goto out;
452 + }
453 +
454 + if (!err && desc->suite.tls.dec.vecs)
455 + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
456 + desc->suite.tls.dec.count);
457 +
458 +out:
459 + crypto_free_aead(tfm);
460 + return err;
461 +}
462 +
463 static int test_cipher(struct crypto_cipher *tfm, int enc,
464 const struct cipher_testvec *template,
465 unsigned int tcount)
466 @@ -3524,6 +3759,15 @@ static const struct alg_test_desc alg_te
467 .hash = __VECS(tgr192_tv_template)
468 }
469 }, {
470 + .alg = "tls10(hmac(sha1),cbc(aes))",
471 + .test = alg_test_tls,
472 + .suite = {
473 + .tls = {
474 + .enc = __VECS(tls_enc_tv_template),
475 + .dec = __VECS(tls_dec_tv_template)
476 + }
477 + }
478 + }, {
479 .alg = "vmac(aes)",
480 .test = alg_test_hash,
481 .suite = {
482 --- a/crypto/testmgr.h
483 +++ b/crypto/testmgr.h
484 @@ -125,6 +125,20 @@ struct drbg_testvec {
485 size_t expectedlen;
486 };
487
488 +struct tls_testvec {
489 + char *key; /* wrapped keys for encryption and authentication */
490 + char *iv; /* initialization vector */
491 + char *input; /* input data */
492 + char *assoc; /* associated data: seq num, type, version, input len */
493 + char *result; /* result data */
494 + unsigned char fail; /* the test failure is expected */
495 + unsigned char novrfy; /* dec verification failure expected */
496 + unsigned char klen; /* key length */
497 + unsigned short ilen; /* input data length */
498 + unsigned short alen; /* associated data length */
499 + unsigned short rlen; /* result length */
500 +};
501 +
502 struct akcipher_testvec {
503 const unsigned char *key;
504 const unsigned char *m;
505 @@ -153,6 +167,211 @@ struct kpp_testvec {
506 static const char zeroed_string[48];
507
508 /*
509 + * TLS1.0 synthetic test vectors
510 + */
511 +static struct tls_testvec tls_enc_tv_template[] = {
512 + {
513 +#ifdef __LITTLE_ENDIAN
514 + .key = "\x08\x00" /* rta length */
515 + "\x01\x00" /* rta type */
516 +#else
517 + .key = "\x00\x08" /* rta length */
518 + "\x00\x01" /* rta type */
519 +#endif
520 + "\x00\x00\x00\x10" /* enc key length */
521 + "authenticationkey20benckeyis16_bytes",
522 + .klen = 8 + 20 + 16,
523 + .iv = "iv0123456789abcd",
524 + .input = "Single block msg",
525 + .ilen = 16,
526 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
527 + "\x00\x03\x01\x00\x10",
528 + .alen = 13,
529 + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
530 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
531 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
532 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
533 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
534 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
535 + .rlen = 16 + 20 + 12,
536 + }, {
537 +#ifdef __LITTLE_ENDIAN
538 + .key = "\x08\x00" /* rta length */
539 + "\x01\x00" /* rta type */
540 +#else
541 + .key = "\x00\x08" /* rta length */
542 + "\x00\x01" /* rta type */
543 +#endif
544 + "\x00\x00\x00\x10" /* enc key length */
545 + "authenticationkey20benckeyis16_bytes",
546 + .klen = 8 + 20 + 16,
547 + .iv = "iv0123456789abcd",
548 + .input = "",
549 + .ilen = 0,
550 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
551 + "\x00\x03\x01\x00\x00",
552 + .alen = 13,
553 + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
554 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
555 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
556 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
557 + .rlen = 20 + 12,
558 + }, {
559 +#ifdef __LITTLE_ENDIAN
560 + .key = "\x08\x00" /* rta length */
561 + "\x01\x00" /* rta type */
562 +#else
563 + .key = "\x00\x08" /* rta length */
564 + "\x00\x01" /* rta type */
565 +#endif
566 + "\x00\x00\x00\x10" /* enc key length */
567 + "authenticationkey20benckeyis16_bytes",
568 + .klen = 8 + 20 + 16,
569 + .iv = "iv0123456789abcd",
570 + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
571 + " plaintext285 bytes plaintext285 bytes plaintext285"
572 + " bytes plaintext285 bytes plaintext285 bytes"
573 + " plaintext285 bytes plaintext285 bytes plaintext285"
574 + " bytes plaintext285 bytes plaintext285 bytes"
575 + " plaintext285 bytes plaintext285 bytes plaintext285"
576 + " bytes plaintext285 bytes plaintext",
577 + .ilen = 285,
578 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
579 + "\x00\x03\x01\x01\x1d",
580 + .alen = 13,
581 + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
582 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
583 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
584 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
585 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
586 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
587 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
588 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
589 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
590 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
591 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
592 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
593 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
594 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
595 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
596 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
597 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
598 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
599 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
600 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
601 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
602 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
603 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
604 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
605 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
606 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
607 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
608 + .rlen = 285 + 20 + 15,
609 + }
610 +};
611 +
612 +static struct tls_testvec tls_dec_tv_template[] = {
613 + {
614 +#ifdef __LITTLE_ENDIAN
615 + .key = "\x08\x00" /* rta length */
616 + "\x01\x00" /* rta type */
617 +#else
618 + .key = "\x00\x08" /* rta length */
619 + "\x00\x01" /* rta type */
620 +#endif
621 + "\x00\x00\x00\x10" /* enc key length */
622 + "authenticationkey20benckeyis16_bytes",
623 + .klen = 8 + 20 + 16,
624 + .iv = "iv0123456789abcd",
625 + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
626 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
627 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
628 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
629 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
630 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
631 + .ilen = 16 + 20 + 12,
632 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
633 + "\x00\x03\x01\x00\x30",
634 + .alen = 13,
635 + .result = "Single block msg",
636 + .rlen = 16,
637 + }, {
638 +#ifdef __LITTLE_ENDIAN
639 + .key = "\x08\x00" /* rta length */
640 + "\x01\x00" /* rta type */
641 +#else
642 + .key = "\x00\x08" /* rta length */
643 + "\x00\x01" /* rta type */
644 +#endif
645 + "\x00\x00\x00\x10" /* enc key length */
646 + "authenticationkey20benckeyis16_bytes",
647 + .klen = 8 + 20 + 16,
648 + .iv = "iv0123456789abcd",
649 + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
650 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
651 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
652 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
653 + .ilen = 20 + 12,
654 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
655 + "\x00\x03\x01\x00\x20",
656 + .alen = 13,
657 + .result = "",
658 + .rlen = 0,
659 + }, {
660 +#ifdef __LITTLE_ENDIAN
661 + .key = "\x08\x00" /* rta length */
662 + "\x01\x00" /* rta type */
663 +#else
664 + .key = "\x00\x08" /* rta length */
665 + "\x00\x01" /* rta type */
666 +#endif
667 + "\x00\x00\x00\x10" /* enc key length */
668 + "authenticationkey20benckeyis16_bytes",
669 + .klen = 8 + 20 + 16,
670 + .iv = "iv0123456789abcd",
671 + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
672 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
673 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
674 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
675 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
676 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
677 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
678 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
679 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
680 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
681 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
682 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
683 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
684 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
685 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
686 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
687 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
688 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
689 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
690 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
691 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
692 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
693 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
694 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
695 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
696 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
697 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
698 +
699 + .ilen = 285 + 20 + 15,
700 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
701 + "\x00\x03\x01\x01\x40",
702 + .alen = 13,
703 + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
704 + " plaintext285 bytes plaintext285 bytes plaintext285"
705 + " bytes plaintext285 bytes plaintext285 bytes"
706 + " plaintext285 bytes plaintext285 bytes plaintext285"
707 + " bytes plaintext285 bytes plaintext285 bytes"
708 + " plaintext285 bytes plaintext285 bytes plaintext",
709 + .rlen = 285,
710 + }
711 +};
712 +
713 +/*
714 * RSA test vectors. Borrowed from openSSL.
715 */
716 static const struct akcipher_testvec rsa_tv_template[] = {
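A note on the .key blobs above: they follow the authenc key layout consumed by crypto_authenc_extractkeys() — a host-endian struct rtattr header (hence the #ifdef __LITTLE_ENDIAN split), a big-endian 4-byte encryption-key length, then the 20-byte HMAC key and the 16-byte AES key, giving klen = 8 + 20 + 16. A user-space sketch that builds the same 44-byte blob (illustrative; CRYPTO_AUTHENC_KEYA_PARAM is defined as 1 in crypto/authenc.h):

    #include <arpa/inet.h>		/* htonl() */
    #include <string.h>
    #include <linux/rtnetlink.h>	/* struct rtattr, RTA_LENGTH, RTA_DATA */

    #define CRYPTO_AUTHENC_KEYA_PARAM 1	/* from crypto/authenc.h */

    static size_t build_authenc_key(unsigned char *buf,
    				const unsigned char *authkey, size_t alen,
    				const unsigned char *enckey, size_t elen)
    {
    	struct rtattr *rta = (struct rtattr *)buf;
    	unsigned int *enckeylen = RTA_DATA(rta);

    	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;	/* host-endian */
    	rta->rta_len = RTA_LENGTH(sizeof(*enckeylen));	/* 8 bytes total */
    	*enckeylen = htonl(elen);			/* always big-endian */

    	memcpy(buf + rta->rta_len, authkey, alen);
    	memcpy(buf + rta->rta_len + alen, enckey, elen);
    	return rta->rta_len + alen + elen;		/* 8 + 20 + 16 = 44 */
    }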
717 --- /dev/null
718 +++ b/crypto/tls.c
719 @@ -0,0 +1,607 @@
720 +/*
721 + * Copyright 2013 Freescale Semiconductor, Inc.
722 + * Copyright 2017 NXP Semiconductor, Inc.
723 + *
724 + * This program is free software; you can redistribute it and/or modify it
725 + * under the terms of the GNU General Public License as published by the Free
726 + * Software Foundation; either version 2 of the License, or (at your option)
727 + * any later version.
728 + *
729 + */
730 +
731 +#include <crypto/internal/aead.h>
732 +#include <crypto/internal/hash.h>
733 +#include <crypto/internal/skcipher.h>
734 +#include <crypto/authenc.h>
735 +#include <crypto/null.h>
736 +#include <crypto/scatterwalk.h>
737 +#include <linux/err.h>
738 +#include <linux/init.h>
739 +#include <linux/module.h>
740 +#include <linux/rtnetlink.h>
741 +
742 +struct tls_instance_ctx {
743 + struct crypto_ahash_spawn auth;
744 + struct crypto_skcipher_spawn enc;
745 +};
746 +
747 +struct crypto_tls_ctx {
748 + unsigned int reqoff;
749 + struct crypto_ahash *auth;
750 + struct crypto_skcipher *enc;
751 + struct crypto_skcipher *null;
752 +};
753 +
754 +struct tls_request_ctx {
755 + /*
756 + * cryptlen holds the payload length in the case of encryption or
757 + * payload_len + icv_len + padding_len in case of decryption
758 + */
759 + unsigned int cryptlen;
760 + /* working space for partial results */
761 + struct scatterlist tmp[2];
762 + struct scatterlist cipher[2];
763 + struct scatterlist dst[2];
764 + char tail[];
765 +};
766 +
767 +struct async_op {
768 + struct completion completion;
769 + int err;
770 +};
771 +
772 +static void tls_async_op_done(struct crypto_async_request *req, int err)
773 +{
774 + struct async_op *areq = req->data;
775 +
776 + if (err == -EINPROGRESS)
777 + return;
778 +
779 + areq->err = err;
780 + complete(&areq->completion);
781 +}
782 +
783 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
784 + unsigned int keylen)
785 +{
786 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
787 + struct crypto_ahash *auth = ctx->auth;
788 + struct crypto_skcipher *enc = ctx->enc;
789 + struct crypto_authenc_keys keys;
790 + int err = -EINVAL;
791 +
792 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
793 + goto badkey;
794 +
795 + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
796 + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
797 + CRYPTO_TFM_REQ_MASK);
798 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
799 + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
800 + CRYPTO_TFM_RES_MASK);
801 +
802 + if (err)
803 + goto out;
804 +
805 + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
806 + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
807 + CRYPTO_TFM_REQ_MASK);
808 + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
809 + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
810 + CRYPTO_TFM_RES_MASK);
811 +
812 +out:
813 + return err;
814 +
815 +badkey:
816 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
817 + goto out;
818 +}
819 +
820 +/**
821 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
822 + * @hash: (output) buffer to save the digest into
823 + * @src: (input) scatterlist with the assoc and payload data
824 + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
825 + * @req: (input) aead request
826 + **/
827 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
828 + unsigned int srclen, struct aead_request *req)
829 +{
830 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
831 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
832 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
833 + struct async_op ahash_op;
834 + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
835 + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
836 + int err = -EBADMSG;
837 +
838 + /* Bail out if the request assoc len is 0 */
839 + if (!req->assoclen)
840 + return err;
841 +
842 + init_completion(&ahash_op.completion);
843 +
844 + /* the hash transform to be executed comes from the original request */
845 + ahash_request_set_tfm(ahreq, ctx->auth);
846 + /* prepare the hash request with input data and result pointer */
847 + ahash_request_set_crypt(ahreq, src, hash, srclen);
848 + /* set the notifier for when the async hash function returns */
849 + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
850 + tls_async_op_done, &ahash_op);
851 +
852 + /* Calculate the digest on the given data. The result is put in hash */
853 + err = crypto_ahash_digest(ahreq);
854 + if (err == -EINPROGRESS) {
855 + err = wait_for_completion_interruptible(&ahash_op.completion);
856 + if (!err)
857 + err = ahash_op.err;
858 + }
859 +
860 + return err;
861 +}
862 +
863 +/**
864 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
865 + * @hash: (output) buffer to save the digest and padding into
866 + * @phashlen: (output) the size of digest + padding
867 + * @req: (input) aead request
868 + **/
869 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
870 + struct aead_request *req)
871 +{
872 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
873 + unsigned int hash_size = crypto_aead_authsize(tls);
874 + unsigned int block_size = crypto_aead_blocksize(tls);
875 + unsigned int srclen = req->cryptlen + hash_size;
876 + unsigned int icvlen = req->cryptlen + req->assoclen;
877 + unsigned int padlen;
878 + int err;
879 +
880 + err = crypto_tls_genicv(hash, req->src, icvlen, req);
881 + if (err)
882 + goto out;
883 +
884 + /* add padding after digest */
885 + padlen = block_size - (srclen % block_size);
886 + memset(hash + hash_size, padlen - 1, padlen);
887 +
888 + *phashlen = hash_size + padlen;
889 +out:
890 + return err;
891 +}
892 +
893 +static int crypto_tls_copy_data(struct aead_request *req,
894 + struct scatterlist *src,
895 + struct scatterlist *dst,
896 + unsigned int len)
897 +{
898 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
899 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
900 + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
901 +
902 + skcipher_request_set_tfm(skreq, ctx->null);
903 + skcipher_request_set_callback(skreq, aead_request_flags(req),
904 + NULL, NULL);
905 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
906 +
907 + return crypto_skcipher_encrypt(skreq);
908 +}
909 +
910 +static int crypto_tls_encrypt(struct aead_request *req)
911 +{
912 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
913 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
914 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
915 + struct skcipher_request *skreq;
916 + struct scatterlist *cipher = treq_ctx->cipher;
917 + struct scatterlist *tmp = treq_ctx->tmp;
918 + struct scatterlist *sg, *src, *dst;
919 + unsigned int cryptlen, phashlen;
920 + u8 *hash = treq_ctx->tail;
921 + int err;
922 +
923 + /*
924 + * The hash result is saved at the beginning of the tls request ctx
925 + * and is aligned as required by the hash transform. Enough space was
926 + * allocated in crypto_tls_init_tfm to accommodate the difference. The
927 + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
928 + * the result is not overwritten by the second (cipher) request.
929 + */
930 + hash = (u8 *)ALIGN((unsigned long)hash +
931 + crypto_ahash_alignmask(ctx->auth),
932 + crypto_ahash_alignmask(ctx->auth) + 1);
933 +
934 + /*
935 + * STEP 1: create ICV together with necessary padding
936 + */
937 + err = crypto_tls_gen_padicv(hash, &phashlen, req);
938 + if (err)
939 + return err;
940 +
941 + /*
942 + * STEP 2: Hash and padding are combined with the payload
943 +	 * depending on how it arrives. Scatter tables must have at least
944 + * one page of data before chaining with another table and can't have
945 + * an empty data page. The following code addresses these requirements.
946 + *
947 + * If the payload is empty, only the hash is encrypted, otherwise the
948 + * payload scatterlist is merged with the hash. A special merging case
949 + * is when the payload has only one page of data. In that case the
950 + * payload page is moved to another scatterlist and prepared there for
951 + * encryption.
952 + */
953 + if (req->cryptlen) {
954 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
955 +
956 + sg_init_table(cipher, 2);
957 + sg_set_buf(cipher + 1, hash, phashlen);
958 +
959 + if (sg_is_last(src)) {
960 + sg_set_page(cipher, sg_page(src), req->cryptlen,
961 + src->offset);
962 + src = cipher;
963 + } else {
964 + unsigned int rem_len = req->cryptlen;
965 +
966 + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
967 + rem_len -= min(rem_len, sg->length);
968 +
969 + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
970 + sg_chain(sg, 1, cipher);
971 + }
972 + } else {
973 + sg_init_one(cipher, hash, phashlen);
974 + src = cipher;
975 + }
976 +
977 +	/*
978 +	 * If src != dst, copy the associated data from source to destination.
979 +	 * In both cases, fast-forward past the associated data in the dest.
980 +	 */
981 + if (req->src != req->dst) {
982 + err = crypto_tls_copy_data(req, req->src, req->dst,
983 + req->assoclen);
984 + if (err)
985 + return err;
986 + }
987 + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
988 +
989 + /*
990 + * STEP 3: encrypt the frame and return the result
991 + */
992 + cryptlen = req->cryptlen + phashlen;
993 +
994 + /*
995 + * The hash and the cipher are applied at different times and their
996 + * requests can use the same memory space without interference
997 + */
998 + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
999 + skcipher_request_set_tfm(skreq, ctx->enc);
1000 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
1001 + skcipher_request_set_callback(skreq, aead_request_flags(req),
1002 + req->base.complete, req->base.data);
1003 + /*
1004 + * Apply the cipher transform. The result will be in req->dst when the
1005 +	 * asynchronous call terminates.
1006 + */
1007 + err = crypto_skcipher_encrypt(skreq);
1008 +
1009 + return err;
1010 +}
1011 +
1012 +static int crypto_tls_decrypt(struct aead_request *req)
1013 +{
1014 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
1015 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
1016 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
1017 + unsigned int cryptlen = req->cryptlen;
1018 + unsigned int hash_size = crypto_aead_authsize(tls);
1019 + unsigned int block_size = crypto_aead_blocksize(tls);
1020 + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
1021 + struct scatterlist *tmp = treq_ctx->tmp;
1022 + struct scatterlist *src, *dst;
1023 +
1024 + u8 padding[255]; /* padding can be 0-255 bytes */
1025 + u8 pad_size;
1026 + u16 *len_field;
1027 + u8 *ihash, *hash = treq_ctx->tail;
1028 +
1029 + int paderr = 0;
1030 + int err = -EINVAL;
1031 + int i;
1032 + struct async_op ciph_op;
1033 +
1034 + /*
1035 + * Rule out bad packets. The input packet length must be at least one
1036 + * byte more than the hash_size
1037 + */
1038 + if (cryptlen <= hash_size || cryptlen % block_size)
1039 + goto out;
1040 +
1041 + /*
1042 + * Step 1 - Decrypt the source. Fast-forward past the associated data
1043 + * to the encrypted data. The result will be overwritten in place so
1044 + * that the decrypted data will be adjacent to the associated data. The
1045 +	 * last step (computing the hash) will have its input data already
1046 + * prepared and ready to be accessed at req->src.
1047 + */
1048 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
1049 + dst = src;
1050 +
1051 + init_completion(&ciph_op.completion);
1052 + skcipher_request_set_tfm(skreq, ctx->enc);
1053 + skcipher_request_set_callback(skreq, aead_request_flags(req),
1054 + tls_async_op_done, &ciph_op);
1055 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
1056 + err = crypto_skcipher_decrypt(skreq);
1057 + if (err == -EINPROGRESS) {
1058 + err = wait_for_completion_interruptible(&ciph_op.completion);
1059 + if (!err)
1060 + err = ciph_op.err;
1061 + }
1062 + if (err)
1063 + goto out;
1064 +
1065 + /*
1066 + * Step 2 - Verify padding
1067 + * Retrieve the last byte of the payload; this is the padding size.
1068 + */
1069 + cryptlen -= 1;
1070 + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
1071 +
1072 + /* RFC recommendation for invalid padding size. */
1073 + if (cryptlen < pad_size + hash_size) {
1074 + pad_size = 0;
1075 + paderr = -EBADMSG;
1076 + }
1077 + cryptlen -= pad_size;
1078 + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
1079 +
1080 +	/* Padding content must be equal to pad_size. We verify it all */
1081 + for (i = 0; i < pad_size; i++)
1082 + if (padding[i] != pad_size)
1083 + paderr = -EBADMSG;
1084 +
1085 + /*
1086 + * Step 3 - Verify hash
1087 + * Align the digest result as required by the hash transform. Enough
1088 + * space was allocated in crypto_tls_init_tfm
1089 + */
1090 + hash = (u8 *)ALIGN((unsigned long)hash +
1091 + crypto_ahash_alignmask(ctx->auth),
1092 + crypto_ahash_alignmask(ctx->auth) + 1);
1093 + /*
1094 + * Two bytes at the end of the associated data make the length field.
1095 + * It must be updated with the length of the cleartext message before
1096 + * the hash is calculated.
1097 + */
1098 + len_field = sg_virt(req->src) + req->assoclen - 2;
1099 + cryptlen -= hash_size;
1100 + *len_field = htons(cryptlen);
1101 +
1102 + /* This is the hash from the decrypted packet. Save it for later */
1103 + ihash = hash + hash_size;
1104 + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
1105 +
1106 + /* Now compute and compare our ICV with the one from the packet */
1107 + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
1108 + if (!err)
1109 + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
1110 +
1111 + if (req->src != req->dst) {
1112 + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
1113 + req->assoclen);
1114 + if (err)
1115 + goto out;
1116 + }
1117 +
1118 + /* return the first found error */
1119 + if (paderr)
1120 + err = paderr;
1121 +
1122 +out:
1123 + aead_request_complete(req, err);
1124 + return err;
1125 +}
1126 +
1127 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
1128 +{
1129 + struct aead_instance *inst = aead_alg_instance(tfm);
1130 + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
1131 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1132 + struct crypto_ahash *auth;
1133 + struct crypto_skcipher *enc;
1134 + struct crypto_skcipher *null;
1135 + int err;
1136 +
1137 + auth = crypto_spawn_ahash(&ictx->auth);
1138 + if (IS_ERR(auth))
1139 + return PTR_ERR(auth);
1140 +
1141 + enc = crypto_spawn_skcipher(&ictx->enc);
1142 + err = PTR_ERR(enc);
1143 + if (IS_ERR(enc))
1144 + goto err_free_ahash;
1145 +
1146 + null = crypto_get_default_null_skcipher2();
1147 + err = PTR_ERR(null);
1148 + if (IS_ERR(null))
1149 + goto err_free_skcipher;
1150 +
1151 + ctx->auth = auth;
1152 + ctx->enc = enc;
1153 + ctx->null = null;
1154 +
1155 + /*
1156 + * Allow enough space for two digests. The two digests will be compared
1157 + * during the decryption phase. One will come from the decrypted packet
1158 + * and the other will be calculated. For encryption, one digest is
1159 + * padded (up to a cipher blocksize) and chained with the payload
1160 + */
1161 + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
1162 + crypto_ahash_alignmask(auth),
1163 + crypto_ahash_alignmask(auth) + 1) +
1164 + max(crypto_ahash_digestsize(auth),
1165 + crypto_skcipher_blocksize(enc));
1166 +
1167 + crypto_aead_set_reqsize(tfm,
1168 + sizeof(struct tls_request_ctx) +
1169 + ctx->reqoff +
1170 + max_t(unsigned int,
1171 + crypto_ahash_reqsize(auth) +
1172 + sizeof(struct ahash_request),
1173 + crypto_skcipher_reqsize(enc) +
1174 + sizeof(struct skcipher_request)));
1175 +
1176 + return 0;
1177 +
1178 +err_free_skcipher:
1179 + crypto_free_skcipher(enc);
1180 +err_free_ahash:
1181 + crypto_free_ahash(auth);
1182 + return err;
1183 +}
1184 +
1185 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
1186 +{
1187 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1188 +
1189 + crypto_free_ahash(ctx->auth);
1190 + crypto_free_skcipher(ctx->enc);
1191 + crypto_put_default_null_skcipher2();
1192 +}
1193 +
1194 +static void crypto_tls_free(struct aead_instance *inst)
1195 +{
1196 + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
1197 +
1198 + crypto_drop_skcipher(&ctx->enc);
1199 + crypto_drop_ahash(&ctx->auth);
1200 + kfree(inst);
1201 +}
1202 +
1203 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
1204 +{
1205 + struct crypto_attr_type *algt;
1206 + struct aead_instance *inst;
1207 + struct hash_alg_common *auth;
1208 + struct crypto_alg *auth_base;
1209 + struct skcipher_alg *enc;
1210 + struct tls_instance_ctx *ctx;
1211 + const char *enc_name;
1212 + int err;
1213 +
1214 + algt = crypto_get_attr_type(tb);
1215 + if (IS_ERR(algt))
1216 + return PTR_ERR(algt);
1217 +
1218 + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
1219 + return -EINVAL;
1220 +
1221 + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
1222 + CRYPTO_ALG_TYPE_AHASH_MASK |
1223 + crypto_requires_sync(algt->type, algt->mask));
1224 + if (IS_ERR(auth))
1225 + return PTR_ERR(auth);
1226 +
1227 + auth_base = &auth->base;
1228 +
1229 + enc_name = crypto_attr_alg_name(tb[2]);
1230 + err = PTR_ERR(enc_name);
1231 + if (IS_ERR(enc_name))
1232 + goto out_put_auth;
1233 +
1234 + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
1235 + err = -ENOMEM;
1236 + if (!inst)
1237 + goto out_put_auth;
1238 +
1239 + ctx = aead_instance_ctx(inst);
1240 +
1241 + err = crypto_init_ahash_spawn(&ctx->auth, auth,
1242 + aead_crypto_instance(inst));
1243 + if (err)
1244 + goto err_free_inst;
1245 +
1246 + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
1247 + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
1248 + crypto_requires_sync(algt->type,
1249 + algt->mask));
1250 + if (err)
1251 + goto err_drop_auth;
1252 +
1253 + enc = crypto_spawn_skcipher_alg(&ctx->enc);
1254 +
1255 + err = -ENAMETOOLONG;
1256 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
1257 + "tls10(%s,%s)", auth_base->cra_name,
1258 + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
1259 + goto err_drop_enc;
1260 +
1261 + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1262 + "tls10(%s,%s)", auth_base->cra_driver_name,
1263 + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
1264 + goto err_drop_enc;
1265 +
1266 + inst->alg.base.cra_flags = (auth_base->cra_flags |
1267 + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
1268 + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
1269 + auth_base->cra_priority;
1270 + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
1271 + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
1272 + enc->base.cra_alignmask;
1273 + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
1274 +
1275 + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
1276 + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
1277 + inst->alg.maxauthsize = auth->digestsize;
1278 +
1279 + inst->alg.init = crypto_tls_init_tfm;
1280 + inst->alg.exit = crypto_tls_exit_tfm;
1281 +
1282 + inst->alg.setkey = crypto_tls_setkey;
1283 + inst->alg.encrypt = crypto_tls_encrypt;
1284 + inst->alg.decrypt = crypto_tls_decrypt;
1285 +
1286 + inst->free = crypto_tls_free;
1287 +
1288 + err = aead_register_instance(tmpl, inst);
1289 + if (err)
1290 + goto err_drop_enc;
1291 +
1292 +out:
1293 + crypto_mod_put(auth_base);
1294 + return err;
1295 +
1296 +err_drop_enc:
1297 + crypto_drop_skcipher(&ctx->enc);
1298 +err_drop_auth:
1299 + crypto_drop_ahash(&ctx->auth);
1300 +err_free_inst:
1301 + kfree(inst);
1302 +out_put_auth:
1303 + goto out;
1304 +}
1305 +
1306 +static struct crypto_template crypto_tls_tmpl = {
1307 + .name = "tls10",
1308 + .create = crypto_tls_create,
1309 + .module = THIS_MODULE,
1310 +};
1311 +
1312 +static int __init crypto_tls_module_init(void)
1313 +{
1314 + return crypto_register_template(&crypto_tls_tmpl);
1315 +}
1316 +
1317 +static void __exit crypto_tls_module_exit(void)
1318 +{
1319 + crypto_unregister_template(&crypto_tls_tmpl);
1320 +}
1321 +
1322 +module_init(crypto_tls_module_init);
1323 +module_exit(crypto_tls_module_exit);
1324 +
1325 +MODULE_LICENSE("GPL");
1326 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
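A worked example of the encrypt path in crypto_tls_encrypt()/crypto_tls_gen_padicv(): for the first testmgr vector, the 16-byte payload plus the 20-byte HMAC-SHA1 ICV gives 36 bytes, so padlen = 16 - (36 % 16) = 12, each pad byte holding padlen - 1 = 0x0b, and the ciphertext is 48 bytes — matching .rlen = 16 + 20 + 12 in the vectors. A hedged in-kernel usage sketch (function and parameter names here are illustrative, not part of the patch), mirroring what testmgr.c does:

    #include <crypto/aead.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    /* Encrypt one TLS 1.0 record; src must cover the 13-byte pseudo-header
     * (assoclen) followed by the plaintext, and dst must leave room for
     * plaintext + ICV + padding after the pseudo-header.
     */
    static int tls10_encrypt_record(const u8 *key, unsigned int klen, u8 *iv,
    				struct scatterlist *src,
    				struct scatterlist *dst,
    				unsigned int assoclen, unsigned int ptextlen)
    {
    	struct crypto_aead *tfm;
    	struct aead_request *req;
    	int err;

    	tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
    	if (IS_ERR(tfm))
    		return PTR_ERR(tfm);

    	err = crypto_aead_setkey(tfm, key, klen);	/* authenc key blob */
    	if (!err)
    		err = crypto_aead_setauthsize(tfm, 20);	/* SHA-1 ICV */
    	if (err)
    		goto out;

    	req = aead_request_alloc(tfm, GFP_KERNEL);
    	if (!req) {
    		err = -ENOMEM;
    		goto out;
    	}

    	aead_request_set_ad(req, assoclen);
    	aead_request_set_crypt(req, src, dst, ptextlen, iv);

    	/* a real caller must wait on a completion for -EINPROGRESS/-EBUSY */
    	err = crypto_aead_encrypt(req);

    	aead_request_free(req);
    out:
    	crypto_free_aead(tfm);
    	return err;
    }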
1327 --- a/drivers/crypto/Makefile
1328 +++ b/drivers/crypto/Makefile
1329 @@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chel
1330 obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
1331 obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
1332 obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
1333 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
1334 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
1335 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
1336 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
1337 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
1338 --- a/drivers/crypto/caam/Kconfig
1339 +++ b/drivers/crypto/caam/Kconfig
1340 @@ -1,7 +1,17 @@
1341 +config CRYPTO_DEV_FSL_CAAM_COMMON
1342 + tristate
1343 +
1344 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1345 + tristate
1346 +
1347 +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1348 + tristate
1349 +
1350 config CRYPTO_DEV_FSL_CAAM
1351 - tristate "Freescale CAAM-Multicore driver backend"
1352 + tristate "Freescale CAAM-Multicore platform driver backend"
1353 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
1354 select SOC_BUS
1355 + select CRYPTO_DEV_FSL_CAAM_COMMON
1356 help
1357 Enables the driver module for Freescale's Cryptographic Accelerator
1358 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
1359 @@ -12,9 +22,16 @@ config CRYPTO_DEV_FSL_CAAM
1360 To compile this driver as a module, choose M here: the module
1361 will be called caam.
1362
1363 +if CRYPTO_DEV_FSL_CAAM
1364 +
1365 +config CRYPTO_DEV_FSL_CAAM_DEBUG
1366 + bool "Enable debug output in CAAM driver"
1367 + help
1368 + Selecting this will enable printing of various debug
1369 + information in the CAAM driver.
1370 +
1371 config CRYPTO_DEV_FSL_CAAM_JR
1372 tristate "Freescale CAAM Job Ring driver backend"
1373 - depends on CRYPTO_DEV_FSL_CAAM
1374 default y
1375 help
1376 Enables the driver module for Job Rings which are part of
1377 @@ -25,9 +42,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
1378 To compile this driver as a module, choose M here: the module
1379 will be called caam_jr.
1380
1381 +if CRYPTO_DEV_FSL_CAAM_JR
1382 +
1383 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1384 int "Job Ring size"
1385 - depends on CRYPTO_DEV_FSL_CAAM_JR
1386 range 2 9
1387 default "9"
1388 help
1389 @@ -45,7 +63,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1390
1391 config CRYPTO_DEV_FSL_CAAM_INTC
1392 bool "Job Ring interrupt coalescing"
1393 - depends on CRYPTO_DEV_FSL_CAAM_JR
1394 help
1395 Enable the Job Ring's interrupt coalescing feature.
1396
1397 @@ -74,9 +91,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
1398 threshold. Range is 1-65535.
1399
1400 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1401 - tristate "Register algorithm implementations with the Crypto API"
1402 - depends on CRYPTO_DEV_FSL_CAAM_JR
1403 + bool "Register algorithm implementations with the Crypto API"
1404 default y
1405 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1406 select CRYPTO_AEAD
1407 select CRYPTO_AUTHENC
1408 select CRYPTO_BLKCIPHER
1409 @@ -85,13 +102,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1410 scatterlist crypto API (such as the linux native IPSec
1411 stack) to the SEC4 via job ring.
1412
1413 - To compile this as a module, choose M here: the module
1414 - will be called caamalg.
1415 -
1416 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1417 - tristate "Queue Interface as Crypto API backend"
1418 - depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
1419 + bool "Queue Interface as Crypto API backend"
1420 + depends on FSL_SDK_DPA && NET
1421 default y
1422 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1423 select CRYPTO_AUTHENC
1424 select CRYPTO_BLKCIPHER
1425 help
1426 @@ -102,36 +117,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1427 assigned to the kernel should also be more than the number of
1428 job rings.
1429
1430 - To compile this as a module, choose M here: the module
1431 - will be called caamalg_qi.
1432 -
1433 config CRYPTO_DEV_FSL_CAAM_AHASH_API
1434 - tristate "Register hash algorithm implementations with Crypto API"
1435 - depends on CRYPTO_DEV_FSL_CAAM_JR
1436 + bool "Register hash algorithm implementations with Crypto API"
1437 default y
1438 + select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1439 select CRYPTO_HASH
1440 help
1441 Selecting this will offload ahash for users of the
1442 scatterlist crypto API to the SEC4 via job ring.
1443
1444 - To compile this as a module, choose M here: the module
1445 - will be called caamhash.
1446 -
1447 config CRYPTO_DEV_FSL_CAAM_PKC_API
1448 - tristate "Register public key cryptography implementations with Crypto API"
1449 - depends on CRYPTO_DEV_FSL_CAAM_JR
1450 + bool "Register public key cryptography implementations with Crypto API"
1451 default y
1452 select CRYPTO_RSA
1453 help
1454 Selecting this will allow SEC Public key support for RSA.
1455 Supported cryptographic primitives: encryption, decryption,
1456 signature and verification.
1457 - To compile this as a module, choose M here: the module
1458 - will be called caam_pkc.
1459
1460 config CRYPTO_DEV_FSL_CAAM_RNG_API
1461 - tristate "Register caam device for hwrng API"
1462 - depends on CRYPTO_DEV_FSL_CAAM_JR
1463 + bool "Register caam device for hwrng API"
1464 default y
1465 select CRYPTO_RNG
1466 select HW_RANDOM
1467 @@ -139,16 +144,24 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
1468 Selecting this will register the SEC4 hardware rng to
1469 	  the hw_random API for supplying the kernel entropy pool.
1470
1471 - To compile this as a module, choose M here: the module
1472 - will be called caamrng.
1473 +endif # CRYPTO_DEV_FSL_CAAM_JR
1474
1475 -config CRYPTO_DEV_FSL_CAAM_DEBUG
1476 - bool "Enable debug output in CAAM driver"
1477 - depends on CRYPTO_DEV_FSL_CAAM
1478 - help
1479 - Selecting this will enable printing of various debug
1480 - information in the CAAM driver.
1481 +endif # CRYPTO_DEV_FSL_CAAM
1482
1483 -config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1484 - def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
1485 - CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
1486 +config CRYPTO_DEV_FSL_DPAA2_CAAM
1487 + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
1488 + depends on FSL_MC_DPIO
1489 + select CRYPTO_DEV_FSL_CAAM_COMMON
1490 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1491 + select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1492 + select CRYPTO_BLKCIPHER
1493 + select CRYPTO_AUTHENC
1494 + select CRYPTO_AEAD
1495 + select CRYPTO_HASH
1496 + ---help---
1497 + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
1498 + It handles DPSECI DPAA2 objects that sit on the Management Complex
1499 + (MC) fsl-mc bus.
1500 +
1501 + To compile this as a module, choose M here: the module
1502 + will be called dpaa2_caam.
1503 --- a/drivers/crypto/caam/Makefile
1504 +++ b/drivers/crypto/caam/Makefile
1505 @@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
1506 ccflags-y := -DDEBUG
1507 endif
1508
1509 +ccflags-y += -DVERSION=\"\"
1510 +
1511 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
1512 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
1513 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
1514 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
1515 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
1516 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
1517 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
1518 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
1519 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
1520 -
1521 -caam-objs := ctrl.o
1522 -caam_jr-objs := jr.o key_gen.o error.o
1523 -caam_pkc-y := caampkc.o pkc_desc.o
1524 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
1525 +
1526 +caam-y := ctrl.o
1527 +caam_jr-y := jr.o key_gen.o
1528 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
1529 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
1530 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
1531 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
1532 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
1533 +
1534 +caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
1535 ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
1536 ccflags-y += -DCONFIG_CAAM_QI
1537 - caam-objs += qi.o
1538 endif
1539 +
1540 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
1541 +
1542 +dpaa2_caam-y := caamalg_qi2.o dpseci.o
1543 --- a/drivers/crypto/caam/caamalg.c
1544 +++ b/drivers/crypto/caam/caamalg.c
1545 @@ -71,6 +71,8 @@
1546 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
1547 CAAM_CMD_SZ * 5)
1548
1549 +#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
1550 +
1551 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
1552 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
1553
1554 @@ -108,6 +110,7 @@ struct caam_ctx {
1555 dma_addr_t sh_desc_dec_dma;
1556 dma_addr_t sh_desc_givenc_dma;
1557 dma_addr_t key_dma;
1558 + enum dma_data_direction dir;
1559 struct device *jrdev;
1560 struct alginfo adata;
1561 struct alginfo cdata;
1562 @@ -118,6 +121,7 @@ static int aead_null_set_sh_desc(struct
1563 {
1564 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1565 struct device *jrdev = ctx->jrdev;
1566 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1567 u32 *desc;
1568 int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
1569 ctx->adata.keylen_pad;
1570 @@ -136,9 +140,10 @@ static int aead_null_set_sh_desc(struct
1571
1572 /* aead_encrypt shared descriptor */
1573 desc = ctx->sh_desc_enc;
1574 - cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
1575 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
1576 + ctrlpriv->era);
1577 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1578 - desc_bytes(desc), DMA_TO_DEVICE);
1579 + desc_bytes(desc), ctx->dir);
1580
1581 /*
1582 * Job Descriptor and Shared Descriptors
1583 @@ -154,9 +159,10 @@ static int aead_null_set_sh_desc(struct
1584
1585 /* aead_decrypt shared descriptor */
1586 desc = ctx->sh_desc_dec;
1587 - cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
1588 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
1589 + ctrlpriv->era);
1590 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1591 - desc_bytes(desc), DMA_TO_DEVICE);
1592 + desc_bytes(desc), ctx->dir);
1593
1594 return 0;
1595 }
1596 @@ -168,6 +174,7 @@ static int aead_set_sh_desc(struct crypt
1597 unsigned int ivsize = crypto_aead_ivsize(aead);
1598 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1599 struct device *jrdev = ctx->jrdev;
1600 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1601 u32 ctx1_iv_off = 0;
1602 u32 *desc, *nonce = NULL;
1603 u32 inl_mask;
1604 @@ -234,9 +241,9 @@ static int aead_set_sh_desc(struct crypt
1605 desc = ctx->sh_desc_enc;
1606 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
1607 ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
1608 - false);
1609 + false, ctrlpriv->era);
1610 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1611 - desc_bytes(desc), DMA_TO_DEVICE);
1612 + desc_bytes(desc), ctx->dir);
1613
1614 skip_enc:
1615 /*
1616 @@ -266,9 +273,9 @@ skip_enc:
1617 desc = ctx->sh_desc_dec;
1618 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
1619 ctx->authsize, alg->caam.geniv, is_rfc3686,
1620 - nonce, ctx1_iv_off, false);
1621 + nonce, ctx1_iv_off, false, ctrlpriv->era);
1622 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1623 - desc_bytes(desc), DMA_TO_DEVICE);
1624 + desc_bytes(desc), ctx->dir);
1625
1626 if (!alg->caam.geniv)
1627 goto skip_givenc;
1628 @@ -300,9 +307,9 @@ skip_enc:
1629 desc = ctx->sh_desc_enc;
1630 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
1631 ctx->authsize, is_rfc3686, nonce,
1632 - ctx1_iv_off, false);
1633 + ctx1_iv_off, false, ctrlpriv->era);
1634 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1635 - desc_bytes(desc), DMA_TO_DEVICE);
1636 + desc_bytes(desc), ctx->dir);
1637
1638 skip_givenc:
1639 return 0;
1640 @@ -323,6 +330,7 @@ static int gcm_set_sh_desc(struct crypto
1641 {
1642 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1643 struct device *jrdev = ctx->jrdev;
1644 + unsigned int ivsize = crypto_aead_ivsize(aead);
1645 u32 *desc;
1646 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1647 ctx->cdata.keylen;
1648 @@ -344,9 +352,9 @@ static int gcm_set_sh_desc(struct crypto
1649 }
1650
1651 desc = ctx->sh_desc_enc;
1652 - cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
1653 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1654 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1655 - desc_bytes(desc), DMA_TO_DEVICE);
1656 + desc_bytes(desc), ctx->dir);
1657
1658 /*
1659 * Job Descriptor and Shared Descriptors
1660 @@ -361,9 +369,9 @@ static int gcm_set_sh_desc(struct crypto
1661 }
1662
1663 desc = ctx->sh_desc_dec;
1664 - cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
1665 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1666 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1667 - desc_bytes(desc), DMA_TO_DEVICE);
1668 + desc_bytes(desc), ctx->dir);
1669
1670 return 0;
1671 }
1672 @@ -382,6 +390,7 @@ static int rfc4106_set_sh_desc(struct cr
1673 {
1674 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1675 struct device *jrdev = ctx->jrdev;
1676 + unsigned int ivsize = crypto_aead_ivsize(aead);
1677 u32 *desc;
1678 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1679 ctx->cdata.keylen;
1680 @@ -403,9 +412,10 @@ static int rfc4106_set_sh_desc(struct cr
1681 }
1682
1683 desc = ctx->sh_desc_enc;
1684 - cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
1685 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1686 + false);
1687 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1688 - desc_bytes(desc), DMA_TO_DEVICE);
1689 + desc_bytes(desc), ctx->dir);
1690
1691 /*
1692 * Job Descriptor and Shared Descriptors
1693 @@ -420,9 +430,10 @@ static int rfc4106_set_sh_desc(struct cr
1694 }
1695
1696 desc = ctx->sh_desc_dec;
1697 - cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
1698 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1699 + false);
1700 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1701 - desc_bytes(desc), DMA_TO_DEVICE);
1702 + desc_bytes(desc), ctx->dir);
1703
1704 return 0;
1705 }
1706 @@ -442,6 +453,7 @@ static int rfc4543_set_sh_desc(struct cr
1707 {
1708 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1709 struct device *jrdev = ctx->jrdev;
1710 + unsigned int ivsize = crypto_aead_ivsize(aead);
1711 u32 *desc;
1712 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1713 ctx->cdata.keylen;
1714 @@ -463,9 +475,10 @@ static int rfc4543_set_sh_desc(struct cr
1715 }
1716
1717 desc = ctx->sh_desc_enc;
1718 - cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
1719 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1720 + false);
1721 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1722 - desc_bytes(desc), DMA_TO_DEVICE);
1723 + desc_bytes(desc), ctx->dir);
1724
1725 /*
1726 * Job Descriptor and Shared Descriptors
1727 @@ -480,9 +493,10 @@ static int rfc4543_set_sh_desc(struct cr
1728 }
1729
1730 desc = ctx->sh_desc_dec;
1731 - cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
1732 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1733 + false);
1734 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1735 - desc_bytes(desc), DMA_TO_DEVICE);
1736 + desc_bytes(desc), ctx->dir);
1737
1738 return 0;
1739 }
1740 @@ -498,11 +512,67 @@ static int rfc4543_setauthsize(struct cr
1741 return 0;
1742 }
1743
1744 +static int chachapoly_set_sh_desc(struct crypto_aead *aead)
1745 +{
1746 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
1747 + struct device *jrdev = ctx->jrdev;
1748 + unsigned int ivsize = crypto_aead_ivsize(aead);
1749 + u32 *desc;
1750 +
1751 + if (!ctx->cdata.keylen || !ctx->authsize)
1752 + return 0;
1753 +
1754 + desc = ctx->sh_desc_enc;
1755 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
1756 + ctx->authsize, true, false);
1757 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1758 + desc_bytes(desc), ctx->dir);
1759 +
1760 + desc = ctx->sh_desc_dec;
1761 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
1762 + ctx->authsize, false, false);
1763 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1764 + desc_bytes(desc), ctx->dir);
1765 +
1766 + return 0;
1767 +}
1768 +
1769 +static int chachapoly_setauthsize(struct crypto_aead *aead,
1770 + unsigned int authsize)
1771 +{
1772 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
1773 +
1774 + if (authsize != POLY1305_DIGEST_SIZE)
1775 + return -EINVAL;
1776 +
1777 + ctx->authsize = authsize;
1778 + return chachapoly_set_sh_desc(aead);
1779 +}
1780 +
1781 +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
1782 + unsigned int keylen)
1783 +{
1784 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
1785 + unsigned int ivsize = crypto_aead_ivsize(aead);
1786 + unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
1787 +
1788 + if (keylen != CHACHA20_KEY_SIZE + saltlen) {
1789 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1790 + return -EINVAL;
1791 + }
1792 +
1793 + ctx->cdata.key_virt = key;
1794 + ctx->cdata.keylen = keylen - saltlen;
1795 +
1796 + return chachapoly_set_sh_desc(aead);
1797 +}
1798 +
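+/*
+ * Reading aid (not additional patch code): key layouts accepted by
+ * chachapoly_setkey() above, assuming CHACHA20_KEY_SIZE == 32 and
+ * CHACHAPOLY_IV_SIZE == 12:
+ *
+ *   rfc7539(chacha20,poly1305):    ivsize = 12 -> saltlen = 0,
+ *                                  keylen = 32 (ChaCha20 key only)
+ *   rfc7539esp(chacha20,poly1305): ivsize = 8  -> saltlen = 4,
+ *                                  keylen = 36 (32-byte key || 4-byte salt)
+ *
+ * Only the first keylen - saltlen bytes are programmed as the cipher key;
+ * the trailing salt, when present, supplies the nonce prefix that the
+ * 8-byte per-request IV completes (see init_chachapoly_job() below).
+ */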
1799 static int aead_setkey(struct crypto_aead *aead,
1800 const u8 *key, unsigned int keylen)
1801 {
1802 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1803 struct device *jrdev = ctx->jrdev;
1804 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1805 struct crypto_authenc_keys keys;
1806 int ret = 0;
1807
1808 @@ -517,6 +587,27 @@ static int aead_setkey(struct crypto_aea
1809 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1810 #endif
1811
1812 + /*
1813 + * If DKP is supported, use it in the shared descriptor to generate
1814 + * the split key.
1815 + */
1816 + if (ctrlpriv->era >= 6) {
1817 + ctx->adata.keylen = keys.authkeylen;
1818 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
1819 + OP_ALG_ALGSEL_MASK);
1820 +
1821 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1822 + goto badkey;
1823 +
1824 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
1825 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
1826 + keys.enckeylen);
1827 + dma_sync_single_for_device(jrdev, ctx->key_dma,
1828 + ctx->adata.keylen_pad +
1829 + keys.enckeylen, ctx->dir);
1830 + goto skip_split_key;
1831 + }
1832 +
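+	/*
+	 * For scale (illustrative, based on the mdpadlen table behind
+	 * split_key_len() in key_gen.h): hmac(sha256) yields a split key
+	 * length of 2 * 32 = 64 bytes, so ctx->key holds a 64-byte
+	 * split-key slot followed by the encryption key. On the DKP path
+	 * the raw authkey copied there is later replaced in place by the
+	 * CAAM-generated split key.
+	 */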
1833 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
1834 keys.authkeylen, CAAM_MAX_KEY_SIZE -
1835 keys.enckeylen);
1836 @@ -527,12 +618,14 @@ static int aead_setkey(struct crypto_aea
1837 /* postpend encryption key to auth split key */
1838 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
1839 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
1840 - keys.enckeylen, DMA_TO_DEVICE);
1841 + keys.enckeylen, ctx->dir);
1842 #ifdef DEBUG
1843 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
1844 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
1845 ctx->adata.keylen_pad + keys.enckeylen, 1);
1846 #endif
1847 +
1848 +skip_split_key:
1849 ctx->cdata.keylen = keys.enckeylen;
1850 return aead_set_sh_desc(aead);
1851 badkey:
1852 @@ -552,7 +645,7 @@ static int gcm_setkey(struct crypto_aead
1853 #endif
1854
1855 memcpy(ctx->key, key, keylen);
1856 - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
1857 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
1858 ctx->cdata.keylen = keylen;
1859
1860 return gcm_set_sh_desc(aead);
1861 @@ -580,7 +673,7 @@ static int rfc4106_setkey(struct crypto_
1862 */
1863 ctx->cdata.keylen = keylen - 4;
1864 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1865 - DMA_TO_DEVICE);
1866 + ctx->dir);
1867 return rfc4106_set_sh_desc(aead);
1868 }
1869
1870 @@ -606,7 +699,7 @@ static int rfc4543_setkey(struct crypto_
1871 */
1872 ctx->cdata.keylen = keylen - 4;
1873 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1874 - DMA_TO_DEVICE);
1875 + ctx->dir);
1876 return rfc4543_set_sh_desc(aead);
1877 }
1878
1879 @@ -658,21 +751,21 @@ static int ablkcipher_setkey(struct cryp
1880 cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
1881 ctx1_iv_off);
1882 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1883 - desc_bytes(desc), DMA_TO_DEVICE);
1884 + desc_bytes(desc), ctx->dir);
1885
1886 /* ablkcipher_decrypt shared descriptor */
1887 desc = ctx->sh_desc_dec;
1888 cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
1889 ctx1_iv_off);
1890 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1891 - desc_bytes(desc), DMA_TO_DEVICE);
1892 + desc_bytes(desc), ctx->dir);
1893
1894 /* ablkcipher_givencrypt shared descriptor */
1895 desc = ctx->sh_desc_givenc;
1896 cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
1897 ctx1_iv_off);
1898 dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
1899 - desc_bytes(desc), DMA_TO_DEVICE);
1900 + desc_bytes(desc), ctx->dir);
1901
1902 return 0;
1903 }
1904 @@ -701,13 +794,13 @@ static int xts_ablkcipher_setkey(struct
1905 desc = ctx->sh_desc_enc;
1906 cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
1907 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1908 - desc_bytes(desc), DMA_TO_DEVICE);
1909 + desc_bytes(desc), ctx->dir);
1910
1911 /* xts_ablkcipher_decrypt shared descriptor */
1912 desc = ctx->sh_desc_dec;
1913 cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
1914 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1915 - desc_bytes(desc), DMA_TO_DEVICE);
1916 + desc_bytes(desc), ctx->dir);
1917
1918 return 0;
1919 }
1920 @@ -987,9 +1080,6 @@ static void init_aead_job(struct aead_re
1921 append_seq_out_ptr(desc, dst_dma,
1922 req->assoclen + req->cryptlen - authsize,
1923 out_options);
1924 -
1925 - /* REG3 = assoclen */
1926 - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1927 }
1928
1929 static void init_gcm_job(struct aead_request *req,
1930 @@ -1004,6 +1094,7 @@ static void init_gcm_job(struct aead_req
1931 unsigned int last;
1932
1933 init_aead_job(req, edesc, all_contig, encrypt);
1934 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
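+	/*
+	 * The REG3 = assoclen command above used to be emitted by
+	 * init_aead_job(); it is now per mode, since authenc targets DPOVRD
+	 * on Era 3+ and chacha20-poly1305 first trims assoclen for ESP.
+	 */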
1935
1936 /* BUG This should not be specific to generic GCM. */
1937 last = 0;
1938 @@ -1021,6 +1112,40 @@ static void init_gcm_job(struct aead_req
1939 /* End of blank commands */
1940 }
1941
1942 +static void init_chachapoly_job(struct aead_request *req,
1943 + struct aead_edesc *edesc, bool all_contig,
1944 + bool encrypt)
1945 +{
1946 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
1947 + unsigned int ivsize = crypto_aead_ivsize(aead);
1948 + unsigned int assoclen = req->assoclen;
1949 + u32 *desc = edesc->hw_desc;
1950 + u32 ctx_iv_off = 4;
1951 +
1952 + init_aead_job(req, edesc, all_contig, encrypt);
1953 +
1954 + if (ivsize != CHACHAPOLY_IV_SIZE) {
1955 + /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
1956 + ctx_iv_off += 4;
1957 +
1958 + /*
1959 + * The associated data comes already with the IV but we need
1960 +		 * The associated data already carries the IV, but we need
1961 +		 * to skip it when authenticating or encrypting...
1962 + assoclen -= ivsize;
1963 + }
1964 +
1965 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
1966 +
1967 + /*
1968 +	 * For IPsec, load the IV at a further offset in the same register.
1969 +	 * For RFC7539, simply load the 12-byte nonce in a single operation.
1970 + */
1971 + append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
1972 + LDST_SRCDST_BYTE_CONTEXT |
1973 + ctx_iv_off << LDST_OFFSET_SHIFT);
1974 +}
1975 +
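+/*
+ * Resulting CONTEXT1 layout, per the offsets used above (a sketch; the
+ * salt bytes are the ones programmed by the shared descriptor):
+ *
+ *   rfc7539    (ivsize == 12): ctx_iv_off = 4, CONTEXT1[4..15] = nonce
+ *   rfc7539esp (ivsize == 8):  ctx_iv_off = 8, CONTEXT1[4..7]  = salt,
+ *                                              CONTEXT1[8..15] = IV
+ *
+ * In the ESP case assoclen is reduced by ivsize because the request
+ * carries the IV at the tail of its associated data.
+ */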
1976 static void init_authenc_job(struct aead_request *req,
1977 struct aead_edesc *edesc,
1978 bool all_contig, bool encrypt)
1979 @@ -1030,6 +1155,7 @@ static void init_authenc_job(struct aead
1980 struct caam_aead_alg, aead);
1981 unsigned int ivsize = crypto_aead_ivsize(aead);
1982 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1983 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
1984 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
1985 OP_ALG_AAI_CTR_MOD128);
1986 const bool is_rfc3686 = alg->caam.rfc3686;
1987 @@ -1053,6 +1179,15 @@ static void init_authenc_job(struct aead
1988
1989 init_aead_job(req, edesc, all_contig, encrypt);
1990
1991 + /*
1992 +	 * {REG3, DPOVRD} = assoclen, depending on whether the MATH command
1993 +	 * supports having DPOVRD as its destination.
1994 + */
1995 + if (ctrlpriv->era < 3)
1996 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1997 + else
1998 + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
1999 +
2000 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
2001 append_load_as_imm(desc, req->iv, ivsize,
2002 LDST_CLASS_1_CCB |
2003 @@ -1225,8 +1360,16 @@ static struct aead_edesc *aead_edesc_all
2004 }
2005 }
2006
2007 + /*
2008 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
2009 + * the end of the table by allocating more S/G entries.
2010 + */
2011 sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
2012 - sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
2013 + if (mapped_dst_nents > 1)
2014 + sec4_sg_len += ALIGN(mapped_dst_nents, 4);
2015 + else
2016 + sec4_sg_len = ALIGN(sec4_sg_len, 4);
2017 +
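+	/*
+	 * Worked example of the rounding above (illustrative numbers):
+	 * mapped_src_nents = 3 and mapped_dst_nents = 5 give
+	 * 3 + ALIGN(5, 4) = 11 entries; the input table can tolerate
+	 * over-reads because the output table follows it in the same
+	 * allocation. With a single destination segment, ALIGN(3, 4) = 4
+	 * pads the input table itself.
+	 */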
2018 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2019
2020 /* allocate space for base edesc and hw desc commands, link tables */
2021 @@ -1307,6 +1450,72 @@ static int gcm_encrypt(struct aead_reque
2022 return ret;
2023 }
2024
2025 +static int chachapoly_encrypt(struct aead_request *req)
2026 +{
2027 + struct aead_edesc *edesc;
2028 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
2029 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
2030 + struct device *jrdev = ctx->jrdev;
2031 + bool all_contig;
2032 + u32 *desc;
2033 + int ret;
2034 +
2035 + edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
2036 + true);
2037 + if (IS_ERR(edesc))
2038 + return PTR_ERR(edesc);
2039 +
2040 + desc = edesc->hw_desc;
2041 +
2042 + init_chachapoly_job(req, edesc, all_contig, true);
2043 + print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
2044 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2045 + 1);
2046 +
2047 + ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2048 + if (!ret) {
2049 + ret = -EINPROGRESS;
2050 + } else {
2051 + aead_unmap(jrdev, edesc, req);
2052 + kfree(edesc);
2053 + }
2054 +
2055 + return ret;
2056 +}
2057 +
2058 +static int chachapoly_decrypt(struct aead_request *req)
2059 +{
2060 + struct aead_edesc *edesc;
2061 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
2062 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
2063 + struct device *jrdev = ctx->jrdev;
2064 + bool all_contig;
2065 + u32 *desc;
2066 + int ret;
2067 +
2068 + edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
2069 + false);
2070 + if (IS_ERR(edesc))
2071 + return PTR_ERR(edesc);
2072 +
2073 + desc = edesc->hw_desc;
2074 +
2075 + init_chachapoly_job(req, edesc, all_contig, false);
2076 + print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
2077 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2078 + 1);
2079 +
2080 + ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2081 + if (!ret) {
2082 + ret = -EINPROGRESS;
2083 + } else {
2084 + aead_unmap(jrdev, edesc, req);
2085 + kfree(edesc);
2086 + }
2087 +
2088 + return ret;
2089 +}
2090 +
2091 static int ipsec_gcm_encrypt(struct aead_request *req)
2092 {
2093 if (req->assoclen < 8)
2094 @@ -1494,7 +1703,25 @@ static struct ablkcipher_edesc *ablkciph
2095
2096 sec4_sg_ents = 1 + mapped_src_nents;
2097 dst_sg_idx = sec4_sg_ents;
2098 - sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
2099 +
2100 + /*
2101 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
2102 + * the end of the table by allocating more S/G entries. Logic:
2103 + * if (src != dst && output S/G)
2104 + * pad output S/G, if needed
2105 + * else if (src == dst && S/G)
2106 + * overlapping S/Gs; pad one of them
2107 + * else if (input S/G) ...
2108 + * pad input S/G, if needed
2109 + */
2110 + if (mapped_dst_nents > 1)
2111 + sec4_sg_ents += ALIGN(mapped_dst_nents, 4);
2112 + else if ((req->src == req->dst) && (mapped_src_nents > 1))
2113 + sec4_sg_ents = max(ALIGN(sec4_sg_ents, 4),
2114 + 1 + ALIGN(mapped_src_nents, 4));
2115 + else
2116 + sec4_sg_ents = ALIGN(sec4_sg_ents, 4);
2117 +
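+	/*
+	 * Worked example for the in-place case above (illustrative numbers):
+	 * req->src == req->dst with mapped_src_nents = 5 gives
+	 * max(ALIGN(6, 4), 1 + ALIGN(5, 4)) = max(8, 9) = 9 entries, so
+	 * whichever of the two overlapping tables extends further still has
+	 * padded entries behind it.
+	 */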
2118 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
2119
2120 /*
2121 @@ -3196,6 +3423,50 @@ static struct caam_aead_alg driver_aeads
2122 .geniv = true,
2123 },
2124 },
2125 + {
2126 + .aead = {
2127 + .base = {
2128 + .cra_name = "rfc7539(chacha20,poly1305)",
2129 + .cra_driver_name = "rfc7539-chacha20-poly1305-"
2130 + "caam",
2131 + .cra_blocksize = 1,
2132 + },
2133 + .setkey = chachapoly_setkey,
2134 + .setauthsize = chachapoly_setauthsize,
2135 + .encrypt = chachapoly_encrypt,
2136 + .decrypt = chachapoly_decrypt,
2137 + .ivsize = CHACHAPOLY_IV_SIZE,
2138 + .maxauthsize = POLY1305_DIGEST_SIZE,
2139 + },
2140 + .caam = {
2141 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2142 + OP_ALG_AAI_AEAD,
2143 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2144 + OP_ALG_AAI_AEAD,
2145 + },
2146 + },
2147 + {
2148 + .aead = {
2149 + .base = {
2150 + .cra_name = "rfc7539esp(chacha20,poly1305)",
2151 + .cra_driver_name = "rfc7539esp-chacha20-"
2152 + "poly1305-caam",
2153 + .cra_blocksize = 1,
2154 + },
2155 + .setkey = chachapoly_setkey,
2156 + .setauthsize = chachapoly_setauthsize,
2157 + .encrypt = chachapoly_encrypt,
2158 + .decrypt = chachapoly_decrypt,
2159 + .ivsize = 8,
2160 + .maxauthsize = POLY1305_DIGEST_SIZE,
2161 + },
2162 + .caam = {
2163 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2164 + OP_ALG_AAI_AEAD,
2165 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2166 + OP_ALG_AAI_AEAD,
2167 + },
2168 + },
2169 };
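+/*
+ * Usage sketch (illustrative only; plain crypto API calls, not part of
+ * this patch) - the new templates are reached like any other AEAD:
+ *
+ *	struct crypto_aead *tfm;
+ *
+ *	tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
+ *	crypto_aead_setkey(tfm, key, CHACHA20_KEY_SIZE);
+ *	crypto_aead_setauthsize(tfm, POLY1305_DIGEST_SIZE);
+ *
+ * caam_algapi_init() only registers them when both CCHA and PTHA are
+ * instantiated, so the "-caam" driver names imply hardware support.
+ */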
2170
2171 struct caam_crypto_alg {
2172 @@ -3204,9 +3475,11 @@ struct caam_crypto_alg {
2173 struct caam_alg_entry caam;
2174 };
2175
2176 -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
2177 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2178 + bool uses_dkp)
2179 {
2180 dma_addr_t dma_addr;
2181 + struct caam_drv_private *priv;
2182
2183 ctx->jrdev = caam_jr_alloc();
2184 if (IS_ERR(ctx->jrdev)) {
2185 @@ -3214,10 +3487,16 @@ static int caam_init_common(struct caam_
2186 return PTR_ERR(ctx->jrdev);
2187 }
2188
2189 + priv = dev_get_drvdata(ctx->jrdev->parent);
2190 + if (priv->era >= 6 && uses_dkp)
2191 + ctx->dir = DMA_BIDIRECTIONAL;
2192 + else
2193 + ctx->dir = DMA_TO_DEVICE;
2194 +
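+	/*
+	 * Rationale for the direction above: with DKP the device rewrites
+	 * ctx->key in place with the derived split key (see aead_setkey()),
+	 * so the buffer can no longer be mapped to-device only and must be
+	 * DMA_BIDIRECTIONAL.
+	 */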
2195 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
2196 offsetof(struct caam_ctx,
2197 sh_desc_enc_dma),
2198 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
2199 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
2200 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
2201 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
2202 caam_jr_free(ctx->jrdev);
2203 @@ -3245,7 +3524,7 @@ static int caam_cra_init(struct crypto_t
2204 container_of(alg, struct caam_crypto_alg, crypto_alg);
2205 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2206
2207 - return caam_init_common(ctx, &caam_alg->caam);
2208 + return caam_init_common(ctx, &caam_alg->caam, false);
2209 }
2210
2211 static int caam_aead_init(struct crypto_aead *tfm)
2212 @@ -3255,14 +3534,15 @@ static int caam_aead_init(struct crypto_
2213 container_of(alg, struct caam_aead_alg, aead);
2214 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2215
2216 - return caam_init_common(ctx, &caam_alg->caam);
2217 + return caam_init_common(ctx, &caam_alg->caam,
2218 + alg->setkey == aead_setkey);
2219 }
2220
2221 static void caam_exit_common(struct caam_ctx *ctx)
2222 {
2223 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
2224 offsetof(struct caam_ctx, sh_desc_enc_dma),
2225 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
2226 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
2227 caam_jr_free(ctx->jrdev);
2228 }
2229
2230 @@ -3276,7 +3556,7 @@ static void caam_aead_exit(struct crypto
2231 caam_exit_common(crypto_aead_ctx(tfm));
2232 }
2233
2234 -static void __exit caam_algapi_exit(void)
2235 +void caam_algapi_exit(void)
2236 {
2237
2238 struct caam_crypto_alg *t_alg, *n;
2239 @@ -3355,56 +3635,52 @@ static void caam_aead_alg_init(struct ca
2240 alg->exit = caam_aead_exit;
2241 }
2242
2243 -static int __init caam_algapi_init(void)
2244 +int caam_algapi_init(struct device *ctrldev)
2245 {
2246 - struct device_node *dev_node;
2247 - struct platform_device *pdev;
2248 - struct device *ctrldev;
2249 - struct caam_drv_private *priv;
2250 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
2251 int i = 0, err = 0;
2252 - u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
2253 + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
2254 unsigned int md_limit = SHA512_DIGEST_SIZE;
2255 bool registered = false;
2256
2257 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2258 - if (!dev_node) {
2259 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2260 - if (!dev_node)
2261 - return -ENODEV;
2262 - }
2263 -
2264 - pdev = of_find_device_by_node(dev_node);
2265 - if (!pdev) {
2266 - of_node_put(dev_node);
2267 - return -ENODEV;
2268 - }
2269 -
2270 - ctrldev = &pdev->dev;
2271 - priv = dev_get_drvdata(ctrldev);
2272 - of_node_put(dev_node);
2273 -
2274 - /*
2275 - * If priv is NULL, it's probably because the caam driver wasn't
2276 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2277 - */
2278 - if (!priv)
2279 - return -ENODEV;
2280 -
2281 -
2282 INIT_LIST_HEAD(&alg_list);
2283
2284 /*
2285 * Register crypto algorithms the device supports.
2286 * First, detect presence and attributes of DES, AES, and MD blocks.
2287 */
2288 - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2289 - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2290 - des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
2291 - aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
2292 - md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2293 + if (priv->era < 10) {
2294 + u32 cha_vid, cha_inst;
2295 +
2296 + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2297 + aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
2298 + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2299 +
2300 + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2301 + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
2302 + CHA_ID_LS_DES_SHIFT;
2303 + aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
2304 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2305 + ccha_inst = 0;
2306 + ptha_inst = 0;
2307 + } else {
2308 + u32 aesa, mdha;
2309 +
2310 + aesa = rd_reg32(&priv->ctrl->vreg.aesa);
2311 + mdha = rd_reg32(&priv->ctrl->vreg.mdha);
2312 +
2313 + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2314 + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2315 +
2316 + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
2317 + aes_inst = aesa & CHA_VER_NUM_MASK;
2318 + md_inst = mdha & CHA_VER_NUM_MASK;
2319 + ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
2320 + ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
2321 + }
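+	/*
+	 * In short (as coded above, with mask names from regs.h): Era < 10
+	 * packs the AES/DES/MD fields into the shared CHA_ID_LS/CHA_NUM_LS
+	 * words and has no CCHA/PTHA, while Era >= 10 exposes one version
+	 * register per CHA, with the VID field selected by CHA_VER_VID_MASK
+	 * and the instantiation count by CHA_VER_NUM_MASK.
+	 */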
2322
2323 /* If MD is present, limit digest size based on LP256 */
2324 - if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
2325 + if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
2326 md_limit = SHA256_DIGEST_SIZE;
2327
2328 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2329 @@ -3426,10 +3702,10 @@ static int __init caam_algapi_init(void)
2330 * Check support for AES modes not available
2331 * on LP devices.
2332 */
2333 - if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
2334 - if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
2335 - OP_ALG_AAI_XTS)
2336 - continue;
2337 + if (aes_vid == CHA_VER_VID_AES_LP &&
2338 + (alg->class1_alg_type & OP_ALG_AAI_MASK) ==
2339 + OP_ALG_AAI_XTS)
2340 + continue;
2341
2342 t_alg = caam_alg_alloc(alg);
2343 if (IS_ERR(t_alg)) {
2344 @@ -3468,21 +3744,28 @@ static int __init caam_algapi_init(void)
2345 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
2346 continue;
2347
2348 + /* Skip CHACHA20 algorithms if not supported by device */
2349 + if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
2350 + continue;
2351 +
2352 + /* Skip POLY1305 algorithms if not supported by device */
2353 + if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
2354 + continue;
2355 +
2356 /*
2357 * Check support for AES algorithms not available
2358 * on LP devices.
2359 */
2360 - if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
2361 - if (alg_aai == OP_ALG_AAI_GCM)
2362 - continue;
2363 + if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
2364 + continue;
2365
2366 /*
2367 * Skip algorithms requiring message digests
2368 * if MD or MD size is not supported by device.
2369 */
2370 - if (c2_alg_sel &&
2371 - (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
2372 - continue;
2373 + if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
2374 + (!md_inst || t_alg->aead.maxauthsize > md_limit))
2375 + continue;
2376
2377 caam_aead_alg_init(t_alg);
2378
2379 @@ -3502,10 +3785,3 @@ static int __init caam_algapi_init(void)
2380
2381 return err;
2382 }
2383 -
2384 -module_init(caam_algapi_init);
2385 -module_exit(caam_algapi_exit);
2386 -
2387 -MODULE_LICENSE("GPL");
2388 -MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2389 -MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
2390 --- a/drivers/crypto/caam/caamalg_desc.c
2391 +++ b/drivers/crypto/caam/caamalg_desc.c
2392 @@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *d
2393 * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
2394 * (non-protocol) with no (null) encryption.
2395 * @desc: pointer to buffer used for descriptor construction
2396 - * @adata: pointer to authentication transform definitions. Note that since a
2397 - * split key is to be used, the size of the split key itself is
2398 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2399 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2400 + * @adata: pointer to authentication transform definitions.
2401 + * A split key is required for SEC Era < 6; the size of the split key
2402 + * is specified in this case. Valid algorithm values - one of
2403 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2404 + * with OP_ALG_AAI_HMAC_PRECOMP.
2405 * @icvsize: integrity check value (ICV) size (truncated or full)
2406 - *
2407 - * Note: Requires an MDHA split key.
2408 + * @era: SEC Era
2409 */
2410 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
2411 - unsigned int icvsize)
2412 + unsigned int icvsize, int era)
2413 {
2414 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
2415
2416 @@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * c
2417 /* Skip if already shared */
2418 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2419 JUMP_COND_SHRD);
2420 - if (adata->key_inline)
2421 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2422 - adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
2423 - KEY_ENC);
2424 - else
2425 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2426 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2427 + if (era < 6) {
2428 + if (adata->key_inline)
2429 + append_key_as_imm(desc, adata->key_virt,
2430 + adata->keylen_pad, adata->keylen,
2431 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2432 + KEY_ENC);
2433 + else
2434 + append_key(desc, adata->key_dma, adata->keylen,
2435 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2436 + } else {
2437 + append_proto_dkp(desc, adata);
2438 + }
2439 set_jump_tgt_here(desc, key_jump_cmd);
2440
2441 /* assoclen + cryptlen = seqinlen */
2442 @@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_enca
2443 * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
2444 * (non-protocol) with no (null) decryption.
2445 * @desc: pointer to buffer used for descriptor construction
2446 - * @adata: pointer to authentication transform definitions. Note that since a
2447 - * split key is to be used, the size of the split key itself is
2448 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2449 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2450 + * @adata: pointer to authentication transform definitions.
2451 + * A split key is required for SEC Era < 6; the size of the split key
2452 + * is specified in this case. Valid algorithm values - one of
2453 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2454 + * with OP_ALG_AAI_HMAC_PRECOMP.
2455 * @icvsize: integrity check value (ICV) size (truncated or full)
2456 - *
2457 - * Note: Requires an MDHA split key.
2458 + * @era: SEC Era
2459 */
2460 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
2461 - unsigned int icvsize)
2462 + unsigned int icvsize, int era)
2463 {
2464 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
2465
2466 @@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * c
2467 /* Skip if already shared */
2468 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2469 JUMP_COND_SHRD);
2470 - if (adata->key_inline)
2471 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2472 - adata->keylen, CLASS_2 |
2473 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2474 - else
2475 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2476 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2477 + if (era < 6) {
2478 + if (adata->key_inline)
2479 + append_key_as_imm(desc, adata->key_virt,
2480 + adata->keylen_pad, adata->keylen,
2481 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2482 + KEY_ENC);
2483 + else
2484 + append_key(desc, adata->key_dma, adata->keylen,
2485 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2486 + } else {
2487 + append_proto_dkp(desc, adata);
2488 + }
2489 set_jump_tgt_here(desc, key_jump_cmd);
2490
2491 /* Class 2 operation */
2492 @@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_deca
2493 static void init_sh_desc_key_aead(u32 * const desc,
2494 struct alginfo * const cdata,
2495 struct alginfo * const adata,
2496 - const bool is_rfc3686, u32 *nonce)
2497 + const bool is_rfc3686, u32 *nonce, int era)
2498 {
2499 u32 *key_jump_cmd;
2500 unsigned int enckeylen = cdata->keylen;
2501 @@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 *
2502 if (is_rfc3686)
2503 enckeylen -= CTR_RFC3686_NONCE_SIZE;
2504
2505 - if (adata->key_inline)
2506 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2507 - adata->keylen, CLASS_2 |
2508 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2509 - else
2510 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2511 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2512 + if (era < 6) {
2513 + if (adata->key_inline)
2514 + append_key_as_imm(desc, adata->key_virt,
2515 + adata->keylen_pad, adata->keylen,
2516 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2517 + KEY_ENC);
2518 + else
2519 + append_key(desc, adata->key_dma, adata->keylen,
2520 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2521 + } else {
2522 + append_proto_dkp(desc, adata);
2523 + }
2524
2525 if (cdata->key_inline)
2526 append_key_as_imm(desc, cdata->key_virt, enckeylen,
2527 @@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 *
2528 * @cdata: pointer to block cipher transform definitions
2529 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2530 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2531 - * @adata: pointer to authentication transform definitions. Note that since a
2532 - * split key is to be used, the size of the split key itself is
2533 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2534 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2535 + * @adata: pointer to authentication transform definitions.
2536 + * A split key is required for SEC Era < 6; the size of the split key
2537 + * is specified in this case. Valid algorithm values - one of
2538 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2539 + * with OP_ALG_AAI_HMAC_PRECOMP.
2540 * @ivsize: initialization vector size
2541 * @icvsize: integrity check value (ICV) size (truncated or full)
2542 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2543 * @nonce: pointer to rfc3686 nonce
2544 * @ctx1_iv_off: IV offset in CONTEXT1 register
2545 * @is_qi: true when called from caam/qi
2546 - *
2547 - * Note: Requires an MDHA split key.
2548 + * @era: SEC Era
2549 */
2550 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
2551 struct alginfo *adata, unsigned int ivsize,
2552 unsigned int icvsize, const bool is_rfc3686,
2553 - u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
2554 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
2555 + int era)
2556 {
2557 /* Note: Context registers are saved. */
2558 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2559 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2560
2561 /* Class 2 operation */
2562 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2563 @@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const
2564 }
2565
2566 /* Read and write assoclen bytes */
2567 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2568 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2569 + if (is_qi || era < 3) {
2570 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2571 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2572 + } else {
2573 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2574 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2575 + }
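+	/*
+	 * Note on the split above: the QI frontend always stages assoclen
+	 * in REG3 (loaded from the frame via a SEQ LOAD), while the job-ring
+	 * frontend on Era 3+ reads it straight from DPOVRD, matching the
+	 * MATH destination chosen in init_authenc_job().
+	 */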
2576
2577 /* Skip assoc data */
2578 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2579 @@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
2580 * @cdata: pointer to block cipher transform definitions
2581 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2582 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2583 - * @adata: pointer to authentication transform definitions. Note that since a
2584 - * split key is to be used, the size of the split key itself is
2585 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2586 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2587 + * @adata: pointer to authentication transform definitions.
2588 + * A split key is required for SEC Era < 6; the size of the split key
2589 + * is specified in this case. Valid algorithm values - one of
2590 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2591 + * with OP_ALG_AAI_HMAC_PRECOMP.
2592 * @ivsize: initialization vector size
2593 * @icvsize: integrity check value (ICV) size (truncated or full)
2594 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2595 * @nonce: pointer to rfc3686 nonce
2596 * @ctx1_iv_off: IV offset in CONTEXT1 register
2597 * @is_qi: true when called from caam/qi
2598 - *
2599 - * Note: Requires an MDHA split key.
2600 + * @era: SEC Era
2601 */
2602 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
2603 struct alginfo *adata, unsigned int ivsize,
2604 unsigned int icvsize, const bool geniv,
2605 const bool is_rfc3686, u32 *nonce,
2606 - const u32 ctx1_iv_off, const bool is_qi)
2607 + const u32 ctx1_iv_off, const bool is_qi, int era)
2608 {
2609 /* Note: Context registers are saved. */
2610 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2611 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2612
2613 /* Class 2 operation */
2614 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2615 @@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const
2616 }
2617
2618 /* Read and write assoclen bytes */
2619 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2620 - if (geniv)
2621 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
2622 - else
2623 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2624 + if (is_qi || era < 3) {
2625 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2626 + if (geniv)
2627 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
2628 + ivsize);
2629 + else
2630 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
2631 + CAAM_CMD_SZ);
2632 + } else {
2633 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2634 + if (geniv)
2635 + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
2636 + ivsize);
2637 + else
2638 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
2639 + CAAM_CMD_SZ);
2640 + }
2641
2642 /* Skip assoc data */
2643 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2644 @@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
2645 * @cdata: pointer to block cipher transform definitions
2646 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2647 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2648 - * @adata: pointer to authentication transform definitions. Note that since a
2649 - * split key is to be used, the size of the split key itself is
2650 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2651 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2652 + * @adata: pointer to authentication transform definitions.
2653 + * A split key is required for SEC Era < 6; the size of the split key
2654 + * is specified in this case. Valid algorithm values - one of
2655 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2656 + * with OP_ALG_AAI_HMAC_PRECOMP.
2657 * @ivsize: initialization vector size
2658 * @icvsize: integrity check value (ICV) size (truncated or full)
2659 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2660 * @nonce: pointer to rfc3686 nonce
2661 * @ctx1_iv_off: IV offset in CONTEXT1 register
2662 * @is_qi: true when called from caam/qi
2663 - *
2664 - * Note: Requires an MDHA split key.
2665 + * @era: SEC Era
2666 */
2667 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
2668 struct alginfo *adata, unsigned int ivsize,
2669 unsigned int icvsize, const bool is_rfc3686,
2670 u32 *nonce, const u32 ctx1_iv_off,
2671 - const bool is_qi)
2672 + const bool is_qi, int era)
2673 {
2674 u32 geniv, moveiv;
2675
2676 /* Note: Context registers are saved. */
2677 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2678 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2679
2680 if (is_qi) {
2681 u32 *wait_load_cmd;
2682 @@ -528,8 +561,13 @@ copy_iv:
2683 OP_ALG_ENCRYPT);
2684
2685 /* Read and write assoclen bytes */
2686 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2687 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2688 + if (is_qi || era < 3) {
2689 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2690 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2691 + } else {
2692 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2693 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2694 + }
2695
2696 /* Skip assoc data */
2697 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2698 @@ -583,14 +621,431 @@ copy_iv:
2699 EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
2700
2701 /**
2702 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
2703 + * @desc: pointer to buffer used for descriptor construction
2704 + * @cdata: pointer to block cipher transform definitions
2705 + *          Valid algorithm values - OP_ALG_ALGSEL_AES ANDed
2706 + *          with OP_ALG_AAI_CBC
2707 + * @adata: pointer to authentication transform definitions.
2708 + *          A split key is required for SEC Era < 6; the size of the split key
2709 + *          is specified in this case. Valid algorithm values - OP_ALG_ALGSEL_SHA1
2710 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2711 + * @assoclen: associated data length
2712 + * @ivsize: initialization vector size
2713 + * @authsize: authentication data size
2714 + * @blocksize: block cipher size
2715 + * @era: SEC Era
2716 + */
2717 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
2718 + struct alginfo *adata, unsigned int assoclen,
2719 + unsigned int ivsize, unsigned int authsize,
2720 + unsigned int blocksize, int era)
2721 +{
2722 + u32 *key_jump_cmd, *zero_payload_jump_cmd;
2723 + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
2724 +
2725 + /*
2726 +	 * Compute the index (in bytes) of the LOAD whose destination is the
2727 +	 * Class 1 Data Size Register, and of the LOAD that generates padding
2728 + */
2729 + if (adata->key_inline) {
2730 + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2731 + cdata->keylen - 4 * CAAM_CMD_SZ;
2732 + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2733 + cdata->keylen - 2 * CAAM_CMD_SZ;
2734 + } else {
2735 + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2736 + 4 * CAAM_CMD_SZ;
2737 + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2738 + 2 * CAAM_CMD_SZ;
2739 + }
2740 +
2741 + stidx = 1 << HDR_START_IDX_SHIFT;
2742 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2743 +
2744 +	/* skip key loading if keys are already loaded due to sharing */
2745 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2746 + JUMP_COND_SHRD);
2747 +
2748 + if (era < 6) {
2749 + if (adata->key_inline)
2750 + append_key_as_imm(desc, adata->key_virt,
2751 + adata->keylen_pad, adata->keylen,
2752 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2753 + KEY_ENC);
2754 + else
2755 + append_key(desc, adata->key_dma, adata->keylen,
2756 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2757 + } else {
2758 + append_proto_dkp(desc, adata);
2759 + }
2760 +
2761 + if (cdata->key_inline)
2762 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
2763 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
2764 + else
2765 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2766 + KEY_DEST_CLASS_REG);
2767 +
2768 + set_jump_tgt_here(desc, key_jump_cmd);
2769 +
2770 + /* class 2 operation */
2771 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2772 + OP_ALG_ENCRYPT);
2773 + /* class 1 operation */
2774 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2775 + OP_ALG_ENCRYPT);
2776 +
2777 + /* payloadlen = input data length - (assoclen + ivlen) */
2778 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
2779 +
2780 + /* math1 = payloadlen + icvlen */
2781 + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
2782 +
2783 + /* padlen = block_size - math1 % block_size */
2784 + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
2785 + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
2786 +
2787 + /* cryptlen = payloadlen + icvlen + padlen */
2788 + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
2789 +
2790 + /*
2791 +	 * Update the immediate data of the LOAD targeting the Class 1
2792 +	 * Data Size Register with the computed padding length value.
2793 + */
2794 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2795 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
2796 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2797 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
2798 +
2799 +	/* overwrite PL field of the padding INFO FIFO entry */
2800 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2801 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
2802 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2803 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
2804 +
2805 + /* store encrypted payload, icv and padding */
2806 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2807 +
2808 + /* if payload length is zero, jump to zero-payload commands */
2809 + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
2810 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2811 + JUMP_COND_MATH_Z);
2812 +
2813 + /* load iv in context1 */
2814 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2815 + LDST_CLASS_1_CCB | ivsize);
2816 +
2817 + /* read assoc for authentication */
2818 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2819 + FIFOLD_TYPE_MSG);
2820 +	/* insnoop the payload into both class 1 and class 2 FIFOs */
2821 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
2822 + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
2823 +
2824 +	/* jump over the zero-payload commands */
2825 + append_jump(desc, JUMP_TEST_ALL | 3);
2826 +
2827 + /* zero-payload commands */
2828 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
2829 +
2830 + /* load iv in context1 */
2831 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2832 + LDST_CLASS_1_CCB | ivsize);
2833 +
2834 + /* assoc data is the only data for authentication */
2835 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2836 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
2837 +
2838 + /* send icv to encryption */
2839 + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
2840 + authsize);
2841 +
2842 + /* update class 1 data size register with padding length */
2843 + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
2844 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
2845 +
2846 + /* generate padding and send it to encryption */
2847 + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
2848 + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
2849 + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
2850 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
2851 +
2852 +#ifdef DEBUG
2853 + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
2854 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
2855 + desc_bytes(desc), 1);
2856 +#endif
2857 +}
2858 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
2859 +
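+/*
+ * Worked example of the padding math above (illustrative numbers for
+ * tls10(hmac(sha1),cbc(aes)): blocksize = 16, authsize = 20,
+ * assoclen = 13, ivsize = 16). For a 32-byte record payload:
+ *
+ *	math1    = payloadlen + icvlen          = 52
+ *	padlen   = 16 - (52 & 15)               = 12
+ *	cryptlen = payloadlen + icvlen + padlen = 64
+ *
+ * i.e. always a whole number of AES blocks, with at least one padding
+ * byte as TLS 1.0 requires (a multiple-of-16 math1 yields padlen = 16).
+ */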
2860 +/**
2861 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
2862 + * @desc: pointer to buffer used for descriptor construction
2863 + * @cdata: pointer to block cipher transform definitions
2864 + *          Valid algorithm values - OP_ALG_ALGSEL_AES ANDed
2865 + *          with OP_ALG_AAI_CBC
2866 + * @adata: pointer to authentication transform definitions.
2867 + *          A split key is required for SEC Era < 6; the size of the split key
2868 + *          is specified in this case. Valid algorithm values - OP_ALG_ALGSEL_SHA1
2869 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2870 + * @assoclen: associated data length
2871 + * @ivsize: initialization vector size
2872 + * @authsize: authentication data size
2873 + * @blocksize: block cipher size
2874 + * @era: SEC Era
2875 + */
2876 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
2877 + struct alginfo *adata, unsigned int assoclen,
2878 + unsigned int ivsize, unsigned int authsize,
2879 + unsigned int blocksize, int era)
2880 +{
2881 + u32 stidx, jumpback;
2882 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
2883 + /*
2884 + * Pointer Size bool determines the size of address pointers.
2885 + * false - Pointers fit in one 32-bit word.
2886 + * true - Pointers fit in two 32-bit words.
2887 + */
2888 + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
2889 +
2890 + stidx = 1 << HDR_START_IDX_SHIFT;
2891 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2892 +
2893 +	/* skip key loading if keys are already loaded due to sharing */
2894 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2895 + JUMP_COND_SHRD);
2896 +
2897 + if (era < 6)
2898 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2899 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
2900 + else
2901 + append_proto_dkp(desc, adata);
2902 +
2903 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2904 + KEY_DEST_CLASS_REG);
2905 +
2906 + set_jump_tgt_here(desc, key_jump_cmd);
2907 +
2908 + /* class 2 operation */
2909 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2910 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
2911 + /* class 1 operation */
2912 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2913 + OP_ALG_DECRYPT);
2914 +
2915 + /* VSIL = input data length - 2 * block_size */
2916 + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
2917 + blocksize);
2918 +
2919 + /*
2920 + * payloadlen + icvlen + padlen = input data length - (assoclen +
2921 + * ivsize)
2922 + */
2923 + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
2924 +
2925 +	/* skip data up to the last-but-one cipher block */
2926 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
2927 +
2928 + /* load iv for the last cipher block */
2929 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2930 + LDST_CLASS_1_CCB | ivsize);
2931 +
2932 + /* read last cipher block */
2933 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
2934 + FIFOLD_TYPE_LAST1 | blocksize);
2935 +
2936 + /* move decrypted block into math0 and math1 */
2937 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
2938 + blocksize);
2939 +
2940 + /* reset AES CHA */
2941 + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
2942 + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
2943 +
2944 + /* rewind input sequence */
2945 + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
2946 +
2947 + /* key1 is in decryption form */
2948 + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
2949 + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
2950 +
2951 + /* load iv in context1 */
2952 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
2953 + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
2954 +
2955 + /* read sequence number */
2956 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
2957 + /* load Type, Version and Len fields in math0 */
2958 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
2959 + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
2960 +
2961 + /* compute (padlen - 1) */
2962 + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
2963 +
2964 + /* math2 = icvlen + (padlen - 1) + 1 */
2965 + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
2966 +
2967 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
2968 +
2969 + /* VSOL = payloadlen + icvlen + padlen */
2970 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
2971 +
2972 + if (caam_little_end)
2973 + append_moveb(desc, MOVE_WAITCOMP |
2974 + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
2975 +
2976 + /* update Len field */
2977 + append_math_sub(desc, REG0, REG0, REG2, 8);
2978 +
2979 + /* store decrypted payload, icv and padding */
2980 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2981 +
2982 +	/* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
2983 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
2984 +
2985 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2986 + JUMP_COND_MATH_Z);
2987 +
2988 + /* send Type, Version and Len(pre ICV) fields to authentication */
2989 + append_move(desc, MOVE_WAITCOMP |
2990 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
2991 + (3 << MOVE_OFFSET_SHIFT) | 5);
2992 +
2993 + /* outsnooping payload */
2994 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
2995 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
2996 + FIFOLDST_VLF);
2997 + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
2998 +
2999 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
3000 + /* send Type, Version and Len(pre ICV) fields to authentication */
3001 + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
3002 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
3003 + (3 << MOVE_OFFSET_SHIFT) | 5);
3004 +
3005 + set_jump_tgt_here(desc, skip_zero_jump_cmd);
3006 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
3007 +
3008 + /* load icvlen and padlen */
3009 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
3010 + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
3011 +
3012 +	/* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
3013 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
3014 +
3015 + /*
3016 + * Start a new input sequence using the SEQ OUT PTR command options,
3017 + * pointer and length used when the current output sequence was defined.
3018 + */
3019 + if (ps) {
3020 + /*
3021 + * Move the lower 32 bits of Shared Descriptor address, the
3022 + * SEQ OUT PTR command, Output Pointer (2 words) and
3023 + * Output Length into math registers.
3024 + */
3025 + if (caam_little_end)
3026 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3027 + MOVE_DEST_MATH0 |
3028 + (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
3029 + else
3030 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3031 + MOVE_DEST_MATH0 |
3032 + (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
3033 +
3034 +		/* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
3035 + append_math_and_imm_u32(desc, REG0, REG0, IMM,
3036 + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
3037 + /* Append a JUMP command after the copied fields */
3038 + jumpback = CMD_JUMP | (char)-9;
3039 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
3040 + LDST_SRCDST_WORD_DECO_MATH2 |
3041 + (4 << LDST_OFFSET_SHIFT));
3042 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
3043 + /* Move the updated fields back to the Job Descriptor */
3044 + if (caam_little_end)
3045 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3046 + MOVE_DEST_DESCBUF |
3047 + (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
3048 + else
3049 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3050 + MOVE_DEST_DESCBUF |
3051 + (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
3052 +
3053 + /*
3054 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
3055 + * and then jump back to the next command from the
3056 + * Shared Descriptor.
3057 + */
3058 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
3059 + } else {
3060 + /*
3061 + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
3062 + * Output Length into math registers.
3063 + */
3064 + if (caam_little_end)
3065 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3066 + MOVE_DEST_MATH0 |
3067 + (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
3068 + else
3069 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3070 + MOVE_DEST_MATH0 |
3071 + (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
3072 +
3073 +		/* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
3074 + append_math_and_imm_u64(desc, REG0, REG0, IMM,
3075 + ~(((u64)(CMD_SEQ_IN_PTR ^
3076 + CMD_SEQ_OUT_PTR)) << 32));
3077 + /* Append a JUMP command after the copied fields */
3078 + jumpback = CMD_JUMP | (char)-7;
3079 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
3080 + LDST_SRCDST_WORD_DECO_MATH1 |
3081 + (4 << LDST_OFFSET_SHIFT));
3082 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
3083 + /* Move the updated fields back to the Job Descriptor */
3084 + if (caam_little_end)
3085 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3086 + MOVE_DEST_DESCBUF |
3087 + (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
3088 + else
3089 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3090 + MOVE_DEST_DESCBUF |
3091 + (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
3092 +
3093 + /*
3094 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
3095 + * and then jump back to the next command from the
3096 + * Shared Descriptor.
3097 + */
3098 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
3099 + }
3100 +
3101 + /* skip payload */
3102 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
3103 + /* check icv */
3104 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
3105 + FIFOLD_TYPE_LAST2 | authsize);
3106 +
3107 +#ifdef DEBUG
3108 + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
3109 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
3110 + desc_bytes(desc), 1);
3111 +#endif
3112 +}
3113 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
3114 +
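+/*
+ * Note on the padlen handling above (a reading aid): TLS 1.0 stores
+ * (padlen - 1) in every padding byte, including the last byte of the
+ * final block, so after decrypting that block out of band the descriptor
+ * masks its low byte into math1 and forms
+ * math2 = (padlen - 1) + 1 + icvlen = padlen + icvlen, exactly the
+ * trailer it trims from the Len field and from VSIL before releasing the
+ * payload for authentication.
+ */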
3115 +/**
3116 * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
3117 * @desc: pointer to buffer used for descriptor construction
3118 * @cdata: pointer to block cipher transform definitions
3119 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3120 + * @ivsize: initialization vector size
3121 * @icvsize: integrity check value (ICV) size (truncated or full)
3122 + * @is_qi: true when called from caam/qi
3123 */
3124 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
3125 - unsigned int icvsize)
3126 + unsigned int ivsize, unsigned int icvsize,
3127 + const bool is_qi)
3128 {
3129 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
3130 *zero_assoc_jump_cmd2;
3131 @@ -612,11 +1067,35 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3132 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3133 OP_ALG_ENCRYPT);
3134
3135 + if (is_qi) {
3136 + u32 *wait_load_cmd;
3137 +
3138 + /* REG3 = assoclen */
3139 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3140 + LDST_SRCDST_WORD_DECO_MATH3 |
3141 + (4 << LDST_OFFSET_SHIFT));
3142 +
3143 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3144 + JUMP_COND_CALM | JUMP_COND_NCP |
3145 + JUMP_COND_NOP | JUMP_COND_NIP |
3146 + JUMP_COND_NIFP);
3147 + set_jump_tgt_here(desc, wait_load_cmd);
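+		/*
+		 * The jump above stalls until the SEQ LOAD of assoclen into
+		 * DECO MATH3 has settled (calm, no pending transfers), so
+		 * REG3 is stable by the time the MATH commands consume it.
+		 */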
3148 +
3149 + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
3150 + ivsize);
3151 + } else {
3152 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
3153 + CAAM_CMD_SZ);
3154 + }
3155 +
3156 /* if assoclen + cryptlen is ZERO, skip to ICV write */
3157 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3158 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
3159 JUMP_COND_MATH_Z);
3160
3161 + if (is_qi)
3162 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3163 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3164 +
3165 /* if assoclen is ZERO, skip reading the assoc data */
3166 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3167 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
3168 @@ -648,8 +1127,11 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3169 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
3170 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
3171
3172 - /* jump the zero-payload commands */
3173 - append_jump(desc, JUMP_TEST_ALL | 2);
3174 + /* jump to ICV writing */
3175 + if (is_qi)
3176 + append_jump(desc, JUMP_TEST_ALL | 4);
3177 + else
3178 + append_jump(desc, JUMP_TEST_ALL | 2);
3179
3180 /* zero-payload commands */
3181 set_jump_tgt_here(desc, zero_payload_jump_cmd);
3182 @@ -657,10 +1139,18 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3183 /* read assoc data */
3184 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
3185 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
3186 + if (is_qi)
3187 + /* jump to ICV writing */
3188 + append_jump(desc, JUMP_TEST_ALL | 2);
3189
3190 /* There is no input data */
3191 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
3192
3193 + if (is_qi)
3194 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3195 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
3196 + FIFOLD_TYPE_LAST1);
3197 +
3198 /* write ICV */
3199 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
3200 LDST_SRCDST_BYTE_CONTEXT);
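With the widened signature, job-ring callers keep the old behaviour by passing is_qi = false, while caam/qi callers pass the IV size so the descriptor itself can consume the assoclen word and the IV that the QI driver places at the head of the input. A hypothetical call site (the key buffer name, 12-byte GCM IV and full 16-byte tag are illustrative, not taken from this patch):

    struct alginfo cdata = {
            .algtype    = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
            .key_virt   = key_buf,      /* hypothetical key buffer */
            .keylen     = 16,           /* AES-128 */
            .key_inline = true,
    };
    u32 sh_desc[DESC_MAX_USED_LEN];     /* per-context buffer, as in the driver */

    /* QI path: 12-byte IV from the input ring, full 16-byte ICV */
    cnstr_shdsc_gcm_encap(sh_desc, &cdata, 12, 16, true);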
3201 @@ -677,10 +1167,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
3202 * @desc: pointer to buffer used for descriptor construction
3203 * @cdata: pointer to block cipher transform definitions
3204 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3205 + * @ivsize: initialization vector size
3206 * @icvsize: integrity check value (ICV) size (truncated or full)
3207 + * @is_qi: true when called from caam/qi
3208 */
3209 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
3210 - unsigned int icvsize)
3211 + unsigned int ivsize, unsigned int icvsize,
3212 + const bool is_qi)
3213 {
3214 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
3215
3216 @@ -701,6 +1194,24 @@ void cnstr_shdsc_gcm_decap(u32 * const d
3217 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3218 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3219
3220 + if (is_qi) {
3221 + u32 *wait_load_cmd;
3222 +
3223 + /* REG3 = assoclen */
3224 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3225 + LDST_SRCDST_WORD_DECO_MATH3 |
3226 + (4 << LDST_OFFSET_SHIFT));
3227 +
3228 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3229 + JUMP_COND_CALM | JUMP_COND_NCP |
3230 + JUMP_COND_NOP | JUMP_COND_NIP |
3231 + JUMP_COND_NIFP);
3232 + set_jump_tgt_here(desc, wait_load_cmd);
3233 +
3234 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3235 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3236 + }
3237 +
3238 /* if assoclen is ZERO, skip reading the assoc data */
3239 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3240 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
3241 @@ -753,10 +1264,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
3242 * @desc: pointer to buffer used for descriptor construction
3243 * @cdata: pointer to block cipher transform definitions
3244 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3245 + * @ivsize: initialization vector size
3246 * @icvsize: integrity check value (ICV) size (truncated or full)
3247 + * @is_qi: true when called from caam/qi
3248 */
3249 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
3250 - unsigned int icvsize)
3251 + unsigned int ivsize, unsigned int icvsize,
3252 + const bool is_qi)
3253 {
3254 u32 *key_jump_cmd;
3255
3256 @@ -777,7 +1291,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
3257 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3258 OP_ALG_ENCRYPT);
3259
3260 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
3261 + if (is_qi) {
3262 + u32 *wait_load_cmd;
3263 +
3264 + /* REG3 = assoclen */
3265 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3266 + LDST_SRCDST_WORD_DECO_MATH3 |
3267 + (4 << LDST_OFFSET_SHIFT));
3268 +
3269 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3270 + JUMP_COND_CALM | JUMP_COND_NCP |
3271 + JUMP_COND_NOP | JUMP_COND_NIP |
3272 + JUMP_COND_NIFP);
3273 + set_jump_tgt_here(desc, wait_load_cmd);
3274 +
3275 + /* Read salt and IV */
3276 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3277 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3278 + FIFOLD_TYPE_IV);
3279 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3280 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3281 + }
3282 +
3283 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
3284 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3285
3286 /* Read assoc data */
3287 @@ -785,7 +1321,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
3288 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
3289
3290 /* Skip IV */
3291 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
3292 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
3293
3294 /* Will read cryptlen bytes */
3295 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
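The two fifo loads above reconstruct the RFC 4106 nonce: the 4-byte salt sits at the end of the key material (so key_virt + keylen points at it) and the 8-byte explicit IV arrives with each request. A standalone sketch of the setkey convention this relies on, assuming the crypto API's trailing 4 salt bytes are kept contiguous with the AES key:

    #include <string.h>

    /* Hypothetical helper mirroring the rfc4106 setkey convention:
     * the last 4 bytes of the crypto API key are the salt, stored
     * right after the AES key so the descriptor can fifo-load it
     * from key_virt + keylen. */
    static int rfc4106_copy_keymat(unsigned char *dst, unsigned int *enckeylen,
                                   const unsigned char *key, unsigned int keylen)
    {
            if (keylen < 4)
                    return -1;

            memcpy(dst, key, keylen);   /* AES key || 4-byte salt */
            *enckeylen = keylen - 4;    /* dst + *enckeylen points at the salt */
            return 0;
    }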
3296 @@ -824,10 +1360,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap)
3297 * @desc: pointer to buffer used for descriptor construction
3298 * @cdata: pointer to block cipher transform definitions
3299 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3300 + * @ivsize: initialization vector size
3301 * @icvsize: integrity check value (ICV) size (truncated or full)
3302 + * @is_qi: true when called from caam/qi
3303 */
3304 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
3305 - unsigned int icvsize)
3306 + unsigned int ivsize, unsigned int icvsize,
3307 + const bool is_qi)
3308 {
3309 u32 *key_jump_cmd;
3310
3311 @@ -849,7 +1388,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
3312 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3313 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3314
3315 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
3316 + if (is_qi) {
3317 + u32 *wait_load_cmd;
3318 +
3319 + /* REG3 = assoclen */
3320 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3321 + LDST_SRCDST_WORD_DECO_MATH3 |
3322 + (4 << LDST_OFFSET_SHIFT));
3323 +
3324 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3325 + JUMP_COND_CALM | JUMP_COND_NCP |
3326 + JUMP_COND_NOP | JUMP_COND_NIP |
3327 + JUMP_COND_NIFP);
3328 + set_jump_tgt_here(desc, wait_load_cmd);
3329 +
3330 + /* Read salt and IV */
3331 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3332 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3333 + FIFOLD_TYPE_IV);
3334 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3335 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3336 + }
3337 +
3338 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
3339 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3340
3341 /* Read assoc data */
3342 @@ -857,7 +1418,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
3343 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
3344
3345 /* Skip IV */
3346 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
3347 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
3348
3349 /* Will read cryptlen bytes */
3350 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
3351 @@ -896,10 +1457,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap)
3352 * @desc: pointer to buffer used for descriptor construction
3353 * @cdata: pointer to block cipher transform definitions
3354 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3355 + * @ivsize: initialization vector size
3356 * @icvsize: integrity check value (ICV) size (truncated or full)
3357 + * @is_qi: true when called from caam/qi
3358 */
3359 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
3360 - unsigned int icvsize)
3361 + unsigned int ivsize, unsigned int icvsize,
3362 + const bool is_qi)
3363 {
3364 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
3365
3366 @@ -920,6 +1484,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
3367 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3368 OP_ALG_ENCRYPT);
3369
3370 + if (is_qi) {
3371 + /* assoclen is not needed, skip it */
3372 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
3373 +
3374 + /* Read salt and IV */
3375 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3376 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3377 + FIFOLD_TYPE_IV);
3378 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3379 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3380 + }
3381 +
3382 /* assoclen + cryptlen = seqinlen */
3383 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
3384
3385 @@ -931,7 +1507,7 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
3386 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
3387 (0x6 << MOVE_LEN_SHIFT));
3388 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
3389 - (0x8 << MOVE_LEN_SHIFT));
3390 + (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
3391
3392 /* Will read assoclen + cryptlen bytes */
3393 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3394 @@ -966,10 +1542,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap)
3395 * @desc: pointer to buffer used for descriptor construction
3396 * @cdata: pointer to block cipher transform definitions
3397 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3398 + * @ivsize: initialization vector size
3399 * @icvsize: integrity check value (ICV) size (truncated or full)
3400 + * @is_qi: true when called from caam/qi
3401 */
3402 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
3403 - unsigned int icvsize)
3404 + unsigned int ivsize, unsigned int icvsize,
3405 + const bool is_qi)
3406 {
3407 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
3408
3409 @@ -990,6 +1569,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3410 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3411 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3412
3413 + if (is_qi) {
3414 + /* assoclen is not needed, skip it */
3415 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
3416 +
3417 + /* Read salt and IV */
3418 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3419 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3420 + FIFOLD_TYPE_IV);
3421 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3422 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3423 + }
3424 +
3425 /* assoclen + cryptlen = seqoutlen */
3426 append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3427
3428 @@ -1001,7 +1592,7 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3429 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
3430 (0x6 << MOVE_LEN_SHIFT));
3431 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
3432 - (0x8 << MOVE_LEN_SHIFT));
3433 + (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
3434
3435 /* Will read assoclen + cryptlen bytes */
3436 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3437 @@ -1035,6 +1626,138 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3438 }
3439 EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
3440
3441 +/**
3442 + * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and
3443 + * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared
3444 + * descriptor (non-protocol).
3445 + * @desc: pointer to buffer used for descriptor construction
3446 + * @cdata: pointer to block cipher transform definitions
3447 + * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with
3448 + * OP_ALG_AAI_AEAD.
3449 + * @adata: pointer to authentication transform definitions
3450 + * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with
3451 + * OP_ALG_AAI_AEAD.
3452 + * @ivsize: initialization vector size
3453 + * @icvsize: integrity check value (ICV) size (truncated or full)
3454 + * @encap: true if encapsulation, false if decapsulation
3455 + * @is_qi: true when called from caam/qi
3456 + */
3457 +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
3458 + struct alginfo *adata, unsigned int ivsize,
3459 + unsigned int icvsize, const bool encap,
3460 + const bool is_qi)
3461 +{
3462 + u32 *key_jump_cmd, *wait_cmd;
3463 + u32 nfifo;
3464 + const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE);
3465 +
3466 + /* Note: Context registers are saved. */
3467 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
3468 +
3469 + /* skip key loading if they are loaded due to sharing */
3470 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3471 + JUMP_COND_SHRD);
3472 +
3473 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen,
3474 + CLASS_1 | KEY_DEST_CLASS_REG);
3475 +
3476 + /* For IPsec load the salt from keymat in the context register */
3477 + if (is_ipsec)
3478 + append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4,
3479 + LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
3480 + 4 << LDST_OFFSET_SHIFT);
3481 +
3482 + set_jump_tgt_here(desc, key_jump_cmd);
3483 +
3484 + /* Class 2 and 1 operations: Poly & ChaCha */
3485 + if (encap) {
3486 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
3487 + OP_ALG_ENCRYPT);
3488 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3489 + OP_ALG_ENCRYPT);
3490 + } else {
3491 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
3492 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3493 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3494 + OP_ALG_DECRYPT);
3495 + }
3496 +
3497 + if (is_qi) {
3498 + u32 *wait_load_cmd;
3499 + u32 ctx1_iv_off = is_ipsec ? 8 : 4;
3500 +
3501 + /* REG3 = assoclen */
3502 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3503 + LDST_SRCDST_WORD_DECO_MATH3 |
3504 + 4 << LDST_OFFSET_SHIFT);
3505 +
3506 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3507 + JUMP_COND_CALM | JUMP_COND_NCP |
3508 + JUMP_COND_NOP | JUMP_COND_NIP |
3509 + JUMP_COND_NIFP);
3510 + set_jump_tgt_here(desc, wait_load_cmd);
3511 +
3512 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
3513 + LDST_SRCDST_BYTE_CONTEXT |
3514 + ctx1_iv_off << LDST_OFFSET_SHIFT);
3515 + }
3516 +
3517 + /*
3518 + * Info-FIFO trick:
3519 + * Read the associated data from the input and send it to both the
3520 + * class1 and class2 alignment blocks. From class1, send it on to
3521 + * the output FIFO and then to memory, since the AD is not encrypted.
3522 + */
3523 + nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 |
3524 + NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND;
3525 + append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB |
3526 + LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3);
3527 +
3528 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3529 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3530 + append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO |
3531 + FIFOLD_CLASS_CLASS1 | LDST_VLF);
3532 + append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK |
3533 + MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3);
3534 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
3535 +
3536 + /* IPsec - copy IV at the output */
3537 + if (is_ipsec)
3538 + append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA |
3539 + 0x2 << 25);
3540 +
3541 + wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
3542 + JUMP_COND_NOP | JUMP_TEST_ALL);
3543 + set_jump_tgt_here(desc, wait_cmd);
3544 +
3545 + if (encap) {
3546 + /* Read and write cryptlen bytes */
3547 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3548 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0,
3549 + CAAM_CMD_SZ);
3550 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
3551 +
3552 + /* Write ICV */
3553 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
3554 + LDST_SRCDST_BYTE_CONTEXT);
3555 + } else {
3556 + /* Read and write cryptlen bytes */
3557 + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3558 + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0,
3559 + CAAM_CMD_SZ);
3560 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
3561 +
3562 + /* Load ICV for verification */
3563 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
3564 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
3565 + }
3566 +
3567 + print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ",
3568 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3569 + 1);
3570 +}
3571 +EXPORT_SYMBOL(cnstr_shdsc_chachapoly);
3572 +
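The ctx1_iv_off choice above follows from the CONTEXT1 layout for ChaCha20: word 0 holds the 32-bit initial block counter and words 1-3 the 96-bit nonce. For rfc7539 the request IV is the whole 12-byte nonce (offset 4); for rfc7539esp (8-byte IV) the salt loaded from key material occupies offset 4 and the explicit IV goes at offset 8. A small sketch of that selection, with the register layout stated as an assumption:

    #include <stdbool.h>

    #define CHACHAPOLY_IV_SIZE 12   /* full rfc7539 nonce */

    /* CONTEXT1 (assumed layout): | counter(4) | nonce(12) |
     * rfc7539esp nonce = salt(4, from keymat) || explicit IV(8, per request) */
    static unsigned int chachapoly_ctx1_iv_off(unsigned int ivsize)
    {
            bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE);

            return is_ipsec ? 8 : 4;  /* skip counter, plus salt for ESP */
    }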
3573 /*
3574 * For ablkcipher encrypt and decrypt, read from req->src and
3575 * write to req->dst
3576 @@ -1053,7 +1776,8 @@ static inline void ablkcipher_append_src
3577 * @desc: pointer to buffer used for descriptor construction
3578 * @cdata: pointer to block cipher transform definitions
3579 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
3580 - * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
3581 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
3582 + * - OP_ALG_ALGSEL_CHACHA20
3583 * @ivsize: initialization vector size
3584 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
3585 * @ctx1_iv_off: IV offset in CONTEXT1 register
3586 @@ -1075,7 +1799,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
3587
3588 /* Load nonce into CONTEXT1 reg */
3589 if (is_rfc3686) {
3590 - u8 *nonce = cdata->key_virt + cdata->keylen;
3591 + const u8 *nonce = cdata->key_virt + cdata->keylen;
3592
3593 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3594 LDST_CLASS_IND_CCB |
3595 @@ -1118,7 +1842,8 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_enc
3596 * @desc: pointer to buffer used for descriptor construction
3597 * @cdata: pointer to block cipher transform definitions
3598 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
3599 - * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
3600 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
3601 + * - OP_ALG_ALGSEL_CHACHA20
3602 * @ivsize: initialization vector size
3603 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
3604 * @ctx1_iv_off: IV offset in CONTEXT1 register
3605 @@ -1140,7 +1865,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
3606
3607 /* Load nonce into CONTEXT1 reg */
3608 if (is_rfc3686) {
3609 - u8 *nonce = cdata->key_virt + cdata->keylen;
3610 + const u8 *nonce = cdata->key_virt + cdata->keylen;
3611
3612 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3613 LDST_CLASS_IND_CCB |
3614 @@ -1209,7 +1934,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
3615
3616 /* Load Nonce into CONTEXT1 reg */
3617 if (is_rfc3686) {
3618 - u8 *nonce = cdata->key_virt + cdata->keylen;
3619 + const u8 *nonce = cdata->key_virt + cdata->keylen;
3620
3621 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3622 LDST_CLASS_IND_CCB |
3623 --- a/drivers/crypto/caam/caamalg_desc.h
3624 +++ b/drivers/crypto/caam/caamalg_desc.h
3625 @@ -17,6 +17,9 @@
3626 #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
3627 #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
3628
3629 +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
3630 +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
3631 +
3632 /* Note: Nonce is counted in cdata.keylen */
3633 #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
3634
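For sizing intuition: CAAM_CMD_SZ is one 4-byte command word and the descriptor buffer holds 64 words (256 bytes), so DESC_TLS10_ENC_LEN budgets (4 + 29) * 4 = 132 bytes of commands before any key material; whether the keys can also be inlined is decided later by desc_inline_query(). The arithmetic, checked at compile time:

    #define CAAM_CMD_SZ        4                    /* bytes per command word */
    #define DESC_TLS_BASE      (4 * CAAM_CMD_SZ)
    #define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)

    _Static_assert(DESC_TLS10_ENC_LEN == 132, "33 command words");
    _Static_assert(DESC_TLS10_ENC_LEN <= 64 * CAAM_CMD_SZ,
                   "must fit the 64-word descriptor buffer");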
3635 @@ -27,14 +30,20 @@
3636 #define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
3637 #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
3638 #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
3639 +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
3640 +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
3641
3642 #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
3643 #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
3644 #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
3645 +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
3646 +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
3647
3648 #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
3649 #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
3650 #define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
3651 +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
3652 +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
3653
3654 #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
3655 #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
3656 @@ -43,46 +52,67 @@
3657 15 * CAAM_CMD_SZ)
3658
3659 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
3660 - unsigned int icvsize);
3661 + unsigned int icvsize, int era);
3662
3663 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
3664 - unsigned int icvsize);
3665 + unsigned int icvsize, int era);
3666
3667 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
3668 struct alginfo *adata, unsigned int ivsize,
3669 unsigned int icvsize, const bool is_rfc3686,
3670 u32 *nonce, const u32 ctx1_iv_off,
3671 - const bool is_qi);
3672 + const bool is_qi, int era);
3673
3674 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
3675 struct alginfo *adata, unsigned int ivsize,
3676 unsigned int icvsize, const bool geniv,
3677 const bool is_rfc3686, u32 *nonce,
3678 - const u32 ctx1_iv_off, const bool is_qi);
3679 + const u32 ctx1_iv_off, const bool is_qi, int era);
3680
3681 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
3682 struct alginfo *adata, unsigned int ivsize,
3683 unsigned int icvsize, const bool is_rfc3686,
3684 u32 *nonce, const u32 ctx1_iv_off,
3685 - const bool is_qi);
3686 + const bool is_qi, int era);
3687 +
3688 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
3689 + struct alginfo *adata, unsigned int assoclen,
3690 + unsigned int ivsize, unsigned int authsize,
3691 + unsigned int blocksize, int era);
3692 +
3693 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
3694 + struct alginfo *adata, unsigned int assoclen,
3695 + unsigned int ivsize, unsigned int authsize,
3696 + unsigned int blocksize, int era);
3697
3698 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
3699 - unsigned int icvsize);
3700 + unsigned int ivsize, unsigned int icvsize,
3701 + const bool is_qi);
3702
3703 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
3704 - unsigned int icvsize);
3705 + unsigned int ivsize, unsigned int icvsize,
3706 + const bool is_qi);
3707
3708 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
3709 - unsigned int icvsize);
3710 + unsigned int ivsize, unsigned int icvsize,
3711 + const bool is_qi);
3712
3713 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
3714 - unsigned int icvsize);
3715 + unsigned int ivsize, unsigned int icvsize,
3716 + const bool is_qi);
3717
3718 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
3719 - unsigned int icvsize);
3720 + unsigned int ivsize, unsigned int icvsize,
3721 + const bool is_qi);
3722
3723 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
3724 - unsigned int icvsize);
3725 + unsigned int ivsize, unsigned int icvsize,
3726 + const bool is_qi);
3727 +
3728 +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
3729 + struct alginfo *adata, unsigned int ivsize,
3730 + unsigned int icvsize, const bool encap,
3731 + const bool is_qi);
3732
3733 void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
3734 unsigned int ivsize, const bool is_rfc3686,
3735 --- a/drivers/crypto/caam/caamalg_qi.c
3736 +++ b/drivers/crypto/caam/caamalg_qi.c
3737 @@ -7,7 +7,7 @@
3738 */
3739
3740 #include "compat.h"
3741 -
3742 +#include "ctrl.h"
3743 #include "regs.h"
3744 #include "intern.h"
3745 #include "desc_constr.h"
3746 @@ -53,6 +53,7 @@ struct caam_ctx {
3747 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
3748 u8 key[CAAM_MAX_KEY_SIZE];
3749 dma_addr_t key_dma;
3750 + enum dma_data_direction dir;
3751 struct alginfo adata;
3752 struct alginfo cdata;
3753 unsigned int authsize;
3754 @@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypt
3755 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
3756 OP_ALG_AAI_CTR_MOD128);
3757 const bool is_rfc3686 = alg->caam.rfc3686;
3758 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
3759
3760 if (!ctx->cdata.keylen || !ctx->authsize)
3761 return 0;
3762 @@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypt
3763
3764 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3765 ivsize, ctx->authsize, is_rfc3686, nonce,
3766 - ctx1_iv_off, true);
3767 + ctx1_iv_off, true, ctrlpriv->era);
3768
3769 skip_enc:
3770 /* aead_decrypt shared descriptor */
3771 @@ -149,7 +151,8 @@ skip_enc:
3772
3773 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
3774 ivsize, ctx->authsize, alg->caam.geniv,
3775 - is_rfc3686, nonce, ctx1_iv_off, true);
3776 + is_rfc3686, nonce, ctx1_iv_off, true,
3777 + ctrlpriv->era);
3778
3779 if (!alg->caam.geniv)
3780 goto skip_givenc;
3781 @@ -176,7 +179,7 @@ skip_enc:
3782
3783 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3784 ivsize, ctx->authsize, is_rfc3686, nonce,
3785 - ctx1_iv_off, true);
3786 + ctx1_iv_off, true, ctrlpriv->era);
3787
3788 skip_givenc:
3789 return 0;
3790 @@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aea
3791 {
3792 struct caam_ctx *ctx = crypto_aead_ctx(aead);
3793 struct device *jrdev = ctx->jrdev;
3794 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
3795 struct crypto_authenc_keys keys;
3796 int ret = 0;
3797
3798 @@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aea
3799 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3800 #endif
3801
3802 + /*
3803 + * If DKP is supported, use it in the shared descriptor to generate
3804 + * the split key.
3805 + */
3806 + if (ctrlpriv->era >= 6) {
3807 + ctx->adata.keylen = keys.authkeylen;
3808 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3809 + OP_ALG_ALGSEL_MASK);
3810 +
3811 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
3812 + goto badkey;
3813 +
3814 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
3815 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
3816 + keys.enckeylen);
3817 + dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
3818 + ctx->adata.keylen_pad +
3819 + keys.enckeylen, ctx->dir);
3820 + goto skip_split_key;
3821 + }
3822 +
3823 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
3824 keys.authkeylen, CAAM_MAX_KEY_SIZE -
3825 keys.enckeylen);
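On Era 6+ hardware the driver no longer round-trips through gen_split_key(): the raw HMAC key is copied into ctx->key and the shared descriptor derives the split (IPAD/OPAD) key itself via the DKP protocol. The keylen_pad bookkeeping relies on the split-key sizing helpers this patch adds to key_gen.h; a sketch of the expected semantics (the pad table is an assumption based on the MDHA digest sizes):

    /* Sketch: split key size is twice the MDHA pad size for the hash. */
    static inline int split_key_len_sketch(u32 hash)
    {
            /* MD5, SHA1, SHA224, SHA256, SHA384, SHA512 */
            static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
            u32 idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;

            return mdpadlen[idx] * 2;
    }

    /* e.g. hmac(sha1): 2 * 20 = 40 bytes of split key in ctx->key, with
     * the encryption key copied right after it, at offset keylen_pad. */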
3826 @@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aea
3827 /* postpend encryption key to auth split key */
3828 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
3829 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
3830 - keys.enckeylen, DMA_TO_DEVICE);
3831 + keys.enckeylen, ctx->dir);
3832 #ifdef DEBUG
3833 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
3834 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
3835 ctx->adata.keylen_pad + keys.enckeylen, 1);
3836 #endif
3837
3838 +skip_split_key:
3839 ctx->cdata.keylen = keys.enckeylen;
3840
3841 ret = aead_set_sh_desc(aead);
3842 @@ -258,55 +284,139 @@ badkey:
3843 return -EINVAL;
3844 }
3845
3846 -static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
3847 - const u8 *key, unsigned int keylen)
3848 +static int tls_set_sh_desc(struct crypto_aead *tls)
3849 {
3850 - struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
3851 - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
3852 - const char *alg_name = crypto_tfm_alg_name(tfm);
3853 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3854 + unsigned int ivsize = crypto_aead_ivsize(tls);
3855 + unsigned int blocksize = crypto_aead_blocksize(tls);
3856 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
3857 + unsigned int data_len[2];
3858 + u32 inl_mask;
3859 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
3860 +
3861 + if (!ctx->cdata.keylen || !ctx->authsize)
3862 + return 0;
3863 +
3864 + /*
3865 + * TLS 1.0 encrypt shared descriptor
3866 + * the Job Descriptor and the Shared Descriptor together
3867 + * must fit into the 64-word descriptor h/w buffer
3868 + */
3869 + data_len[0] = ctx->adata.keylen_pad;
3870 + data_len[1] = ctx->cdata.keylen;
3871 +
3872 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
3873 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
3874 + return -EINVAL;
3875 +
3876 + if (inl_mask & 1)
3877 + ctx->adata.key_virt = ctx->key;
3878 + else
3879 + ctx->adata.key_dma = ctx->key_dma;
3880 +
3881 + if (inl_mask & 2)
3882 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
3883 + else
3884 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
3885 +
3886 + ctx->adata.key_inline = !!(inl_mask & 1);
3887 + ctx->cdata.key_inline = !!(inl_mask & 2);
3888 +
3889 + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3890 + assoclen, ivsize, ctx->authsize, blocksize,
3891 + ctrlpriv->era);
3892 +
3893 + /*
3894 + * TLS 1.0 decrypt shared descriptor
3895 + * Keys do not fit inline, regardless of algorithms used
3896 + */
3897 + ctx->adata.key_inline = false;
3898 + ctx->adata.key_dma = ctx->key_dma;
3899 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
3900 +
3901 + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
3902 + assoclen, ivsize, ctx->authsize, blocksize,
3903 + ctrlpriv->era);
3904 +
3905 + return 0;
3906 +}
3907 +
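desc_inline_query() decides, per key, whether inlining it still lets the shared descriptor plus the job-descriptor I/O section fit the 256-byte buffer, setting bit i of inl_mask when data_len[i] can be inlined. A simplified sketch of those semantics (the real helper in desc_constr.h also accounts for the pointer that a non-inlined key costs):

    /* Simplified sketch, not the real desc_constr.h implementation. */
    static int inline_query_sketch(unsigned int sd_base_len, unsigned int jd_len,
                                   const unsigned int *data_len, u32 *inl_mask,
                                   unsigned int count)
    {
            int rem = 256 - (int)(sd_base_len + jd_len); /* CAAM_DESC_BYTES_MAX */
            unsigned int i;

            *inl_mask = 0;
            for (i = 0; i < count; i++) {
                    if (rem >= (int)data_len[i]) {
                            rem -= data_len[i];
                            *inl_mask |= 1u << i;  /* key i fits inline */
                    }
            }
            return rem >= 0 ? 0 : -1;
    }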
3908 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
3909 +{
3910 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3911 +
3912 + ctx->authsize = authsize;
3913 + tls_set_sh_desc(tls);
3914 +
3915 + return 0;
3916 +}
3917 +
3918 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
3919 + unsigned int keylen)
3920 +{
3921 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3922 struct device *jrdev = ctx->jrdev;
3923 - unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
3924 - u32 ctx1_iv_off = 0;
3925 - const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
3926 - OP_ALG_AAI_CTR_MOD128);
3927 - const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
3928 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
3929 + struct crypto_authenc_keys keys;
3930 int ret = 0;
3931
3932 - memcpy(ctx->key, key, keylen);
3933 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3934 + goto badkey;
3935 +
3936 #ifdef DEBUG
3937 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
3938 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
3939 + keys.authkeylen);
3940 print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
3941 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3942 #endif
3943 - /*
3944 - * AES-CTR needs to load IV in CONTEXT1 reg
3945 - * at an offset of 128bits (16bytes)
3946 - * CONTEXT1[255:128] = IV
3947 - */
3948 - if (ctr_mode)
3949 - ctx1_iv_off = 16;
3950
3951 /*
3952 - * RFC3686 specific:
3953 - * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
3954 - * | *key = {KEY, NONCE}
3955 + * If DKP is supported, use it in the shared descriptor to generate
3956 + * the split key.
3957 */
3958 - if (is_rfc3686) {
3959 - ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
3960 - keylen -= CTR_RFC3686_NONCE_SIZE;
3961 + if (ctrlpriv->era >= 6) {
3962 + ctx->adata.keylen = keys.authkeylen;
3963 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3964 + OP_ALG_ALGSEL_MASK);
3965 +
3966 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
3967 + goto badkey;
3968 +
3969 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
3970 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
3971 + keys.enckeylen);
3972 + dma_sync_single_for_device(jrdev, ctx->key_dma,
3973 + ctx->adata.keylen_pad +
3974 + keys.enckeylen, ctx->dir);
3975 + goto skip_split_key;
3976 }
3977