kernel: bump 4.14 to 4.14.95
target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch
1 From 936d5f485f2ff837cdd7d49839771bd3367e8b92 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Tue, 30 Oct 2018 18:28:03 +0800
4 Subject: [PATCH 37/40] sec: support layerscape
5 This is an integrated patch of SEC support for layerscape: it adds a generic TLS 1.0 record ("tls10") AEAD template with self-tests, DKP and shared-descriptor updates for the CAAM drivers, and the DPAA2 CAAM (DPSECI) backend.
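
Once applied, the new template can be exercised through the tcrypt hook added
below, e.g. "modprobe tcrypt mode=191", which runs
tcrypt_test("tls10(hmac(sha1),cbc(aes))").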
6
7 Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
8 Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
9 Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
10 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
11 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
13 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
14 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
15 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
16 Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
17 Signed-off-by: Biwen Li <biwen.li@nxp.com>
18 ---
19 crypto/Kconfig | 20 +
20 crypto/Makefile | 1 +
21 crypto/tcrypt.c | 27 +-
22 crypto/testmgr.c | 244 ++
23 crypto/testmgr.h | 219 ++
24 crypto/tls.c | 607 +++
25 drivers/crypto/Makefile | 2 +-
26 drivers/crypto/caam/Kconfig | 57 +-
27 drivers/crypto/caam/Makefile | 10 +-
28 drivers/crypto/caam/caamalg.c | 131 +-
29 drivers/crypto/caam/caamalg_desc.c | 761 +++-
30 drivers/crypto/caam/caamalg_desc.h | 47 +-
31 drivers/crypto/caam/caamalg_qi.c | 927 ++++-
32 drivers/crypto/caam/caamalg_qi2.c | 5691 +++++++++++++++++++++++++++
33 drivers/crypto/caam/caamalg_qi2.h | 274 ++
34 drivers/crypto/caam/caamhash.c | 132 +-
35 drivers/crypto/caam/caamhash_desc.c | 108 +
36 drivers/crypto/caam/caamhash_desc.h | 49 +
37 drivers/crypto/caam/compat.h | 2 +
38 drivers/crypto/caam/ctrl.c | 23 +-
39 drivers/crypto/caam/desc.h | 62 +-
40 drivers/crypto/caam/desc_constr.h | 52 +-
41 drivers/crypto/caam/dpseci.c | 865 ++++
42 drivers/crypto/caam/dpseci.h | 433 ++
43 drivers/crypto/caam/dpseci_cmd.h | 287 ++
44 drivers/crypto/caam/error.c | 75 +-
45 drivers/crypto/caam/error.h | 6 +-
46 drivers/crypto/caam/intern.h | 1 +
47 drivers/crypto/caam/jr.c | 42 +
48 drivers/crypto/caam/jr.h | 2 +
49 drivers/crypto/caam/key_gen.c | 30 -
50 drivers/crypto/caam/key_gen.h | 30 +
51 drivers/crypto/caam/qi.c | 85 +-
52 drivers/crypto/caam/qi.h | 2 +-
53 drivers/crypto/caam/regs.h | 2 +
54 drivers/crypto/caam/sg_sw_qm.h | 46 +-
55 drivers/crypto/talitos.c | 8 +
56 37 files changed, 11006 insertions(+), 354 deletions(-)
57 create mode 100644 crypto/tls.c
58 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
59 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
60 create mode 100644 drivers/crypto/caam/caamhash_desc.c
61 create mode 100644 drivers/crypto/caam/caamhash_desc.h
62 create mode 100644 drivers/crypto/caam/dpseci.c
63 create mode 100644 drivers/crypto/caam/dpseci.h
64 create mode 100644 drivers/crypto/caam/dpseci_cmd.h
65
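
Reviewer note (this free-form area between the diffstat and the first hunk is
ignored when the patch is applied): the tls10 template registered by
crypto/tls.c below is reachable from user space through AF_ALG, as the Kconfig
help text mentions. A minimal, illustrative sketch — not part of the patch,
error handling omitted, little-endian host assumed — using the same
authenc-format key blob as the synthetic tls_testvec entries in testmgr.h
(rtattr header, big-endian 32-bit encryption-key length, 20-byte HMAC-SHA1 key,
16-byte AES-CBC key):

    #include <sys/socket.h>
    #include <unistd.h>
    #include <linux/if_alg.h>

    int main(void)
    {
        struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "aead",
            .salg_name   = "tls10(hmac(sha1),cbc(aes))",
        };
        /* rtattr (len 8, type 1) + be32 enc key length (16) +
         * 20-byte HMAC-SHA1 key + 16-byte AES key = 44 bytes,
         * mirroring the test vectors' .key/.klen fields */
        static const char key[] =
            "\x08\x00\x01\x00"
            "\x00\x00\x00\x10"
            "authenticationkey20benckeyis16_bytes";
        int tfm, op;

        tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
        bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
        setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, sizeof(key) - 1);
        /* ICV size: SHA-1 digest length, as hardcoded in the self-test */
        setsockopt(tfm, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 20);
        op = accept(tfm, NULL, 0);
        /* ...issue requests on 'op' with sendmsg()/read(), then: */
        close(op);
        close(tfm);
        return 0;
    }

The resulting record layout matches the vectors below: result length =
payload + 20-byte ICV + padding up to the cipher block size
(e.g. 16 + 20 + 12 = 48 for the first encryption vector).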
66 --- a/crypto/Kconfig
67 +++ b/crypto/Kconfig
68 @@ -312,6 +312,26 @@ config CRYPTO_ECHAINIV
69 a sequence number xored with a salt. This is the default
70 algorithm for CBC.
71
72 +config CRYPTO_TLS
73 + tristate "TLS support"
74 + select CRYPTO_AEAD
75 + select CRYPTO_BLKCIPHER
76 + select CRYPTO_MANAGER
77 + select CRYPTO_HASH
78 + select CRYPTO_NULL
79 + select CRYPTO_AUTHENC
80 + help
81 + Support for TLS 1.0 record encryption and decryption
82 +
83 + This module adds support for encryption/decryption of TLS 1.0 frames
84 + using blockcipher algorithms. The name of the resulting algorithm is
85 + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
86 + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
87 + accelerated versions will be used automatically if available.
88 +
89 + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
90 +	  operations through AF_ALG or cryptodev interfaces.
91 +
92 comment "Block modes"
93
94 config CRYPTO_CBC
95 --- a/crypto/Makefile
96 +++ b/crypto/Makefile
97 @@ -118,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
98 obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
99 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
100 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
101 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
102 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
103 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
104 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
105 --- a/crypto/tcrypt.c
106 +++ b/crypto/tcrypt.c
107 @@ -76,7 +76,7 @@ static char *check[] = {
108 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
109 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
110 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
111 - NULL
112 + "rsa", NULL
113 };
114
115 struct tcrypt_result {
116 @@ -355,11 +355,13 @@ static void test_aead_speed(const char *
117 iv);
118 aead_request_set_ad(req, aad_size);
119
120 - if (secs)
121 + if (secs) {
122 ret = test_aead_jiffies(req, enc, *b_size,
123 secs);
124 - else
125 + cond_resched();
126 + } else {
127 ret = test_aead_cycles(req, enc, *b_size);
128 + }
129
130 if (ret) {
131 pr_err("%s() failed return code=%d\n", e, ret);
132 @@ -736,12 +738,14 @@ static void test_ahash_speed_common(cons
133
134 ahash_request_set_crypt(req, sg, output, speed[i].plen);
135
136 - if (secs)
137 + if (secs) {
138 ret = test_ahash_jiffies(req, speed[i].blen,
139 speed[i].plen, output, secs);
140 - else
141 + cond_resched();
142 + } else {
143 ret = test_ahash_cycles(req, speed[i].blen,
144 speed[i].plen, output);
145 + }
146
147 if (ret) {
148 pr_err("hashing failed ret=%d\n", ret);
149 @@ -959,12 +963,14 @@ static void test_skcipher_speed(const ch
150
151 skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
152
153 - if (secs)
154 + if (secs) {
155 ret = test_acipher_jiffies(req, enc,
156 *b_size, secs);
157 - else
158 + cond_resched();
159 + } else {
160 ret = test_acipher_cycles(req, enc,
161 *b_size);
162 + }
163
164 if (ret) {
165 pr_err("%s() failed flags=%x\n", e,
166 @@ -1336,6 +1342,10 @@ static int do_test(const char *alg, u32
167 ret += tcrypt_test("hmac(sha3-512)");
168 break;
169
170 + case 115:
171 + ret += tcrypt_test("rsa");
172 + break;
173 +
174 case 150:
175 ret += tcrypt_test("ansi_cprng");
176 break;
177 @@ -1397,6 +1407,9 @@ static int do_test(const char *alg, u32
178 case 190:
179 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
180 break;
181 + case 191:
182 + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
183 + break;
184 case 200:
185 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
186 speed_template_16_24_32);
187 --- a/crypto/testmgr.c
188 +++ b/crypto/testmgr.c
189 @@ -117,6 +117,13 @@ struct drbg_test_suite {
190 unsigned int count;
191 };
192
193 +struct tls_test_suite {
194 + struct {
195 + struct tls_testvec *vecs;
196 + unsigned int count;
197 + } enc, dec;
198 +};
199 +
200 struct akcipher_test_suite {
201 const struct akcipher_testvec *vecs;
202 unsigned int count;
203 @@ -140,6 +147,7 @@ struct alg_test_desc {
204 struct hash_test_suite hash;
205 struct cprng_test_suite cprng;
206 struct drbg_test_suite drbg;
207 + struct tls_test_suite tls;
208 struct akcipher_test_suite akcipher;
209 struct kpp_test_suite kpp;
210 } suite;
211 @@ -991,6 +999,233 @@ static int test_aead(struct crypto_aead
212 return 0;
213 }
214
215 +static int __test_tls(struct crypto_aead *tfm, int enc,
216 + struct tls_testvec *template, unsigned int tcount,
217 + const bool diff_dst)
218 +{
219 + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
220 + unsigned int i, k, authsize;
221 + char *q;
222 + struct aead_request *req;
223 + struct scatterlist *sg;
224 + struct scatterlist *sgout;
225 + const char *e, *d;
226 + struct tcrypt_result result;
227 + void *input;
228 + void *output;
229 + void *assoc;
230 + char *iv;
231 + char *key;
232 + char *xbuf[XBUFSIZE];
233 + char *xoutbuf[XBUFSIZE];
234 + char *axbuf[XBUFSIZE];
235 + int ret = -ENOMEM;
236 +
237 + if (testmgr_alloc_buf(xbuf))
238 + goto out_noxbuf;
239 +
240 + if (diff_dst && testmgr_alloc_buf(xoutbuf))
241 + goto out_nooutbuf;
242 +
243 + if (testmgr_alloc_buf(axbuf))
244 + goto out_noaxbuf;
245 +
246 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
247 + if (!iv)
248 + goto out_noiv;
249 +
250 + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
251 + if (!key)
252 + goto out_nokey;
253 +
254 + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
255 + if (!sg)
256 + goto out_nosg;
257 +
258 + sgout = sg + 8;
259 +
260 + d = diff_dst ? "-ddst" : "";
261 + e = enc ? "encryption" : "decryption";
262 +
263 + init_completion(&result.completion);
264 +
265 + req = aead_request_alloc(tfm, GFP_KERNEL);
266 + if (!req) {
267 + pr_err("alg: tls%s: Failed to allocate request for %s\n",
268 + d, algo);
269 + goto out;
270 + }
271 +
272 + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
273 + tcrypt_complete, &result);
274 +
275 + for (i = 0; i < tcount; i++) {
276 + input = xbuf[0];
277 + assoc = axbuf[0];
278 +
279 + ret = -EINVAL;
280 + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
281 + template[i].alen > PAGE_SIZE))
282 + goto out;
283 +
284 + memcpy(assoc, template[i].assoc, template[i].alen);
285 + memcpy(input, template[i].input, template[i].ilen);
286 +
287 + if (template[i].iv)
288 + memcpy(iv, template[i].iv, MAX_IVLEN);
289 + else
290 + memset(iv, 0, MAX_IVLEN);
291 +
292 + crypto_aead_clear_flags(tfm, ~0);
293 +
294 + if (template[i].klen > MAX_KEYLEN) {
295 + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
296 + d, i, algo, template[i].klen, MAX_KEYLEN);
297 + ret = -EINVAL;
298 + goto out;
299 + }
300 + memcpy(key, template[i].key, template[i].klen);
301 +
302 + ret = crypto_aead_setkey(tfm, key, template[i].klen);
303 + if (!ret == template[i].fail) {
304 + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
305 + d, i, algo, crypto_aead_get_flags(tfm));
306 + goto out;
307 + } else if (ret)
308 + continue;
309 +
310 + authsize = 20;
311 + ret = crypto_aead_setauthsize(tfm, authsize);
312 + if (ret) {
313 + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
314 + d, authsize, i, algo);
315 + goto out;
316 + }
317 +
318 + k = !!template[i].alen;
319 + sg_init_table(sg, k + 1);
320 + sg_set_buf(&sg[0], assoc, template[i].alen);
321 + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
322 + template[i].ilen));
323 + output = input;
324 +
325 + if (diff_dst) {
326 + sg_init_table(sgout, k + 1);
327 + sg_set_buf(&sgout[0], assoc, template[i].alen);
328 +
329 + output = xoutbuf[0];
330 + sg_set_buf(&sgout[k], output,
331 + (enc ? template[i].rlen : template[i].ilen));
332 + }
333 +
334 + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
335 + template[i].ilen, iv);
336 +
337 + aead_request_set_ad(req, template[i].alen);
338 +
339 + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
340 +
341 + switch (ret) {
342 + case 0:
343 + if (template[i].novrfy) {
344 + /* verification was supposed to fail */
345 + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
346 + d, e, i, algo);
347 + /* so really, we got a bad message */
348 + ret = -EBADMSG;
349 + goto out;
350 + }
351 + break;
352 + case -EINPROGRESS:
353 + case -EBUSY:
354 + wait_for_completion(&result.completion);
355 + reinit_completion(&result.completion);
356 + ret = result.err;
357 + if (!ret)
358 + break;
359 + case -EBADMSG:
360 + /* verification failure was expected */
361 + if (template[i].novrfy)
362 + continue;
363 + /* fall through */
364 + default:
365 + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
366 + d, e, i, algo, -ret);
367 + goto out;
368 + }
369 +
370 + q = output;
371 + if (memcmp(q, template[i].result, template[i].rlen)) {
372 + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
373 + d, i, e, algo);
374 + hexdump(q, template[i].rlen);
375 + pr_err("should be:\n");
376 + hexdump(template[i].result, template[i].rlen);
377 + ret = -EINVAL;
378 + goto out;
379 + }
380 + }
381 +
382 +out:
383 + aead_request_free(req);
384 +
385 + kfree(sg);
386 +out_nosg:
387 + kfree(key);
388 +out_nokey:
389 + kfree(iv);
390 +out_noiv:
391 + testmgr_free_buf(axbuf);
392 +out_noaxbuf:
393 + if (diff_dst)
394 + testmgr_free_buf(xoutbuf);
395 +out_nooutbuf:
396 + testmgr_free_buf(xbuf);
397 +out_noxbuf:
398 + return ret;
399 +}
400 +
401 +static int test_tls(struct crypto_aead *tfm, int enc,
402 + struct tls_testvec *template, unsigned int tcount)
403 +{
404 + int ret;
405 + /* test 'dst == src' case */
406 + ret = __test_tls(tfm, enc, template, tcount, false);
407 + if (ret)
408 + return ret;
409 + /* test 'dst != src' case */
410 + return __test_tls(tfm, enc, template, tcount, true);
411 +}
412 +
413 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
414 + u32 type, u32 mask)
415 +{
416 + struct crypto_aead *tfm;
417 + int err = 0;
418 +
419 + tfm = crypto_alloc_aead(driver, type, mask);
420 + if (IS_ERR(tfm)) {
421 + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
422 + driver, PTR_ERR(tfm));
423 + return PTR_ERR(tfm);
424 + }
425 +
426 + if (desc->suite.tls.enc.vecs) {
427 + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
428 + desc->suite.tls.enc.count);
429 + if (err)
430 + goto out;
431 + }
432 +
433 + if (!err && desc->suite.tls.dec.vecs)
434 + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
435 + desc->suite.tls.dec.count);
436 +
437 +out:
438 + crypto_free_aead(tfm);
439 + return err;
440 +}
441 +
442 static int test_cipher(struct crypto_cipher *tfm, int enc,
443 const struct cipher_testvec *template,
444 unsigned int tcount)
445 @@ -3518,6 +3753,15 @@ static const struct alg_test_desc alg_te
446 .hash = __VECS(tgr192_tv_template)
447 }
448 }, {
449 + .alg = "tls10(hmac(sha1),cbc(aes))",
450 + .test = alg_test_tls,
451 + .suite = {
452 + .tls = {
453 + .enc = __VECS(tls_enc_tv_template),
454 + .dec = __VECS(tls_dec_tv_template)
455 + }
456 + }
457 + }, {
458 .alg = "vmac(aes)",
459 .test = alg_test_hash,
460 .suite = {
461 --- a/crypto/testmgr.h
462 +++ b/crypto/testmgr.h
463 @@ -125,6 +125,20 @@ struct drbg_testvec {
464 size_t expectedlen;
465 };
466
467 +struct tls_testvec {
468 + char *key; /* wrapped keys for encryption and authentication */
469 + char *iv; /* initialization vector */
470 + char *input; /* input data */
471 + char *assoc; /* associated data: seq num, type, version, input len */
472 + char *result; /* result data */
473 + unsigned char fail; /* the test failure is expected */
474 + unsigned char novrfy; /* dec verification failure expected */
475 + unsigned char klen; /* key length */
476 + unsigned short ilen; /* input data length */
477 + unsigned short alen; /* associated data length */
478 + unsigned short rlen; /* result length */
479 +};
480 +
481 struct akcipher_testvec {
482 const unsigned char *key;
483 const unsigned char *m;
484 @@ -153,6 +167,211 @@ struct kpp_testvec {
485 static const char zeroed_string[48];
486
487 /*
488 + * TLS1.0 synthetic test vectors
489 + */
490 +static struct tls_testvec tls_enc_tv_template[] = {
491 + {
492 +#ifdef __LITTLE_ENDIAN
493 + .key = "\x08\x00" /* rta length */
494 + "\x01\x00" /* rta type */
495 +#else
496 + .key = "\x00\x08" /* rta length */
497 + "\x00\x01" /* rta type */
498 +#endif
499 + "\x00\x00\x00\x10" /* enc key length */
500 + "authenticationkey20benckeyis16_bytes",
501 + .klen = 8 + 20 + 16,
502 + .iv = "iv0123456789abcd",
503 + .input = "Single block msg",
504 + .ilen = 16,
505 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
506 + "\x00\x03\x01\x00\x10",
507 + .alen = 13,
508 + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
509 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
510 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
511 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
512 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
513 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
514 + .rlen = 16 + 20 + 12,
515 + }, {
516 +#ifdef __LITTLE_ENDIAN
517 + .key = "\x08\x00" /* rta length */
518 + "\x01\x00" /* rta type */
519 +#else
520 + .key = "\x00\x08" /* rta length */
521 + "\x00\x01" /* rta type */
522 +#endif
523 + "\x00\x00\x00\x10" /* enc key length */
524 + "authenticationkey20benckeyis16_bytes",
525 + .klen = 8 + 20 + 16,
526 + .iv = "iv0123456789abcd",
527 + .input = "",
528 + .ilen = 0,
529 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
530 + "\x00\x03\x01\x00\x00",
531 + .alen = 13,
532 + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
533 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
534 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
535 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
536 + .rlen = 20 + 12,
537 + }, {
538 +#ifdef __LITTLE_ENDIAN
539 + .key = "\x08\x00" /* rta length */
540 + "\x01\x00" /* rta type */
541 +#else
542 + .key = "\x00\x08" /* rta length */
543 + "\x00\x01" /* rta type */
544 +#endif
545 + "\x00\x00\x00\x10" /* enc key length */
546 + "authenticationkey20benckeyis16_bytes",
547 + .klen = 8 + 20 + 16,
548 + .iv = "iv0123456789abcd",
549 + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
550 + " plaintext285 bytes plaintext285 bytes plaintext285"
551 + " bytes plaintext285 bytes plaintext285 bytes"
552 + " plaintext285 bytes plaintext285 bytes plaintext285"
553 + " bytes plaintext285 bytes plaintext285 bytes"
554 + " plaintext285 bytes plaintext285 bytes plaintext285"
555 + " bytes plaintext285 bytes plaintext",
556 + .ilen = 285,
557 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
558 + "\x00\x03\x01\x01\x1d",
559 + .alen = 13,
560 + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
561 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
562 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
563 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
564 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
565 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
566 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
567 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
568 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
569 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
570 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
571 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
572 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
573 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
574 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
575 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
576 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
577 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
578 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
579 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
580 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
581 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
582 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
583 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
584 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
585 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
586 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
587 + .rlen = 285 + 20 + 15,
588 + }
589 +};
590 +
591 +static struct tls_testvec tls_dec_tv_template[] = {
592 + {
593 +#ifdef __LITTLE_ENDIAN
594 + .key = "\x08\x00" /* rta length */
595 + "\x01\x00" /* rta type */
596 +#else
597 + .key = "\x00\x08" /* rta length */
598 + "\x00\x01" /* rta type */
599 +#endif
600 + "\x00\x00\x00\x10" /* enc key length */
601 + "authenticationkey20benckeyis16_bytes",
602 + .klen = 8 + 20 + 16,
603 + .iv = "iv0123456789abcd",
604 + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
605 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
606 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
607 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
608 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
609 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
610 + .ilen = 16 + 20 + 12,
611 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
612 + "\x00\x03\x01\x00\x30",
613 + .alen = 13,
614 + .result = "Single block msg",
615 + .rlen = 16,
616 + }, {
617 +#ifdef __LITTLE_ENDIAN
618 + .key = "\x08\x00" /* rta length */
619 + "\x01\x00" /* rta type */
620 +#else
621 + .key = "\x00\x08" /* rta length */
622 + "\x00\x01" /* rta type */
623 +#endif
624 + "\x00\x00\x00\x10" /* enc key length */
625 + "authenticationkey20benckeyis16_bytes",
626 + .klen = 8 + 20 + 16,
627 + .iv = "iv0123456789abcd",
628 + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
629 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
630 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
631 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
632 + .ilen = 20 + 12,
633 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
634 + "\x00\x03\x01\x00\x20",
635 + .alen = 13,
636 + .result = "",
637 + .rlen = 0,
638 + }, {
639 +#ifdef __LITTLE_ENDIAN
640 + .key = "\x08\x00" /* rta length */
641 + "\x01\x00" /* rta type */
642 +#else
643 + .key = "\x00\x08" /* rta length */
644 + "\x00\x01" /* rta type */
645 +#endif
646 + "\x00\x00\x00\x10" /* enc key length */
647 + "authenticationkey20benckeyis16_bytes",
648 + .klen = 8 + 20 + 16,
649 + .iv = "iv0123456789abcd",
650 + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
651 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
652 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
653 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
654 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
655 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
656 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
657 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
658 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
659 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
660 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
661 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
662 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
663 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
664 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
665 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
666 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
667 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
668 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
669 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
670 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
671 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
672 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
673 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
674 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
675 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
676 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
677 +
678 + .ilen = 285 + 20 + 15,
679 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
680 + "\x00\x03\x01\x01\x40",
681 + .alen = 13,
682 + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
683 + " plaintext285 bytes plaintext285 bytes plaintext285"
684 + " bytes plaintext285 bytes plaintext285 bytes"
685 + " plaintext285 bytes plaintext285 bytes plaintext285"
686 + " bytes plaintext285 bytes plaintext285 bytes"
687 + " plaintext285 bytes plaintext285 bytes plaintext",
688 + .rlen = 285,
689 + }
690 +};
691 +
692 +/*
693 * RSA test vectors. Borrowed from openSSL.
694 */
695 static const struct akcipher_testvec rsa_tv_template[] = {
696 --- /dev/null
697 +++ b/crypto/tls.c
698 @@ -0,0 +1,607 @@
699 +/*
700 + * Copyright 2013 Freescale Semiconductor, Inc.
701 + * Copyright 2017 NXP Semiconductor, Inc.
702 + *
703 + * This program is free software; you can redistribute it and/or modify it
704 + * under the terms of the GNU General Public License as published by the Free
705 + * Software Foundation; either version 2 of the License, or (at your option)
706 + * any later version.
707 + *
708 + */
709 +
710 +#include <crypto/internal/aead.h>
711 +#include <crypto/internal/hash.h>
712 +#include <crypto/internal/skcipher.h>
713 +#include <crypto/authenc.h>
714 +#include <crypto/null.h>
715 +#include <crypto/scatterwalk.h>
716 +#include <linux/err.h>
717 +#include <linux/init.h>
718 +#include <linux/module.h>
719 +#include <linux/rtnetlink.h>
720 +
721 +struct tls_instance_ctx {
722 + struct crypto_ahash_spawn auth;
723 + struct crypto_skcipher_spawn enc;
724 +};
725 +
726 +struct crypto_tls_ctx {
727 + unsigned int reqoff;
728 + struct crypto_ahash *auth;
729 + struct crypto_skcipher *enc;
730 + struct crypto_skcipher *null;
731 +};
732 +
733 +struct tls_request_ctx {
734 + /*
735 + * cryptlen holds the payload length in the case of encryption or
736 + * payload_len + icv_len + padding_len in case of decryption
737 + */
738 + unsigned int cryptlen;
739 + /* working space for partial results */
740 + struct scatterlist tmp[2];
741 + struct scatterlist cipher[2];
742 + struct scatterlist dst[2];
743 + char tail[];
744 +};
745 +
746 +struct async_op {
747 + struct completion completion;
748 + int err;
749 +};
750 +
751 +static void tls_async_op_done(struct crypto_async_request *req, int err)
752 +{
753 + struct async_op *areq = req->data;
754 +
755 + if (err == -EINPROGRESS)
756 + return;
757 +
758 + areq->err = err;
759 + complete(&areq->completion);
760 +}
761 +
762 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
763 + unsigned int keylen)
764 +{
765 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
766 + struct crypto_ahash *auth = ctx->auth;
767 + struct crypto_skcipher *enc = ctx->enc;
768 + struct crypto_authenc_keys keys;
769 + int err = -EINVAL;
770 +
771 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
772 + goto badkey;
773 +
774 + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
775 + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
776 + CRYPTO_TFM_REQ_MASK);
777 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
778 + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
779 + CRYPTO_TFM_RES_MASK);
780 +
781 + if (err)
782 + goto out;
783 +
784 + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
785 + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
786 + CRYPTO_TFM_REQ_MASK);
787 + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
788 + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
789 + CRYPTO_TFM_RES_MASK);
790 +
791 +out:
792 + return err;
793 +
794 +badkey:
795 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
796 + goto out;
797 +}
798 +
799 +/**
800 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
801 + * @hash: (output) buffer to save the digest into
802 + * @src: (input) scatterlist with the assoc and payload data
803 + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
804 + * @req: (input) aead request
805 + **/
806 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
807 + unsigned int srclen, struct aead_request *req)
808 +{
809 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
810 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
811 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
812 + struct async_op ahash_op;
813 + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
814 + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
815 + int err = -EBADMSG;
816 +
817 + /* Bail out if the request assoc len is 0 */
818 + if (!req->assoclen)
819 + return err;
820 +
821 + init_completion(&ahash_op.completion);
822 +
823 + /* the hash transform to be executed comes from the original request */
824 + ahash_request_set_tfm(ahreq, ctx->auth);
825 + /* prepare the hash request with input data and result pointer */
826 + ahash_request_set_crypt(ahreq, src, hash, srclen);
827 + /* set the notifier for when the async hash function returns */
828 + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
829 + tls_async_op_done, &ahash_op);
830 +
831 + /* Calculate the digest on the given data. The result is put in hash */
832 + err = crypto_ahash_digest(ahreq);
833 + if (err == -EINPROGRESS) {
834 + err = wait_for_completion_interruptible(&ahash_op.completion);
835 + if (!err)
836 + err = ahash_op.err;
837 + }
838 +
839 + return err;
840 +}
841 +
842 +/**
843 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
844 + * @hash: (output) buffer to save the digest and padding into
845 + * @phashlen: (output) the size of digest + padding
846 + * @req: (input) aead request
847 + **/
848 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
849 + struct aead_request *req)
850 +{
851 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
852 + unsigned int hash_size = crypto_aead_authsize(tls);
853 + unsigned int block_size = crypto_aead_blocksize(tls);
854 + unsigned int srclen = req->cryptlen + hash_size;
855 + unsigned int icvlen = req->cryptlen + req->assoclen;
856 + unsigned int padlen;
857 + int err;
858 +
859 + err = crypto_tls_genicv(hash, req->src, icvlen, req);
860 + if (err)
861 + goto out;
862 +
863 + /* add padding after digest */
864 + padlen = block_size - (srclen % block_size);
865 + memset(hash + hash_size, padlen - 1, padlen);
866 +
867 + *phashlen = hash_size + padlen;
868 +out:
869 + return err;
870 +}
871 +
872 +static int crypto_tls_copy_data(struct aead_request *req,
873 + struct scatterlist *src,
874 + struct scatterlist *dst,
875 + unsigned int len)
876 +{
877 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
878 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
879 + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
880 +
881 + skcipher_request_set_tfm(skreq, ctx->null);
882 + skcipher_request_set_callback(skreq, aead_request_flags(req),
883 + NULL, NULL);
884 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
885 +
886 + return crypto_skcipher_encrypt(skreq);
887 +}
888 +
889 +static int crypto_tls_encrypt(struct aead_request *req)
890 +{
891 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
892 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
893 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
894 + struct skcipher_request *skreq;
895 + struct scatterlist *cipher = treq_ctx->cipher;
896 + struct scatterlist *tmp = treq_ctx->tmp;
897 + struct scatterlist *sg, *src, *dst;
898 + unsigned int cryptlen, phashlen;
899 + u8 *hash = treq_ctx->tail;
900 + int err;
901 +
902 + /*
903 + * The hash result is saved at the beginning of the tls request ctx
904 + * and is aligned as required by the hash transform. Enough space was
905 + * allocated in crypto_tls_init_tfm to accommodate the difference. The
906 + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
907 + * the result is not overwritten by the second (cipher) request.
908 + */
909 + hash = (u8 *)ALIGN((unsigned long)hash +
910 + crypto_ahash_alignmask(ctx->auth),
911 + crypto_ahash_alignmask(ctx->auth) + 1);
912 +
913 + /*
914 + * STEP 1: create ICV together with necessary padding
915 + */
916 + err = crypto_tls_gen_padicv(hash, &phashlen, req);
917 + if (err)
918 + return err;
919 +
920 + /*
921 + * STEP 2: Hash and padding are combined with the payload
922 +	 * depending on the form in which it arrives. Scatter tables must have at least
923 + * one page of data before chaining with another table and can't have
924 + * an empty data page. The following code addresses these requirements.
925 + *
926 + * If the payload is empty, only the hash is encrypted, otherwise the
927 + * payload scatterlist is merged with the hash. A special merging case
928 + * is when the payload has only one page of data. In that case the
929 + * payload page is moved to another scatterlist and prepared there for
930 + * encryption.
931 + */
932 + if (req->cryptlen) {
933 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
934 +
935 + sg_init_table(cipher, 2);
936 + sg_set_buf(cipher + 1, hash, phashlen);
937 +
938 + if (sg_is_last(src)) {
939 + sg_set_page(cipher, sg_page(src), req->cryptlen,
940 + src->offset);
941 + src = cipher;
942 + } else {
943 + unsigned int rem_len = req->cryptlen;
944 +
945 + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
946 + rem_len -= min(rem_len, sg->length);
947 +
948 + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
949 + sg_chain(sg, 1, cipher);
950 + }
951 + } else {
952 + sg_init_one(cipher, hash, phashlen);
953 + src = cipher;
954 + }
955 +
956 + /**
957 + * If src != dst copy the associated data from source to destination.
958 +	 * In both cases fast-forward past the associated data in the dest.
959 + */
960 + if (req->src != req->dst) {
961 + err = crypto_tls_copy_data(req, req->src, req->dst,
962 + req->assoclen);
963 + if (err)
964 + return err;
965 + }
966 + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
967 +
968 + /*
969 + * STEP 3: encrypt the frame and return the result
970 + */
971 + cryptlen = req->cryptlen + phashlen;
972 +
973 + /*
974 + * The hash and the cipher are applied at different times and their
975 + * requests can use the same memory space without interference
976 + */
977 + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
978 + skcipher_request_set_tfm(skreq, ctx->enc);
979 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
980 + skcipher_request_set_callback(skreq, aead_request_flags(req),
981 + req->base.complete, req->base.data);
982 + /*
983 + * Apply the cipher transform. The result will be in req->dst when the
984 +	 * asynchronous call terminates
985 + */
986 + err = crypto_skcipher_encrypt(skreq);
987 +
988 + return err;
989 +}
990 +
991 +static int crypto_tls_decrypt(struct aead_request *req)
992 +{
993 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
994 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
995 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
996 + unsigned int cryptlen = req->cryptlen;
997 + unsigned int hash_size = crypto_aead_authsize(tls);
998 + unsigned int block_size = crypto_aead_blocksize(tls);
999 + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
1000 + struct scatterlist *tmp = treq_ctx->tmp;
1001 + struct scatterlist *src, *dst;
1002 +
1003 + u8 padding[255]; /* padding can be 0-255 bytes */
1004 + u8 pad_size;
1005 + u16 *len_field;
1006 + u8 *ihash, *hash = treq_ctx->tail;
1007 +
1008 + int paderr = 0;
1009 + int err = -EINVAL;
1010 + int i;
1011 + struct async_op ciph_op;
1012 +
1013 + /*
1014 + * Rule out bad packets. The input packet length must be at least one
1015 + * byte more than the hash_size
1016 + */
1017 + if (cryptlen <= hash_size || cryptlen % block_size)
1018 + goto out;
1019 +
1020 + /*
1021 + * Step 1 - Decrypt the source. Fast-forward past the associated data
1022 + * to the encrypted data. The result will be overwritten in place so
1023 + * that the decrypted data will be adjacent to the associated data. The
1024 +	 * last step (computing the hash) will have its input data already
1025 + * prepared and ready to be accessed at req->src.
1026 + */
1027 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
1028 + dst = src;
1029 +
1030 + init_completion(&ciph_op.completion);
1031 + skcipher_request_set_tfm(skreq, ctx->enc);
1032 + skcipher_request_set_callback(skreq, aead_request_flags(req),
1033 + tls_async_op_done, &ciph_op);
1034 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
1035 + err = crypto_skcipher_decrypt(skreq);
1036 + if (err == -EINPROGRESS) {
1037 + err = wait_for_completion_interruptible(&ciph_op.completion);
1038 + if (!err)
1039 + err = ciph_op.err;
1040 + }
1041 + if (err)
1042 + goto out;
1043 +
1044 + /*
1045 + * Step 2 - Verify padding
1046 + * Retrieve the last byte of the payload; this is the padding size.
1047 + */
1048 + cryptlen -= 1;
1049 + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
1050 +
1051 + /* RFC recommendation for invalid padding size. */
1052 + if (cryptlen < pad_size + hash_size) {
1053 + pad_size = 0;
1054 + paderr = -EBADMSG;
1055 + }
1056 + cryptlen -= pad_size;
1057 + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
1058 +
1059 +	/* Padding content must be equal to pad_size. We verify it all */
1060 + for (i = 0; i < pad_size; i++)
1061 + if (padding[i] != pad_size)
1062 + paderr = -EBADMSG;
1063 +
1064 + /*
1065 + * Step 3 - Verify hash
1066 + * Align the digest result as required by the hash transform. Enough
1067 + * space was allocated in crypto_tls_init_tfm
1068 + */
1069 + hash = (u8 *)ALIGN((unsigned long)hash +
1070 + crypto_ahash_alignmask(ctx->auth),
1071 + crypto_ahash_alignmask(ctx->auth) + 1);
1072 + /*
1073 + * Two bytes at the end of the associated data make the length field.
1074 + * It must be updated with the length of the cleartext message before
1075 + * the hash is calculated.
1076 + */
1077 + len_field = sg_virt(req->src) + req->assoclen - 2;
1078 + cryptlen -= hash_size;
1079 + *len_field = htons(cryptlen);
1080 +
1081 + /* This is the hash from the decrypted packet. Save it for later */
1082 + ihash = hash + hash_size;
1083 + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
1084 +
1085 + /* Now compute and compare our ICV with the one from the packet */
1086 + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
1087 + if (!err)
1088 + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
1089 +
1090 + if (req->src != req->dst) {
1091 + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
1092 + req->assoclen);
1093 + if (err)
1094 + goto out;
1095 + }
1096 +
1097 + /* return the first found error */
1098 + if (paderr)
1099 + err = paderr;
1100 +
1101 +out:
1102 + aead_request_complete(req, err);
1103 + return err;
1104 +}
1105 +
1106 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
1107 +{
1108 + struct aead_instance *inst = aead_alg_instance(tfm);
1109 + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
1110 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1111 + struct crypto_ahash *auth;
1112 + struct crypto_skcipher *enc;
1113 + struct crypto_skcipher *null;
1114 + int err;
1115 +
1116 + auth = crypto_spawn_ahash(&ictx->auth);
1117 + if (IS_ERR(auth))
1118 + return PTR_ERR(auth);
1119 +
1120 + enc = crypto_spawn_skcipher(&ictx->enc);
1121 + err = PTR_ERR(enc);
1122 + if (IS_ERR(enc))
1123 + goto err_free_ahash;
1124 +
1125 + null = crypto_get_default_null_skcipher2();
1126 + err = PTR_ERR(null);
1127 + if (IS_ERR(null))
1128 + goto err_free_skcipher;
1129 +
1130 + ctx->auth = auth;
1131 + ctx->enc = enc;
1132 + ctx->null = null;
1133 +
1134 + /*
1135 + * Allow enough space for two digests. The two digests will be compared
1136 + * during the decryption phase. One will come from the decrypted packet
1137 + * and the other will be calculated. For encryption, one digest is
1138 + * padded (up to a cipher blocksize) and chained with the payload
1139 + */
1140 + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
1141 + crypto_ahash_alignmask(auth),
1142 + crypto_ahash_alignmask(auth) + 1) +
1143 + max(crypto_ahash_digestsize(auth),
1144 + crypto_skcipher_blocksize(enc));
1145 +
1146 + crypto_aead_set_reqsize(tfm,
1147 + sizeof(struct tls_request_ctx) +
1148 + ctx->reqoff +
1149 + max_t(unsigned int,
1150 + crypto_ahash_reqsize(auth) +
1151 + sizeof(struct ahash_request),
1152 + crypto_skcipher_reqsize(enc) +
1153 + sizeof(struct skcipher_request)));
1154 +
1155 + return 0;
1156 +
1157 +err_free_skcipher:
1158 + crypto_free_skcipher(enc);
1159 +err_free_ahash:
1160 + crypto_free_ahash(auth);
1161 + return err;
1162 +}
1163 +
1164 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
1165 +{
1166 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1167 +
1168 + crypto_free_ahash(ctx->auth);
1169 + crypto_free_skcipher(ctx->enc);
1170 + crypto_put_default_null_skcipher2();
1171 +}
1172 +
1173 +static void crypto_tls_free(struct aead_instance *inst)
1174 +{
1175 + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
1176 +
1177 + crypto_drop_skcipher(&ctx->enc);
1178 + crypto_drop_ahash(&ctx->auth);
1179 + kfree(inst);
1180 +}
1181 +
1182 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
1183 +{
1184 + struct crypto_attr_type *algt;
1185 + struct aead_instance *inst;
1186 + struct hash_alg_common *auth;
1187 + struct crypto_alg *auth_base;
1188 + struct skcipher_alg *enc;
1189 + struct tls_instance_ctx *ctx;
1190 + const char *enc_name;
1191 + int err;
1192 +
1193 + algt = crypto_get_attr_type(tb);
1194 + if (IS_ERR(algt))
1195 + return PTR_ERR(algt);
1196 +
1197 + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
1198 + return -EINVAL;
1199 +
1200 + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
1201 + CRYPTO_ALG_TYPE_AHASH_MASK |
1202 + crypto_requires_sync(algt->type, algt->mask));
1203 + if (IS_ERR(auth))
1204 + return PTR_ERR(auth);
1205 +
1206 + auth_base = &auth->base;
1207 +
1208 + enc_name = crypto_attr_alg_name(tb[2]);
1209 + err = PTR_ERR(enc_name);
1210 + if (IS_ERR(enc_name))
1211 + goto out_put_auth;
1212 +
1213 + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
1214 + err = -ENOMEM;
1215 + if (!inst)
1216 + goto out_put_auth;
1217 +
1218 + ctx = aead_instance_ctx(inst);
1219 +
1220 + err = crypto_init_ahash_spawn(&ctx->auth, auth,
1221 + aead_crypto_instance(inst));
1222 + if (err)
1223 + goto err_free_inst;
1224 +
1225 + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
1226 + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
1227 + crypto_requires_sync(algt->type,
1228 + algt->mask));
1229 + if (err)
1230 + goto err_drop_auth;
1231 +
1232 + enc = crypto_spawn_skcipher_alg(&ctx->enc);
1233 +
1234 + err = -ENAMETOOLONG;
1235 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
1236 + "tls10(%s,%s)", auth_base->cra_name,
1237 + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
1238 + goto err_drop_enc;
1239 +
1240 + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1241 + "tls10(%s,%s)", auth_base->cra_driver_name,
1242 + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
1243 + goto err_drop_enc;
1244 +
1245 + inst->alg.base.cra_flags = (auth_base->cra_flags |
1246 + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
1247 + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
1248 + auth_base->cra_priority;
1249 + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
1250 + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
1251 + enc->base.cra_alignmask;
1252 + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
1253 +
1254 + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
1255 + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
1256 + inst->alg.maxauthsize = auth->digestsize;
1257 +
1258 + inst->alg.init = crypto_tls_init_tfm;
1259 + inst->alg.exit = crypto_tls_exit_tfm;
1260 +
1261 + inst->alg.setkey = crypto_tls_setkey;
1262 + inst->alg.encrypt = crypto_tls_encrypt;
1263 + inst->alg.decrypt = crypto_tls_decrypt;
1264 +
1265 + inst->free = crypto_tls_free;
1266 +
1267 + err = aead_register_instance(tmpl, inst);
1268 + if (err)
1269 + goto err_drop_enc;
1270 +
1271 +out:
1272 + crypto_mod_put(auth_base);
1273 + return err;
1274 +
1275 +err_drop_enc:
1276 + crypto_drop_skcipher(&ctx->enc);
1277 +err_drop_auth:
1278 + crypto_drop_ahash(&ctx->auth);
1279 +err_free_inst:
1280 + kfree(inst);
1281 +out_put_auth:
1282 + goto out;
1283 +}
1284 +
1285 +static struct crypto_template crypto_tls_tmpl = {
1286 + .name = "tls10",
1287 + .create = crypto_tls_create,
1288 + .module = THIS_MODULE,
1289 +};
1290 +
1291 +static int __init crypto_tls_module_init(void)
1292 +{
1293 + return crypto_register_template(&crypto_tls_tmpl);
1294 +}
1295 +
1296 +static void __exit crypto_tls_module_exit(void)
1297 +{
1298 + crypto_unregister_template(&crypto_tls_tmpl);
1299 +}
1300 +
1301 +module_init(crypto_tls_module_init);
1302 +module_exit(crypto_tls_module_exit);
1303 +
1304 +MODULE_LICENSE("GPL");
1305 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
1306 --- a/drivers/crypto/Makefile
1307 +++ b/drivers/crypto/Makefile
1308 @@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chel
1309 obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
1310 obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
1311 obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
1312 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
1313 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
1314 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
1315 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
1316 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
1317 --- a/drivers/crypto/caam/Kconfig
1318 +++ b/drivers/crypto/caam/Kconfig
1319 @@ -1,7 +1,11 @@
1320 +config CRYPTO_DEV_FSL_CAAM_COMMON
1321 + tristate
1322 +
1323 config CRYPTO_DEV_FSL_CAAM
1324 - tristate "Freescale CAAM-Multicore driver backend"
1325 + tristate "Freescale CAAM-Multicore platform driver backend"
1326 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
1327 select SOC_BUS
1328 + select CRYPTO_DEV_FSL_CAAM_COMMON
1329 help
1330 Enables the driver module for Freescale's Cryptographic Accelerator
1331 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
1332 @@ -12,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
1333 To compile this driver as a module, choose M here: the module
1334 will be called caam.
1335
1336 +if CRYPTO_DEV_FSL_CAAM
1337 +
1338 +config CRYPTO_DEV_FSL_CAAM_DEBUG
1339 + bool "Enable debug output in CAAM driver"
1340 + help
1341 + Selecting this will enable printing of various debug
1342 + information in the CAAM driver.
1343 +
1344 config CRYPTO_DEV_FSL_CAAM_JR
1345 tristate "Freescale CAAM Job Ring driver backend"
1346 - depends on CRYPTO_DEV_FSL_CAAM
1347 default y
1348 help
1349 Enables the driver module for Job Rings which are part of
1350 @@ -25,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
1351 To compile this driver as a module, choose M here: the module
1352 will be called caam_jr.
1353
1354 +if CRYPTO_DEV_FSL_CAAM_JR
1355 +
1356 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1357 int "Job Ring size"
1358 - depends on CRYPTO_DEV_FSL_CAAM_JR
1359 range 2 9
1360 default "9"
1361 help
1362 @@ -45,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1363
1364 config CRYPTO_DEV_FSL_CAAM_INTC
1365 bool "Job Ring interrupt coalescing"
1366 - depends on CRYPTO_DEV_FSL_CAAM_JR
1367 help
1368 Enable the Job Ring's interrupt coalescing feature.
1369
1370 @@ -75,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
1371
1372 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1373 tristate "Register algorithm implementations with the Crypto API"
1374 - depends on CRYPTO_DEV_FSL_CAAM_JR
1375 default y
1376 select CRYPTO_AEAD
1377 select CRYPTO_AUTHENC
1378 @@ -90,7 +100,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1379
1380 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1381 tristate "Queue Interface as Crypto API backend"
1382 - depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
1383 + depends on FSL_SDK_DPA && NET
1384 default y
1385 select CRYPTO_AUTHENC
1386 select CRYPTO_BLKCIPHER
1387 @@ -107,7 +117,6 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1388
1389 config CRYPTO_DEV_FSL_CAAM_AHASH_API
1390 tristate "Register hash algorithm implementations with Crypto API"
1391 - depends on CRYPTO_DEV_FSL_CAAM_JR
1392 default y
1393 select CRYPTO_HASH
1394 help
1395 @@ -119,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
1396
1397 config CRYPTO_DEV_FSL_CAAM_PKC_API
1398 tristate "Register public key cryptography implementations with Crypto API"
1399 - depends on CRYPTO_DEV_FSL_CAAM_JR
1400 default y
1401 select CRYPTO_RSA
1402 help
1403 @@ -131,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
1404
1405 config CRYPTO_DEV_FSL_CAAM_RNG_API
1406 tristate "Register caam device for hwrng API"
1407 - depends on CRYPTO_DEV_FSL_CAAM_JR
1408 default y
1409 select CRYPTO_RNG
1410 select HW_RANDOM
1411 @@ -142,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
1412 To compile this as a module, choose M here: the module
1413 will be called caamrng.
1414
1415 -config CRYPTO_DEV_FSL_CAAM_DEBUG
1416 - bool "Enable debug output in CAAM driver"
1417 - depends on CRYPTO_DEV_FSL_CAAM
1418 - help
1419 - Selecting this will enable printing of various debug
1420 - information in the CAAM driver.
1421 +endif # CRYPTO_DEV_FSL_CAAM_JR
1422 +
1423 +endif # CRYPTO_DEV_FSL_CAAM
1424 +
1425 +config CRYPTO_DEV_FSL_DPAA2_CAAM
1426 + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
1427 + depends on FSL_MC_DPIO
1428 + select CRYPTO_DEV_FSL_CAAM_COMMON
1429 + select CRYPTO_BLKCIPHER
1430 + select CRYPTO_AUTHENC
1431 + select CRYPTO_AEAD
1432 + select CRYPTO_HASH
1433 + ---help---
1434 + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
1435 + It handles DPSECI DPAA2 objects that sit on the Management Complex
1436 + (MC) fsl-mc bus.
1437 +
1438 + To compile this as a module, choose M here: the module
1439 + will be called dpaa2_caam.
1440
1441 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1442 def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
1443 - CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
1444 + CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
1445 + CRYPTO_DEV_FSL_DPAA2_CAAM)
1446 +
1447 +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1448 + def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
1449 + CRYPTO_DEV_FSL_DPAA2_CAAM)
1450 --- a/drivers/crypto/caam/Makefile
1451 +++ b/drivers/crypto/caam/Makefile
1452 @@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
1453 ccflags-y := -DDEBUG
1454 endif
1455
1456 +ccflags-y += -DVERSION=\"\"
1457 +
1458 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
1459 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
1460 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
1461 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
1462 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
1463 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
1464 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
1465 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
1466 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
1467 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
1468
1469 caam-objs := ctrl.o
1470 -caam_jr-objs := jr.o key_gen.o error.o
1471 +caam_jr-objs := jr.o key_gen.o
1472 caam_pkc-y := caampkc.o pkc_desc.o
1473 ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
1474 ccflags-y += -DCONFIG_CAAM_QI
1475 caam-objs += qi.o
1476 endif
1477 +
1478 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
1479 +
1480 +dpaa2_caam-y := caamalg_qi2.o dpseci.o
1481 --- a/drivers/crypto/caam/caamalg.c
1482 +++ b/drivers/crypto/caam/caamalg.c
1483 @@ -108,6 +108,7 @@ struct caam_ctx {
1484 dma_addr_t sh_desc_dec_dma;
1485 dma_addr_t sh_desc_givenc_dma;
1486 dma_addr_t key_dma;
1487 + enum dma_data_direction dir;
1488 struct device *jrdev;
1489 struct alginfo adata;
1490 struct alginfo cdata;
1491 @@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct
1492 {
1493 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1494 struct device *jrdev = ctx->jrdev;
1495 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1496 u32 *desc;
1497 int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
1498 ctx->adata.keylen_pad;
1499 @@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct
1500
1501 /* aead_encrypt shared descriptor */
1502 desc = ctx->sh_desc_enc;
1503 - cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
1504 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
1505 + ctrlpriv->era);
1506 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1507 - desc_bytes(desc), DMA_TO_DEVICE);
1508 + desc_bytes(desc), ctx->dir);
1509
1510 /*
1511 * Job Descriptor and Shared Descriptors
1512 @@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct
1513
1514 /* aead_decrypt shared descriptor */
1515 desc = ctx->sh_desc_dec;
1516 - cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
1517 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
1518 + ctrlpriv->era);
1519 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1520 - desc_bytes(desc), DMA_TO_DEVICE);
1521 + desc_bytes(desc), ctx->dir);
1522
1523 return 0;
1524 }
1525 @@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypt
1526 unsigned int ivsize = crypto_aead_ivsize(aead);
1527 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1528 struct device *jrdev = ctx->jrdev;
1529 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1530 u32 ctx1_iv_off = 0;
1531 u32 *desc, *nonce = NULL;
1532 u32 inl_mask;
1533 @@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypt
1534 desc = ctx->sh_desc_enc;
1535 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
1536 ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
1537 - false);
1538 + false, ctrlpriv->era);
1539 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1540 - desc_bytes(desc), DMA_TO_DEVICE);
1541 + desc_bytes(desc), ctx->dir);
1542
1543 skip_enc:
1544 /*
1545 @@ -266,9 +271,9 @@ skip_enc:
1546 desc = ctx->sh_desc_dec;
1547 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
1548 ctx->authsize, alg->caam.geniv, is_rfc3686,
1549 - nonce, ctx1_iv_off, false);
1550 + nonce, ctx1_iv_off, false, ctrlpriv->era);
1551 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1552 - desc_bytes(desc), DMA_TO_DEVICE);
1553 + desc_bytes(desc), ctx->dir);
1554
1555 if (!alg->caam.geniv)
1556 goto skip_givenc;
1557 @@ -300,9 +305,9 @@ skip_enc:
1558 desc = ctx->sh_desc_enc;
1559 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
1560 ctx->authsize, is_rfc3686, nonce,
1561 - ctx1_iv_off, false);
1562 + ctx1_iv_off, false, ctrlpriv->era);
1563 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1564 - desc_bytes(desc), DMA_TO_DEVICE);
1565 + desc_bytes(desc), ctx->dir);
1566
1567 skip_givenc:
1568 return 0;
1569 @@ -323,6 +328,7 @@ static int gcm_set_sh_desc(struct crypto
1570 {
1571 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1572 struct device *jrdev = ctx->jrdev;
1573 + unsigned int ivsize = crypto_aead_ivsize(aead);
1574 u32 *desc;
1575 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1576 ctx->cdata.keylen;
1577 @@ -344,9 +350,9 @@ static int gcm_set_sh_desc(struct crypto
1578 }
1579
1580 desc = ctx->sh_desc_enc;
1581 - cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
1582 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1583 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1584 - desc_bytes(desc), DMA_TO_DEVICE);
1585 + desc_bytes(desc), ctx->dir);
1586
1587 /*
1588 * Job Descriptor and Shared Descriptors
1589 @@ -361,9 +367,9 @@ static int gcm_set_sh_desc(struct crypto
1590 }
1591
1592 desc = ctx->sh_desc_dec;
1593 - cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
1594 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1595 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1596 - desc_bytes(desc), DMA_TO_DEVICE);
1597 + desc_bytes(desc), ctx->dir);
1598
1599 return 0;
1600 }
1601 @@ -382,6 +388,7 @@ static int rfc4106_set_sh_desc(struct cr
1602 {
1603 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1604 struct device *jrdev = ctx->jrdev;
1605 + unsigned int ivsize = crypto_aead_ivsize(aead);
1606 u32 *desc;
1607 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1608 ctx->cdata.keylen;
1609 @@ -403,9 +410,10 @@ static int rfc4106_set_sh_desc(struct cr
1610 }
1611
1612 desc = ctx->sh_desc_enc;
1613 - cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
1614 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1615 + false);
1616 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1617 - desc_bytes(desc), DMA_TO_DEVICE);
1618 + desc_bytes(desc), ctx->dir);
1619
1620 /*
1621 * Job Descriptor and Shared Descriptors
1622 @@ -420,9 +428,10 @@ static int rfc4106_set_sh_desc(struct cr
1623 }
1624
1625 desc = ctx->sh_desc_dec;
1626 - cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
1627 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1628 + false);
1629 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1630 - desc_bytes(desc), DMA_TO_DEVICE);
1631 + desc_bytes(desc), ctx->dir);
1632
1633 return 0;
1634 }
1635 @@ -442,6 +451,7 @@ static int rfc4543_set_sh_desc(struct cr
1636 {
1637 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1638 struct device *jrdev = ctx->jrdev;
1639 + unsigned int ivsize = crypto_aead_ivsize(aead);
1640 u32 *desc;
1641 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1642 ctx->cdata.keylen;
1643 @@ -463,9 +473,10 @@ static int rfc4543_set_sh_desc(struct cr
1644 }
1645
1646 desc = ctx->sh_desc_enc;
1647 - cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
1648 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1649 + false);
1650 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1651 - desc_bytes(desc), DMA_TO_DEVICE);
1652 + desc_bytes(desc), ctx->dir);
1653
1654 /*
1655 * Job Descriptor and Shared Descriptors
1656 @@ -480,9 +491,10 @@ static int rfc4543_set_sh_desc(struct cr
1657 }
1658
1659 desc = ctx->sh_desc_dec;
1660 - cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
1661 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1662 + false);
1663 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1664 - desc_bytes(desc), DMA_TO_DEVICE);
1665 + desc_bytes(desc), ctx->dir);
1666
1667 return 0;
1668 }
1669 @@ -503,6 +515,7 @@ static int aead_setkey(struct crypto_aea
1670 {
1671 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1672 struct device *jrdev = ctx->jrdev;
1673 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1674 struct crypto_authenc_keys keys;
1675 int ret = 0;
1676
1677 @@ -517,6 +530,27 @@ static int aead_setkey(struct crypto_aea
1678 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1679 #endif
1680
1681 + /*
1682 + * If DKP is supported, use it in the shared descriptor to generate
1683 + * the split key.
1684 + */
1685 + if (ctrlpriv->era >= 6) {
1686 + ctx->adata.keylen = keys.authkeylen;
1687 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
1688 + OP_ALG_ALGSEL_MASK);
1689 +
1690 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1691 + goto badkey;
1692 +
1693 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
1694 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
1695 + keys.enckeylen);
1696 + dma_sync_single_for_device(jrdev, ctx->key_dma,
1697 + ctx->adata.keylen_pad +
1698 + keys.enckeylen, ctx->dir);
1699 + goto skip_split_key;
1700 + }
1701 +
1702 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
1703 keys.authkeylen, CAAM_MAX_KEY_SIZE -
1704 keys.enckeylen);
1705 @@ -527,12 +561,14 @@ static int aead_setkey(struct crypto_aea
1706 /* postpend encryption key to auth split key */
1707 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
1708 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
1709 - keys.enckeylen, DMA_TO_DEVICE);
1710 + keys.enckeylen, ctx->dir);
1711 #ifdef DEBUG
1712 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
1713 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
1714 ctx->adata.keylen_pad + keys.enckeylen, 1);
1715 #endif
1716 +
1717 +skip_split_key:
1718 ctx->cdata.keylen = keys.enckeylen;
1719 return aead_set_sh_desc(aead);
1720 badkey:
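
For Era >= 6 the split key is derived by the DKP protocol inside the shared descriptor itself, so setkey only lays the plain keys out in the context buffer: the auth key at offset 0, reserving the padded split-key length, and the cipher key right after it. A hedged sketch of that layout; the padded length is an assumed value for illustration:

#include <stdio.h>
#include <string.h>

#define KEYLEN_PAD	48	/* assumed padded split-key length for SHA-1 */

int main(void)
{
	unsigned char ctx_key[128] = {0};
	unsigned char authkey[20], enckey[16];

	memset(authkey, 0xa1, sizeof(authkey));
	memset(enckey, 0xc1, sizeof(enckey));

	/* Plain auth key first; on Era >= 6 the DKP protocol overwrites
	 * this region with the derived split key, hence the padding. */
	memcpy(ctx_key, authkey, sizeof(authkey));
	/* Cipher key lands right after the padded split-key region. */
	memcpy(ctx_key + KEYLEN_PAD, enckey, sizeof(enckey));

	printf("enc key offset: %d\n", KEYLEN_PAD);
	return 0;
}
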
1721 @@ -552,7 +588,7 @@ static int gcm_setkey(struct crypto_aead
1722 #endif
1723
1724 memcpy(ctx->key, key, keylen);
1725 - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
1726 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
1727 ctx->cdata.keylen = keylen;
1728
1729 return gcm_set_sh_desc(aead);
1730 @@ -580,7 +616,7 @@ static int rfc4106_setkey(struct crypto_
1731 */
1732 ctx->cdata.keylen = keylen - 4;
1733 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1734 - DMA_TO_DEVICE);
1735 + ctx->dir);
1736 return rfc4106_set_sh_desc(aead);
1737 }
1738
1739 @@ -606,7 +642,7 @@ static int rfc4543_setkey(struct crypto_
1740 */
1741 ctx->cdata.keylen = keylen - 4;
1742 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1743 - DMA_TO_DEVICE);
1744 + ctx->dir);
1745 return rfc4543_set_sh_desc(aead);
1746 }
1747
1748 @@ -658,21 +694,21 @@ static int ablkcipher_setkey(struct cryp
1749 cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
1750 ctx1_iv_off);
1751 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1752 - desc_bytes(desc), DMA_TO_DEVICE);
1753 + desc_bytes(desc), ctx->dir);
1754
1755 /* ablkcipher_decrypt shared descriptor */
1756 desc = ctx->sh_desc_dec;
1757 cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
1758 ctx1_iv_off);
1759 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1760 - desc_bytes(desc), DMA_TO_DEVICE);
1761 + desc_bytes(desc), ctx->dir);
1762
1763 /* ablkcipher_givencrypt shared descriptor */
1764 desc = ctx->sh_desc_givenc;
1765 cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
1766 ctx1_iv_off);
1767 dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
1768 - desc_bytes(desc), DMA_TO_DEVICE);
1769 + desc_bytes(desc), ctx->dir);
1770
1771 return 0;
1772 }
1773 @@ -701,13 +737,13 @@ static int xts_ablkcipher_setkey(struct
1774 desc = ctx->sh_desc_enc;
1775 cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
1776 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1777 - desc_bytes(desc), DMA_TO_DEVICE);
1778 + desc_bytes(desc), ctx->dir);
1779
1780 /* xts_ablkcipher_decrypt shared descriptor */
1781 desc = ctx->sh_desc_dec;
1782 cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
1783 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1784 - desc_bytes(desc), DMA_TO_DEVICE);
1785 + desc_bytes(desc), ctx->dir);
1786
1787 return 0;
1788 }
1789 @@ -987,9 +1023,6 @@ static void init_aead_job(struct aead_re
1790 append_seq_out_ptr(desc, dst_dma,
1791 req->assoclen + req->cryptlen - authsize,
1792 out_options);
1793 -
1794 - /* REG3 = assoclen */
1795 - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1796 }
1797
1798 static void init_gcm_job(struct aead_request *req,
1799 @@ -1004,6 +1037,7 @@ static void init_gcm_job(struct aead_req
1800 unsigned int last;
1801
1802 init_aead_job(req, edesc, all_contig, encrypt);
1803 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1804
1805 /* BUG This should not be specific to generic GCM. */
1806 last = 0;
1807 @@ -1030,6 +1064,7 @@ static void init_authenc_job(struct aead
1808 struct caam_aead_alg, aead);
1809 unsigned int ivsize = crypto_aead_ivsize(aead);
1810 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1811 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
1812 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
1813 OP_ALG_AAI_CTR_MOD128);
1814 const bool is_rfc3686 = alg->caam.rfc3686;
1815 @@ -1053,6 +1088,15 @@ static void init_authenc_job(struct aead
1816
1817 init_aead_job(req, edesc, all_contig, encrypt);
1818
1819 + /*
1820 + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
1821 + * having DPOVRD as destination.
1822 + */
1823 + if (ctrlpriv->era < 3)
1824 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1825 + else
1826 + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
1827 +
1828 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
1829 append_load_as_imm(desc, req->iv, ivsize,
1830 LDST_CLASS_1_CCB |
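
The branch added above encodes a SEC capability change: from Era 3 on, MATH commands may target the DECO Protocol Override (DPOVRD) register directly, so assoclen no longer needs to ride in REG3. A trivial model of the gate:

#include <stdio.h>

/* Sketch only: which register the job descriptor stages assoclen in. */
static const char *assoclen_dest(int era)
{
	return era < 3 ? "REG3" : "DPOVRD";
}

int main(void)
{
	int era;

	for (era = 1; era <= 6; era++)
		printf("SEC Era %d -> %s\n", era, assoclen_dest(era));
	return 0;
}
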
1831 @@ -3203,9 +3247,11 @@ struct caam_crypto_alg {
1832 struct caam_alg_entry caam;
1833 };
1834
1835 -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
1836 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1837 + bool uses_dkp)
1838 {
1839 dma_addr_t dma_addr;
1840 + struct caam_drv_private *priv;
1841
1842 ctx->jrdev = caam_jr_alloc();
1843 if (IS_ERR(ctx->jrdev)) {
1844 @@ -3213,10 +3259,16 @@ static int caam_init_common(struct caam_
1845 return PTR_ERR(ctx->jrdev);
1846 }
1847
1848 + priv = dev_get_drvdata(ctx->jrdev->parent);
1849 + if (priv->era >= 6 && uses_dkp)
1850 + ctx->dir = DMA_BIDIRECTIONAL;
1851 + else
1852 + ctx->dir = DMA_TO_DEVICE;
1853 +
1854 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
1855 offsetof(struct caam_ctx,
1856 sh_desc_enc_dma),
1857 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1858 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1859 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
1860 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
1861 caam_jr_free(ctx->jrdev);
1862 @@ -3244,7 +3296,7 @@ static int caam_cra_init(struct crypto_t
1863 container_of(alg, struct caam_crypto_alg, crypto_alg);
1864 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
1865
1866 - return caam_init_common(ctx, &caam_alg->caam);
1867 + return caam_init_common(ctx, &caam_alg->caam, false);
1868 }
1869
1870 static int caam_aead_init(struct crypto_aead *tfm)
1871 @@ -3254,14 +3306,15 @@ static int caam_aead_init(struct crypto_
1872 container_of(alg, struct caam_aead_alg, aead);
1873 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
1874
1875 - return caam_init_common(ctx, &caam_alg->caam);
1876 + return caam_init_common(ctx, &caam_alg->caam,
1877 + alg->setkey == aead_setkey);
1878 }
1879
1880 static void caam_exit_common(struct caam_ctx *ctx)
1881 {
1882 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
1883 offsetof(struct caam_ctx, sh_desc_enc_dma),
1884 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1885 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1886 caam_jr_free(ctx->jrdev);
1887 }
1888
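
The uses_dkp plumbing exists because DKP makes the device a writer, not just a reader, of the mapped region: the SEC replaces the plain HMAC key in the descriptor buffer with the derived split key. Hence the mapping must be bidirectional on Era >= 6 for algorithms that take the aead_setkey path. A one-function sketch of the rule, with stand-in enum values:

#include <stdbool.h>
#include <stdio.h>

enum dir { TO_DEVICE, BIDIRECTIONAL };	/* stand-ins for the DMA enums */

static enum dir ctx_dma_dir(int era, bool uses_dkp)
{
	/* DKP (Era >= 6) writes the derived split key back into the
	 * mapped buffer, so the device needs write access to it. */
	return (era >= 6 && uses_dkp) ? BIDIRECTIONAL : TO_DEVICE;
}

int main(void)
{
	printf("era 8 + DKP: %d\n", ctx_dma_dir(8, true));
	printf("era 8, no DKP: %d\n", ctx_dma_dir(8, false));
	return 0;
}
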
1889 --- a/drivers/crypto/caam/caamalg_desc.c
1890 +++ b/drivers/crypto/caam/caamalg_desc.c
1891 @@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *d
1892 * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
1893 * (non-protocol) with no (null) encryption.
1894 * @desc: pointer to buffer used for descriptor construction
1895 - * @adata: pointer to authentication transform definitions. Note that since a
1896 - * split key is to be used, the size of the split key itself is
1897 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
1898 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
1899 + * @adata: pointer to authentication transform definitions.
1900 + * A split key is required for SEC Era < 6; the size of the split key
1901 + * is specified in this case. Valid algorithm values - one of
1902 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
1903 + * with OP_ALG_AAI_HMAC_PRECOMP.
1904 * @icvsize: integrity check value (ICV) size (truncated or full)
1905 - *
1906 - * Note: Requires an MDHA split key.
1907 + * @era: SEC Era
1908 */
1909 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
1910 - unsigned int icvsize)
1911 + unsigned int icvsize, int era)
1912 {
1913 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
1914
1915 @@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * c
1916 /* Skip if already shared */
1917 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1918 JUMP_COND_SHRD);
1919 - if (adata->key_inline)
1920 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
1921 - adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
1922 - KEY_ENC);
1923 - else
1924 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
1925 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
1926 + if (era < 6) {
1927 + if (adata->key_inline)
1928 + append_key_as_imm(desc, adata->key_virt,
1929 + adata->keylen_pad, adata->keylen,
1930 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
1931 + KEY_ENC);
1932 + else
1933 + append_key(desc, adata->key_dma, adata->keylen,
1934 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
1935 + } else {
1936 + append_proto_dkp(desc, adata);
1937 + }
1938 set_jump_tgt_here(desc, key_jump_cmd);
1939
1940 /* assoclen + cryptlen = seqinlen */
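
Both key-loading strategies gated on era above end with the same MDHA split key in the class 2 key register; they differ only in who computes it. Pre-Era-6 parts load a host-precomputed split key, while Era >= 6 parts load the plain HMAC key and let append_proto_dkp() derive the split key in hardware. A sketch of what "split key" means in size terms; the 16-byte padding granularity is an assumption:

#include <stdio.h>

/* Sketch: an MDHA split key is the pair of precomputed inner/outer HMAC
 * states (ipad/opad hashes), so it is twice the digest size, rounded up
 * to the CAAM key-padding granularity (assumed 16 bytes here). */
static unsigned int split_key_pad_len(unsigned int digestsize)
{
	return (2 * digestsize + 15) & ~15u;
}

int main(void)
{
	printf("SHA-1 split key: %u bytes\n", split_key_pad_len(20));
	printf("SHA-256 split key: %u bytes\n", split_key_pad_len(32));
	return 0;
}
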
1941 @@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_enca
1942 * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
1943 * (non-protocol) with no (null) decryption.
1944 * @desc: pointer to buffer used for descriptor construction
1945 - * @adata: pointer to authentication transform definitions. Note that since a
1946 - * split key is to be used, the size of the split key itself is
1947 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
1948 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
1949 + * @adata: pointer to authentication transform definitions.
1950 + * A split key is required for SEC Era < 6; the size of the split key
1951 + * is specified in this case. Valid algorithm values - one of
1952 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
1953 + * with OP_ALG_AAI_HMAC_PRECOMP.
1954 * @icvsize: integrity check value (ICV) size (truncated or full)
1955 - *
1956 - * Note: Requires an MDHA split key.
1957 + * @era: SEC Era
1958 */
1959 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
1960 - unsigned int icvsize)
1961 + unsigned int icvsize, int era)
1962 {
1963 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
1964
1965 @@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * c
1966 /* Skip if already shared */
1967 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
1968 JUMP_COND_SHRD);
1969 - if (adata->key_inline)
1970 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
1971 - adata->keylen, CLASS_2 |
1972 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
1973 - else
1974 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
1975 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
1976 + if (era < 6) {
1977 + if (adata->key_inline)
1978 + append_key_as_imm(desc, adata->key_virt,
1979 + adata->keylen_pad, adata->keylen,
1980 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
1981 + KEY_ENC);
1982 + else
1983 + append_key(desc, adata->key_dma, adata->keylen,
1984 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
1985 + } else {
1986 + append_proto_dkp(desc, adata);
1987 + }
1988 set_jump_tgt_here(desc, key_jump_cmd);
1989
1990 /* Class 2 operation */
1991 @@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_deca
1992 static void init_sh_desc_key_aead(u32 * const desc,
1993 struct alginfo * const cdata,
1994 struct alginfo * const adata,
1995 - const bool is_rfc3686, u32 *nonce)
1996 + const bool is_rfc3686, u32 *nonce, int era)
1997 {
1998 u32 *key_jump_cmd;
1999 unsigned int enckeylen = cdata->keylen;
2000 @@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 *
2001 if (is_rfc3686)
2002 enckeylen -= CTR_RFC3686_NONCE_SIZE;
2003
2004 - if (adata->key_inline)
2005 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2006 - adata->keylen, CLASS_2 |
2007 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2008 - else
2009 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2010 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2011 + if (era < 6) {
2012 + if (adata->key_inline)
2013 + append_key_as_imm(desc, adata->key_virt,
2014 + adata->keylen_pad, adata->keylen,
2015 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2016 + KEY_ENC);
2017 + else
2018 + append_key(desc, adata->key_dma, adata->keylen,
2019 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2020 + } else {
2021 + append_proto_dkp(desc, adata);
2022 + }
2023
2024 if (cdata->key_inline)
2025 append_key_as_imm(desc, cdata->key_virt, enckeylen,
2026 @@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 *
2027 * @cdata: pointer to block cipher transform definitions
2028 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2029 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2030 - * @adata: pointer to authentication transform definitions. Note that since a
2031 - * split key is to be used, the size of the split key itself is
2032 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2033 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2034 + * @adata: pointer to authentication transform definitions.
2035 + * A split key is required for SEC Era < 6; the size of the split key
2036 + * is specified in this case. Valid algorithm values - one of
2037 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2038 + * with OP_ALG_AAI_HMAC_PRECOMP.
2039 * @ivsize: initialization vector size
2040 * @icvsize: integrity check value (ICV) size (truncated or full)
2041 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2042 * @nonce: pointer to rfc3686 nonce
2043 * @ctx1_iv_off: IV offset in CONTEXT1 register
2044 * @is_qi: true when called from caam/qi
2045 - *
2046 - * Note: Requires an MDHA split key.
2047 + * @era: SEC Era
2048 */
2049 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
2050 struct alginfo *adata, unsigned int ivsize,
2051 unsigned int icvsize, const bool is_rfc3686,
2052 - u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
2053 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
2054 + int era)
2055 {
2056 /* Note: Context registers are saved. */
2057 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2058 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2059
2060 /* Class 2 operation */
2061 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2062 @@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const
2063 }
2064
2065 /* Read and write assoclen bytes */
2066 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2067 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2068 + if (is_qi || era < 3) {
2069 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2070 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2071 + } else {
2072 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2073 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2074 + }
2075
2076 /* Skip assoc data */
2077 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2078 @@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
2079 * @cdata: pointer to block cipher transform definitions
2080 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2081 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2082 - * @adata: pointer to authentication transform definitions. Note that since a
2083 - * split key is to be used, the size of the split key itself is
2084 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2085 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2086 + * @adata: pointer to authentication transform definitions.
2087 + * A split key is required for SEC Era < 6; the size of the split key
2088 + * is specified in this case. Valid algorithm values - one of
2089 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2090 + * with OP_ALG_AAI_HMAC_PRECOMP.
2091 * @ivsize: initialization vector size
2092 * @icvsize: integrity check value (ICV) size (truncated or full)
2093 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2094 * @nonce: pointer to rfc3686 nonce
2095 * @ctx1_iv_off: IV offset in CONTEXT1 register
2096 * @is_qi: true when called from caam/qi
2097 - *
2098 - * Note: Requires an MDHA split key.
2099 + * @era: SEC Era
2100 */
2101 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
2102 struct alginfo *adata, unsigned int ivsize,
2103 unsigned int icvsize, const bool geniv,
2104 const bool is_rfc3686, u32 *nonce,
2105 - const u32 ctx1_iv_off, const bool is_qi)
2106 + const u32 ctx1_iv_off, const bool is_qi, int era)
2107 {
2108 /* Note: Context registers are saved. */
2109 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2110 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2111
2112 /* Class 2 operation */
2113 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2114 @@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const
2115 }
2116
2117 /* Read and write assoclen bytes */
2118 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2119 - if (geniv)
2120 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
2121 - else
2122 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2123 + if (is_qi || era < 3) {
2124 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2125 + if (geniv)
2126 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
2127 + ivsize);
2128 + else
2129 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
2130 + CAAM_CMD_SZ);
2131 + } else {
2132 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2133 + if (geniv)
2134 + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
2135 + ivsize);
2136 + else
2137 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
2138 + CAAM_CMD_SZ);
2139 + }
2140
2141 /* Skip assoc data */
2142 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2143 @@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
2144 * @cdata: pointer to block cipher transform definitions
2145 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2146 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2147 - * @adata: pointer to authentication transform definitions. Note that since a
2148 - * split key is to be used, the size of the split key itself is
2149 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2150 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2151 + * @adata: pointer to authentication transform definitions.
2152 + * A split key is required for SEC Era < 6; the size of the split key
2153 + * is specified in this case. Valid algorithm values - one of
2154 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2155 + * with OP_ALG_AAI_HMAC_PRECOMP.
2156 * @ivsize: initialization vector size
2157 * @icvsize: integrity check value (ICV) size (truncated or full)
2158 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2159 * @nonce: pointer to rfc3686 nonce
2160 * @ctx1_iv_off: IV offset in CONTEXT1 register
2161 * @is_qi: true when called from caam/qi
2162 - *
2163 - * Note: Requires an MDHA split key.
2164 + * @era: SEC Era
2165 */
2166 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
2167 struct alginfo *adata, unsigned int ivsize,
2168 unsigned int icvsize, const bool is_rfc3686,
2169 u32 *nonce, const u32 ctx1_iv_off,
2170 - const bool is_qi)
2171 + const bool is_qi, int era)
2172 {
2173 u32 geniv, moveiv;
2174
2175 /* Note: Context registers are saved. */
2176 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2177 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2178
2179 if (is_qi) {
2180 u32 *wait_load_cmd;
2181 @@ -528,8 +561,13 @@ copy_iv:
2182 OP_ALG_ENCRYPT);
2183
2184 /* Read and write assoclen bytes */
2185 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2186 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2187 + if (is_qi || era < 3) {
2188 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2189 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2190 + } else {
2191 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2192 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2193 + }
2194
2195 /* Skip assoc data */
2196 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2197 @@ -583,14 +621,431 @@ copy_iv:
2198 EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
2199
2200 /**
2201 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
2202 + * @desc: pointer to buffer used for descriptor construction
2203 + * @cdata: pointer to block cipher transform definitions
2204 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed
2205 + * with OP_ALG_AAI_CBC
2206 + * @adata: pointer to authentication transform definitions.
2207 + * A split key is required for SEC Era < 6; the size of the split key
2208 + * is specified in this case. Valid algorithm values - OP_ALG_ALGSEL_SHA1
2209 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2210 + * @assoclen: associated data length
2211 + * @ivsize: initialization vector size
2212 + * @authsize: authentication data size
2213 + * @blocksize: block cipher size
2214 + * @era: SEC Era
2215 + */
2216 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
2217 + struct alginfo *adata, unsigned int assoclen,
2218 + unsigned int ivsize, unsigned int authsize,
2219 + unsigned int blocksize, int era)
2220 +{
2221 + u32 *key_jump_cmd, *zero_payload_jump_cmd;
2222 + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
2223 +
2224 + /*
2225 + * Compute the index (in bytes) for the LOAD with destination of
2226 + * Class 1 Data Size Register and for the LOAD that generates padding
2227 + */
2228 + if (adata->key_inline) {
2229 + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2230 + cdata->keylen - 4 * CAAM_CMD_SZ;
2231 + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2232 + cdata->keylen - 2 * CAAM_CMD_SZ;
2233 + } else {
2234 + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2235 + 4 * CAAM_CMD_SZ;
2236 + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2237 + 2 * CAAM_CMD_SZ;
2238 + }
2239 +
2240 + stidx = 1 << HDR_START_IDX_SHIFT;
2241 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2242 +
2243 + /* skip key loading if they are loaded due to sharing */
2244 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2245 + JUMP_COND_SHRD);
2246 +
2247 + if (era < 6) {
2248 + if (adata->key_inline)
2249 + append_key_as_imm(desc, adata->key_virt,
2250 + adata->keylen_pad, adata->keylen,
2251 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2252 + KEY_ENC);
2253 + else
2254 + append_key(desc, adata->key_dma, adata->keylen,
2255 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2256 + } else {
2257 + append_proto_dkp(desc, adata);
2258 + }
2259 +
2260 + if (cdata->key_inline)
2261 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
2262 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
2263 + else
2264 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2265 + KEY_DEST_CLASS_REG);
2266 +
2267 + set_jump_tgt_here(desc, key_jump_cmd);
2268 +
2269 + /* class 2 operation */
2270 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2271 + OP_ALG_ENCRYPT);
2272 + /* class 1 operation */
2273 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2274 + OP_ALG_ENCRYPT);
2275 +
2276 + /* payloadlen = input data length - (assoclen + ivlen) */
2277 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
2278 +
2279 + /* math1 = payloadlen + icvlen */
2280 + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
2281 +
2282 + /* padlen = block_size - math1 % block_size */
2283 + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
2284 + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
2285 +
2286 + /* cryptlen = payloadlen + icvlen + padlen */
2287 + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
2288 +
2289 + /*
2290 + * update immediate data with the padding length value
2291 + * for the LOAD in the class 1 data size register.
2292 + */
2293 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2294 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
2295 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2296 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
2297 +
2298 + /* overwrite PL field for the padding info FIFO (NFIFO) entry */
2299 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2300 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
2301 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2302 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
2303 +
2304 + /* store encrypted payload, icv and padding */
2305 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2306 +
2307 + /* if payload length is zero, jump to zero-payload commands */
2308 + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
2309 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2310 + JUMP_COND_MATH_Z);
2311 +
2312 + /* load iv in context1 */
2313 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2314 + LDST_CLASS_1_CCB | ivsize);
2315 +
2316 + /* read assoc for authentication */
2317 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2318 + FIFOLD_TYPE_MSG);
2319 + /* in-snoop the payload: feed it to both cipher and authentication */
2320 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
2321 + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
2322 +
2323 + /* jump over the zero-payload commands */
2324 + append_jump(desc, JUMP_TEST_ALL | 3);
2325 +
2326 + /* zero-payload commands */
2327 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
2328 +
2329 + /* load iv in context1 */
2330 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2331 + LDST_CLASS_1_CCB | ivsize);
2332 +
2333 + /* assoc data is the only data for authentication */
2334 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2335 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
2336 +
2337 + /* send icv to encryption */
2338 + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
2339 + authsize);
2340 +
2341 + /* update class 1 data size register with padding length */
2342 + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
2343 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
2344 +
2345 + /* generate padding and send it to encryption */
2346 + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
2347 + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
2348 + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
2349 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
2350 +
2351 +#ifdef DEBUG
2352 + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
2353 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
2354 + desc_bytes(desc), 1);
2355 +#endif
2356 +}
2357 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
2358 +
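
The MATH command sequence in the encap descriptor computes the CBC pad length entirely on the device: math1 = payload + ICV, math3 = math1 mod blocksize (done with an AND, so the block size must be a power of two), padlen = blocksize - math3. The same arithmetic as a runnable host-side sketch:

#include <stdio.h>

/* Mirrors the descriptor's AND-based modulo, which is valid because the
 * only cipher used here (AES-CBC) has a power-of-two block size. */
static unsigned int tls10_padlen(unsigned int payload, unsigned int icv,
				 unsigned int blocksize)
{
	return blocksize - ((payload + icv) & (blocksize - 1));
}

int main(void)
{
	unsigned int p, pad;

	/* 16-byte AES blocks, 20-byte HMAC-SHA1 ICV */
	for (p = 0; p <= 2; p++) {
		pad = tls10_padlen(p, 20, 16);
		printf("payload %u -> pad %u, total %u\n", p, pad, p + 20 + pad);
	}
	return 0;
}

Note the result is always in 1..blocksize, matching TLS 1.0's requirement that at least one padding byte be present.
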
2359 +/**
2360 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
2361 + * @desc: pointer to buffer used for descriptor construction
2362 + * @cdata: pointer to block cipher transform definitions
2363 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed
2364 + * with OP_ALG_AAI_CBC
2365 + * @adata: pointer to authentication transform definitions.
2366 + * A split key is required for SEC Era < 6; the size of the split key
2367 + * is specified in this case. Valid algorithm values - OP_ALG_ALGSEL_SHA1
2368 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2369 + * @assoclen: associated data length
2370 + * @ivsize: initialization vector size
2371 + * @authsize: authentication data size
2372 + * @blocksize: block cipher size
2373 + * @era: SEC Era
2374 + */
2375 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
2376 + struct alginfo *adata, unsigned int assoclen,
2377 + unsigned int ivsize, unsigned int authsize,
2378 + unsigned int blocksize, int era)
2379 +{
2380 + u32 stidx, jumpback;
2381 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
2382 + /*
2383 + * Pointer Size bool determines the size of address pointers.
2384 + * false - Pointers fit in one 32-bit word.
2385 + * true - Pointers fit in two 32-bit words.
2386 + */
2387 + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
2388 +
2389 + stidx = 1 << HDR_START_IDX_SHIFT;
2390 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2391 +
2392 + /* skip key loading if they are loaded due to sharing */
2393 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2394 + JUMP_COND_SHRD);
2395 +
2396 + if (era < 6)
2397 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2398 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
2399 + else
2400 + append_proto_dkp(desc, adata);
2401 +
2402 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2403 + KEY_DEST_CLASS_REG);
2404 +
2405 + set_jump_tgt_here(desc, key_jump_cmd);
2406 +
2407 + /* class 2 operation */
2408 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2409 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
2410 + /* class 1 operation */
2411 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2412 + OP_ALG_DECRYPT);
2413 +
2414 + /* VSIL = input data length - 2 * block_size */
2415 + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
2416 + blocksize);
2417 +
2418 + /*
2419 + * payloadlen + icvlen + padlen = input data length - (assoclen +
2420 + * ivsize)
2421 + */
2422 + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
2423 +
2424 + /* skip data to the last but one cipher block */
2425 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
2426 +
2427 + /* load iv for the last cipher block */
2428 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2429 + LDST_CLASS_1_CCB | ivsize);
2430 +
2431 + /* read last cipher block */
2432 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
2433 + FIFOLD_TYPE_LAST1 | blocksize);
2434 +
2435 + /* move decrypted block into math0 and math1 */
2436 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
2437 + blocksize);
2438 +
2439 + /* reset AES CHA */
2440 + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
2441 + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
2442 +
2443 + /* rewind input sequence */
2444 + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
2445 +
2446 + /* key1 is in decryption form */
2447 + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
2448 + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
2449 +
2450 + /* load iv in context1 */
2451 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
2452 + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
2453 +
2454 + /* read sequence number */
2455 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
2456 + /* load Type, Version and Len fields in math0 */
2457 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
2458 + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
2459 +
2460 + /* compute (padlen - 1) */
2461 + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
2462 +
2463 + /* math2 = icvlen + (padlen - 1) + 1 */
2464 + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
2465 +
2466 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
2467 +
2468 + /* VSOL = payloadlen + icvlen + padlen */
2469 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
2470 +
2471 + if (caam_little_end)
2472 + append_moveb(desc, MOVE_WAITCOMP |
2473 + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
2474 +
2475 + /* update Len field */
2476 + append_math_sub(desc, REG0, REG0, REG2, 8);
2477 +
2478 + /* store decrypted payload, icv and padding */
2479 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2480 +
2481 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
2482 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
2483 +
2484 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2485 + JUMP_COND_MATH_Z);
2486 +
2487 + /* send Type, Version and Len(pre ICV) fields to authentication */
2488 + append_move(desc, MOVE_WAITCOMP |
2489 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
2490 + (3 << MOVE_OFFSET_SHIFT) | 5);
2491 +
2492 + /* out-snoop the payload: decrypted output also feeds authentication */
2493 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
2494 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
2495 + FIFOLDST_VLF);
2496 + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
2497 +
2498 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
2499 + /* send Type, Version and Len(pre ICV) fields to authentication */
2500 + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
2501 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
2502 + (3 << MOVE_OFFSET_SHIFT) | 5);
2503 +
2504 + set_jump_tgt_here(desc, skip_zero_jump_cmd);
2505 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
2506 +
2507 + /* load icvlen and padlen */
2508 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
2509 + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
2510 +
2511 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
2512 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
2513 +
2514 + /*
2515 + * Start a new input sequence using the SEQ OUT PTR command options,
2516 + * pointer and length used when the current output sequence was defined.
2517 + */
2518 + if (ps) {
2519 + /*
2520 + * Move the lower 32 bits of Shared Descriptor address, the
2521 + * SEQ OUT PTR command, Output Pointer (2 words) and
2522 + * Output Length into math registers.
2523 + */
2524 + if (caam_little_end)
2525 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
2526 + MOVE_DEST_MATH0 |
2527 + (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
2528 + else
2529 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
2530 + MOVE_DEST_MATH0 |
2531 + (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
2532 +
2533 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
2534 + append_math_and_imm_u32(desc, REG0, REG0, IMM,
2535 + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
2536 + /* Append a JUMP command after the copied fields */
2537 + jumpback = CMD_JUMP | (char)-9;
2538 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
2539 + LDST_SRCDST_WORD_DECO_MATH2 |
2540 + (4 << LDST_OFFSET_SHIFT));
2541 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
2542 + /* Move the updated fields back to the Job Descriptor */
2543 + if (caam_little_end)
2544 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
2545 + MOVE_DEST_DESCBUF |
2546 + (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
2547 + else
2548 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
2549 + MOVE_DEST_DESCBUF |
2550 + (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
2551 +
2552 + /*
2553 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
2554 + * and then jump back to the next command from the
2555 + * Shared Descriptor.
2556 + */
2557 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
2558 + } else {
2559 + /*
2560 + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
2561 + * Output Length into math registers.
2562 + */
2563 + if (caam_little_end)
2564 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
2565 + MOVE_DEST_MATH0 |
2566 + (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
2567 + else
2568 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
2569 + MOVE_DEST_MATH0 |
2570 + (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
2571 +
2572 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
2573 + append_math_and_imm_u64(desc, REG0, REG0, IMM,
2574 + ~(((u64)(CMD_SEQ_IN_PTR ^
2575 + CMD_SEQ_OUT_PTR)) << 32));
2576 + /* Append a JUMP command after the copied fields */
2577 + jumpback = CMD_JUMP | (char)-7;
2578 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
2579 + LDST_SRCDST_WORD_DECO_MATH1 |
2580 + (4 << LDST_OFFSET_SHIFT));
2581 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
2582 + /* Move the updated fields back to the Job Descriptor */
2583 + if (caam_little_end)
2584 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
2585 + MOVE_DEST_DESCBUF |
2586 + (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
2587 + else
2588 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
2589 + MOVE_DEST_DESCBUF |
2590 + (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
2591 +
2592 + /*
2593 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
2594 + * and then jump back to the next command from the
2595 + * Shared Descriptor.
2596 + */
2597 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
2598 + }
2599 +
2600 + /* skip payload */
2601 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
2602 + /* check icv */
2603 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
2604 + FIFOLD_TYPE_LAST2 | authsize);
2605 +
2606 +#ifdef DEBUG
2607 + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
2608 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
2609 + desc_bytes(desc), 1);
2610 +#endif
2611 +}
2612 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
2613 +
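
The ps branches in the decap descriptor pull off a notable trick: rather than carrying a second pointer, the descriptor copies the job descriptor's own SEQ OUT PTR command into MATH registers, flips it into a SEQ IN PTR command with a single AND (the two opcodes differ in one bit), patches it back into the descriptor buffer, and jumps to it, restarting the input sequence over the output region. A host-side sketch of the opcode rewrite; the encodings below are assumptions matching the usual CAAM command layout:

#include <stdint.h>
#include <stdio.h>

#define CMD_SEQ_IN_PTR	0xf0000000u	/* assumed opcode encodings */
#define CMD_SEQ_OUT_PTR	0xf8000000u

int main(void)
{
	uint32_t cmd = CMD_SEQ_OUT_PTR | 0x1234;	/* options + length */

	/* Clear the bits in which the two opcodes differ: OUT -> IN. */
	cmd &= ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR);
	printf("rewritten: 0x%08x (is SEQ IN PTR: %d)\n", (unsigned)cmd,
	       (cmd & 0xf8000000u) == CMD_SEQ_IN_PTR);
	return 0;
}
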
2614 +/**
2615 * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
2616 * @desc: pointer to buffer used for descriptor construction
2617 * @cdata: pointer to block cipher transform definitions
2618 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
2619 + * @ivsize: initialization vector size
2620 * @icvsize: integrity check value (ICV) size (truncated or full)
2621 + * @is_qi: true when called from caam/qi
2622 */
2623 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
2624 - unsigned int icvsize)
2625 + unsigned int ivsize, unsigned int icvsize,
2626 + const bool is_qi)
2627 {
2628 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
2629 *zero_assoc_jump_cmd2;
2630 @@ -612,11 +1067,35 @@ void cnstr_shdsc_gcm_encap(u32 * const d
2631 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2632 OP_ALG_ENCRYPT);
2633
2634 + if (is_qi) {
2635 + u32 *wait_load_cmd;
2636 +
2637 + /* REG3 = assoclen */
2638 + append_seq_load(desc, 4, LDST_CLASS_DECO |
2639 + LDST_SRCDST_WORD_DECO_MATH3 |
2640 + (4 << LDST_OFFSET_SHIFT));
2641 +
2642 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2643 + JUMP_COND_CALM | JUMP_COND_NCP |
2644 + JUMP_COND_NOP | JUMP_COND_NIP |
2645 + JUMP_COND_NIFP);
2646 + set_jump_tgt_here(desc, wait_load_cmd);
2647 +
2648 + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
2649 + ivsize);
2650 + } else {
2651 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
2652 + CAAM_CMD_SZ);
2653 + }
2654 +
2655 /* if assoclen + cryptlen is ZERO, skip to ICV write */
2656 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
2657 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
2658 JUMP_COND_MATH_Z);
2659
2660 + if (is_qi)
2661 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2662 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
2663 +
2664 /* if assoclen is ZERO, skip reading the assoc data */
2665 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2666 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
2667 @@ -648,8 +1127,11 @@ void cnstr_shdsc_gcm_encap(u32 * const d
2668 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
2669 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
2670
2671 - /* jump the zero-payload commands */
2672 - append_jump(desc, JUMP_TEST_ALL | 2);
2673 + /* jump to ICV writing */
2674 + if (is_qi)
2675 + append_jump(desc, JUMP_TEST_ALL | 4);
2676 + else
2677 + append_jump(desc, JUMP_TEST_ALL | 2);
2678
2679 /* zero-payload commands */
2680 set_jump_tgt_here(desc, zero_payload_jump_cmd);
2681 @@ -657,10 +1139,18 @@ void cnstr_shdsc_gcm_encap(u32 * const d
2682 /* read assoc data */
2683 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
2684 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
2685 + if (is_qi)
2686 + /* jump to ICV writing */
2687 + append_jump(desc, JUMP_TEST_ALL | 2);
2688
2689 /* There is no input data */
2690 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
2691
2692 + if (is_qi)
2693 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2694 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
2695 + FIFOLD_TYPE_LAST1);
2696 +
2697 /* write ICV */
2698 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
2699 LDST_SRCDST_BYTE_CONTEXT);
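
The is_qi additions reflect how caam/qi hands work to the SEC: there is no job-descriptor slot for assoclen or the IV, so both travel at the front of the input sequence, where the shared descriptor fishes them out (assoclen into DECO MATH3, the IV into the class 1 FIFO). A sketch of that assumed prefix layout, not a driver structure:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct qi_gcm_prefix {
	uint32_t assoclen;	/* SEQ LOAD parks this in DECO MATH3 */
	uint8_t iv[12];		/* SEQ FIFO LOAD, FIFOLD_TYPE_IV */
	/* associated data, then plaintext, follow in the sequence */
};

int main(void)
{
	printf("iv offset in prefix: %zu\n", offsetof(struct qi_gcm_prefix, iv));
	return 0;
}
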
2700 @@ -677,10 +1167,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
2701 * @desc: pointer to buffer used for descriptor construction
2702 * @cdata: pointer to block cipher transform definitions
2703 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
2704 + * @ivsize: initialization vector size
2705 * @icvsize: integrity check value (ICV) size (truncated or full)
2706 + * @is_qi: true when called from caam/qi
2707 */
2708 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
2709 - unsigned int icvsize)
2710 + unsigned int ivsize, unsigned int icvsize,
2711 + const bool is_qi)
2712 {
2713 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
2714
2715 @@ -701,6 +1194,24 @@ void cnstr_shdsc_gcm_decap(u32 * const d
2716 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2717 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
2718
2719 + if (is_qi) {
2720 + u32 *wait_load_cmd;
2721 +
2722 + /* REG3 = assoclen */
2723 + append_seq_load(desc, 4, LDST_CLASS_DECO |
2724 + LDST_SRCDST_WORD_DECO_MATH3 |
2725 + (4 << LDST_OFFSET_SHIFT));
2726 +
2727 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2728 + JUMP_COND_CALM | JUMP_COND_NCP |
2729 + JUMP_COND_NOP | JUMP_COND_NIP |
2730 + JUMP_COND_NIFP);
2731 + set_jump_tgt_here(desc, wait_load_cmd);
2732 +
2733 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2734 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
2735 + }
2736 +
2737 /* if assoclen is ZERO, skip reading the assoc data */
2738 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2739 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
2740 @@ -753,10 +1264,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
2741 * @desc: pointer to buffer used for descriptor construction
2742 * @cdata: pointer to block cipher transform definitions
2743 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
2744 + * @ivsize: initialization vector size
2745 * @icvsize: integrity check value (ICV) size (truncated or full)
2746 + * @is_qi: true when called from caam/qi
2747 */
2748 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
2749 - unsigned int icvsize)
2750 + unsigned int ivsize, unsigned int icvsize,
2751 + const bool is_qi)
2752 {
2753 u32 *key_jump_cmd;
2754
2755 @@ -777,7 +1291,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
2756 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2757 OP_ALG_ENCRYPT);
2758
2759 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
2760 + if (is_qi) {
2761 + u32 *wait_load_cmd;
2762 +
2763 + /* REG3 = assoclen */
2764 + append_seq_load(desc, 4, LDST_CLASS_DECO |
2765 + LDST_SRCDST_WORD_DECO_MATH3 |
2766 + (4 << LDST_OFFSET_SHIFT));
2767 +
2768 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2769 + JUMP_COND_CALM | JUMP_COND_NCP |
2770 + JUMP_COND_NOP | JUMP_COND_NIP |
2771 + JUMP_COND_NIFP);
2772 + set_jump_tgt_here(desc, wait_load_cmd);
2773 +
2774 + /* Read salt and IV */
2775 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
2776 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
2777 + FIFOLD_TYPE_IV);
2778 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2779 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
2780 + }
2781 +
2782 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
2783 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2784
2785 /* Read assoc data */
2786 @@ -785,7 +1321,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
2787 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
2788
2789 /* Skip IV */
2790 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
2791 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
2792
2793 /* Will read cryptlen bytes */
2794 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
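
The two FIFO loads in the QI branch rebuild the 96-bit GCM nonce the way RFC 4106 defines it: a fixed 4-byte salt, stored right after the AES key at setkey time (hence the immediate load from key_virt + keylen), followed by the 8-byte per-request IV taken from the input sequence. In plain C terms:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* rfc4106_setkey keeps a 4-byte salt glued to the end of the AES
	 * key and shrinks cdata.keylen by 4, which is why the descriptor
	 * loads the salt from key_virt + keylen. */
	uint8_t key_and_salt[16 + 4];
	uint8_t iv[8] = {1, 2, 3, 4, 5, 6, 7, 8};
	uint8_t nonce[12];	/* 96-bit GCM nonce = salt || IV */

	memset(key_and_salt, 0xaa, sizeof(key_and_salt));
	memcpy(nonce, key_and_salt + 16, 4);	/* fixed salt */
	memcpy(nonce + 4, iv, 8);		/* per-request IV */
	printf("nonce[0]=0x%02x nonce[11]=0x%02x\n", nonce[0], nonce[11]);
	return 0;
}
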
2795 @@ -824,10 +1360,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap)
2796 * @desc: pointer to buffer used for descriptor construction
2797 * @cdata: pointer to block cipher transform definitions
2798 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
2799 + * @ivsize: initialization vector size
2800 * @icvsize: integrity check value (ICV) size (truncated or full)
2801 + * @is_qi: true when called from caam/qi
2802 */
2803 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
2804 - unsigned int icvsize)
2805 + unsigned int ivsize, unsigned int icvsize,
2806 + const bool is_qi)
2807 {
2808 u32 *key_jump_cmd;
2809
2810 @@ -849,7 +1388,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
2811 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2812 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
2813
2814 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
2815 + if (is_qi) {
2816 + u32 *wait_load_cmd;
2817 +
2818 + /* REG3 = assoclen */
2819 + append_seq_load(desc, 4, LDST_CLASS_DECO |
2820 + LDST_SRCDST_WORD_DECO_MATH3 |
2821 + (4 << LDST_OFFSET_SHIFT));
2822 +
2823 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2824 + JUMP_COND_CALM | JUMP_COND_NCP |
2825 + JUMP_COND_NOP | JUMP_COND_NIP |
2826 + JUMP_COND_NIFP);
2827 + set_jump_tgt_here(desc, wait_load_cmd);
2828 +
2829 + /* Read salt and IV */
2830 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
2831 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
2832 + FIFOLD_TYPE_IV);
2833 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2834 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
2835 + }
2836 +
2837 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
2838 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2839
2840 /* Read assoc data */
2841 @@ -857,7 +1418,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
2842 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
2843
2844 /* Skip IV */
2845 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
2846 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
2847
2848 /* Will read cryptlen bytes */
2849 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
2850 @@ -896,10 +1457,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap)
2851 * @desc: pointer to buffer used for descriptor construction
2852 * @cdata: pointer to block cipher transform definitions
2853 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
2854 + * @ivsize: initialization vector size
2855 * @icvsize: integrity check value (ICV) size (truncated or full)
2856 + * @is_qi: true when called from caam/qi
2857 */
2858 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
2859 - unsigned int icvsize)
2860 + unsigned int ivsize, unsigned int icvsize,
2861 + const bool is_qi)
2862 {
2863 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
2864
2865 @@ -920,6 +1484,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
2866 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2867 OP_ALG_ENCRYPT);
2868
2869 + if (is_qi) {
2870 + /* assoclen is not needed, skip it */
2871 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
2872 +
2873 + /* Read salt and IV */
2874 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
2875 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
2876 + FIFOLD_TYPE_IV);
2877 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2878 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
2879 + }
2880 +
2881 /* assoclen + cryptlen = seqinlen */
2882 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
2883
2884 @@ -966,10 +1542,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap)
2885 * @desc: pointer to buffer used for descriptor construction
2886 * @cdata: pointer to block cipher transform definitions
2887 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
2888 + * @ivsize: initialization vector size
2889 * @icvsize: integrity check value (ICV) size (truncated or full)
2890 + * @is_qi: true when called from caam/qi
2891 */
2892 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
2893 - unsigned int icvsize)
2894 + unsigned int ivsize, unsigned int icvsize,
2895 + const bool is_qi)
2896 {
2897 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
2898
2899 @@ -990,6 +1569,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
2900 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2901 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
2902
2903 + if (is_qi) {
2904 + /* assoclen is not needed, skip it */
2905 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
2906 +
2907 + /* Read salt and IV */
2908 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
2909 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
2910 + FIFOLD_TYPE_IV);
2911 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
2912 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
2913 + }
2914 +
2915 /* assoclen + cryptlen = seqoutlen */
2916 append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
2917
2918 @@ -1075,7 +1666,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
2919
2920 /* Load nonce into CONTEXT1 reg */
2921 if (is_rfc3686) {
2922 - u8 *nonce = cdata->key_virt + cdata->keylen;
2923 + const u8 *nonce = cdata->key_virt + cdata->keylen;
2924
2925 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
2926 LDST_CLASS_IND_CCB |
2927 @@ -1140,7 +1731,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
2928
2929 /* Load nonce into CONTEXT1 reg */
2930 if (is_rfc3686) {
2931 - u8 *nonce = cdata->key_virt + cdata->keylen;
2932 + const u8 *nonce = cdata->key_virt + cdata->keylen;
2933
2934 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
2935 LDST_CLASS_IND_CCB |
2936 @@ -1209,7 +1800,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
2937
2938 /* Load Nonce into CONTEXT1 reg */
2939 if (is_rfc3686) {
2940 - u8 *nonce = cdata->key_virt + cdata->keylen;
2941 + const u8 *nonce = cdata->key_virt + cdata->keylen;
2942
2943 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
2944 LDST_CLASS_IND_CCB |
2945 --- a/drivers/crypto/caam/caamalg_desc.h
2946 +++ b/drivers/crypto/caam/caamalg_desc.h
2947 @@ -17,6 +17,9 @@
2948 #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
2949 #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
2950
2951 +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
2952 +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
2953 +
2954 /* Note: Nonce is counted in cdata.keylen */
2955 #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
2956
2957 @@ -27,14 +30,20 @@
2958 #define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
2959 #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
2960 #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
2961 +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
2962 +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
2963
2964 #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
2965 #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
2966 #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
2967 +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
2968 +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
2969
2970 #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
2971 #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
2972 #define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
2973 +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
2974 +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
2975
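
Each new *_QI_* length accounts for the extra command words the corresponding is_qi branch appends, so the inline-key budgeting stays exact. A quick check of the arithmetic using the macro values above (CAAM_CMD_SZ is the 4-byte command word size):

#include <stdio.h>

#define CAAM_CMD_SZ		4	/* one 4-byte command word */
#define DESC_GCM_BASE		(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN	(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_QI_GCM_ENC_LEN	(DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)

int main(void)
{
	/* 76 + 24 = 100 bytes, well inside a 256-byte descriptor buffer */
	printf("QI GCM encap length: %d bytes\n", DESC_QI_GCM_ENC_LEN);
	return 0;
}
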
2976 #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
2977 #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
2978 @@ -43,46 +52,62 @@
2979 15 * CAAM_CMD_SZ)
2980
2981 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
2982 - unsigned int icvsize);
2983 + unsigned int icvsize, int era);
2984
2985 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
2986 - unsigned int icvsize);
2987 + unsigned int icvsize, int era);
2988
2989 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
2990 struct alginfo *adata, unsigned int ivsize,
2991 unsigned int icvsize, const bool is_rfc3686,
2992 u32 *nonce, const u32 ctx1_iv_off,
2993 - const bool is_qi);
2994 + const bool is_qi, int era);
2995
2996 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
2997 struct alginfo *adata, unsigned int ivsize,
2998 unsigned int icvsize, const bool geniv,
2999 const bool is_rfc3686, u32 *nonce,
3000 - const u32 ctx1_iv_off, const bool is_qi);
3001 + const u32 ctx1_iv_off, const bool is_qi, int era);
3002
3003 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
3004 struct alginfo *adata, unsigned int ivsize,
3005 unsigned int icvsize, const bool is_rfc3686,
3006 u32 *nonce, const u32 ctx1_iv_off,
3007 - const bool is_qi);
3008 + const bool is_qi, int era);
3009 +
3010 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
3011 + struct alginfo *adata, unsigned int assoclen,
3012 + unsigned int ivsize, unsigned int authsize,
3013 + unsigned int blocksize, int era);
3014 +
3015 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
3016 + struct alginfo *adata, unsigned int assoclen,
3017 + unsigned int ivsize, unsigned int authsize,
3018 + unsigned int blocksize, int era);
3019
3020 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
3021 - unsigned int icvsize);
3022 + unsigned int ivsize, unsigned int icvsize,
3023 + const bool is_qi);
3024
3025 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
3026 - unsigned int icvsize);
3027 + unsigned int ivsize, unsigned int icvsize,
3028 + const bool is_qi);
3029
3030 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
3031 - unsigned int icvsize);
3032 + unsigned int ivsize, unsigned int icvsize,
3033 + const bool is_qi);
3034
3035 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
3036 - unsigned int icvsize);
3037 + unsigned int ivsize, unsigned int icvsize,
3038 + const bool is_qi);
3039
3040 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
3041 - unsigned int icvsize);
3042 + unsigned int ivsize, unsigned int icvsize,
3043 + const bool is_qi);
3044
3045 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
3046 - unsigned int icvsize);
3047 + unsigned int ivsize, unsigned int icvsize,
3048 + const bool is_qi);
3049
3050 void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
3051 unsigned int ivsize, const bool is_rfc3686,
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -7,7 +7,7 @@
  */

 #include "compat.h"
-
+#include "ctrl.h"
 #include "regs.h"
 #include "intern.h"
 #include "desc_constr.h"
@@ -53,6 +53,7 @@ struct caam_ctx {
 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
 u8 key[CAAM_MAX_KEY_SIZE];
 dma_addr_t key_dma;
+ enum dma_data_direction dir;
 struct alginfo adata;
 struct alginfo cdata;
 unsigned int authsize;
@@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypt
 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
 OP_ALG_AAI_CTR_MOD128);
 const bool is_rfc3686 = alg->caam.rfc3686;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

 if (!ctx->cdata.keylen || !ctx->authsize)
 return 0;
@@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypt

 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
 ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);

 skip_enc:
 /* aead_decrypt shared descriptor */
@@ -149,7 +151,8 @@ skip_enc:

 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
 ivsize, ctx->authsize, alg->caam.geniv,
- is_rfc3686, nonce, ctx1_iv_off, true);
+ is_rfc3686, nonce, ctx1_iv_off, true,
+ ctrlpriv->era);

 if (!alg->caam.geniv)
 goto skip_givenc;
@@ -176,7 +179,7 @@ skip_enc:

 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
 ivsize, ctx->authsize, is_rfc3686, nonce,
- ctx1_iv_off, true);
+ ctx1_iv_off, true, ctrlpriv->era);

 skip_givenc:
 return 0;
@@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aea
 {
 struct caam_ctx *ctx = crypto_aead_ctx(aead);
 struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 struct crypto_authenc_keys keys;
 int ret = 0;

@@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aea
 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif

+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
 keys.authkeylen, CAAM_MAX_KEY_SIZE -
 keys.enckeylen);
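For reference, both paths above fill one DMA buffer that holds the two keys back to back. A sketch of the layout (comment only, not part of the patch):

/*
 * Sketch only: layout of the ctx->key DMA buffer prepared above.
 *
 *   ctx->key:  [ auth key slot, adata.keylen_pad bytes ][ enc key ]
 *
 * Era < 6:  the slot holds the MDHA split key computed by
 *           gen_split_key().
 * Era >= 6: the slot initially holds the raw HMAC key; the DKP command
 *           in the shared descriptor derives the split key from it in
 *           place, so the device writes back into this buffer (see the
 *           ctx->dir change in the next hunk).
 */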
@@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aea
 /* postpend encryption key to auth split key */
 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
- keys.enckeylen, DMA_TO_DEVICE);
+ keys.enckeylen, ctx->dir);
 #ifdef DEBUG
 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
 ctx->adata.keylen_pad + keys.enckeylen, 1);
 #endif

+skip_split_key:
 ctx->cdata.keylen = keys.enckeylen;

 ret = aead_set_sh_desc(aead);
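The switch from DMA_TO_DEVICE to ctx->dir pairs with a context-init change that belongs to this patch but falls outside this excerpt. A hedged sketch of the expected initialization, mirroring the upstream DKP support (helper name is ours):

/*
 * Sketch only: ctx->dir selection at context-init time. With DKP the
 * descriptor writes the derived split key back into ctx->key, so the
 * mapping must allow device writes on Era 6+.
 */
static void sketch_init_key_mapping(struct caam_ctx *ctx,
				    struct caam_drv_private *ctrlpriv)
{
	ctx->dir = (ctrlpriv->era >= 6) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      ctx->dir);
}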
@@ -258,6 +284,468 @@ badkey:
 return -EINVAL;
 }

+static int tls_set_sh_desc(struct crypto_aead *tls)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ unsigned int ivsize = crypto_aead_ivsize(tls);
+ unsigned int blocksize = crypto_aead_blocksize(tls);
+ unsigned int assoclen = 13; /* always 13 bytes for TLS */
+ unsigned int data_len[2];
+ u32 inl_mask;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * TLS 1.0 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
+
+ if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
+ &inl_mask, ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+ assoclen, ivsize, ctx->authsize, blocksize,
+ ctrlpriv->era);
+
+ /*
+ * TLS 1.0 decrypt shared descriptor
+ * Keys do not fit inline, regardless of algorithms used
+ */
+ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
+ assoclen, ivsize, ctx->authsize, blocksize,
+ ctrlpriv->era);
+
+ return 0;
+}
+
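The hard-coded assoclen above is the 13-byte TLS 1.0 pseudo-header that is MAC'ed together with the payload (RFC 2246). A sketch of its layout (the struct is illustrative, not part of the patch):

struct tls10_assoc_data {
	__be64 seq_num;	/* implicit 64-bit record sequence number */
	u8 type;	/* ContentType, e.g. 23 = application_data */
	__be16 version;	/* ProtocolVersion, 0x0301 for TLS 1.0 */
	__be16 length;	/* plaintext fragment length */
} __packed;		/* 8 + 1 + 2 + 2 = 13 bytes = assoclen */

The desc_inline_query() call, in turn, returns a bitmask with one bit per data_len[] entry (bit 0: padded split key, bit 1: encryption key) indicating whether that key still fits inline once the descriptor commands and reserved job-descriptor space are accounted for; the code above maps each bit onto key_virt (inline) or key_dma (referenced by address).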
+static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+
+ ctx->authsize = authsize;
+ tls_set_sh_desc(tls);
+
+ return 0;
+}
+
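These callbacks are reached through the generic kernel AEAD API. A hedged usage sketch (the algorithm name follows the CRYPTO_TLS Kconfig help text; error handling abbreviated):

#include <crypto/aead.h>

/* Sketch only: allocate a tls10 transform and program it, which ends
 * up in tls_setauthsize() and tls_setkey() above when this driver
 * services the request.
 */
static int tls10_demo(const u8 *authenc_key, unsigned int authenc_keylen)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* SHA-1 ICV is 20 bytes */
	ret = crypto_aead_setauthsize(tfm, 20);
	if (!ret)
		/* authenc-format key blob; see the sketch after tls_setkey() */
		ret = crypto_aead_setkey(tfm, authenc_key, authenc_keylen);

	crypto_free_aead(tfm);
	return ret;
}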
+static int tls_setkey(struct crypto_aead *tls, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+ struct crypto_authenc_keys keys;
+ int ret = 0;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+#ifdef DEBUG
+ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
+ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
+ }
+
+ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
+ if (ret)
+ goto badkey;
+
+ /* postpend encryption key to auth split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+
+#ifdef DEBUG
+ dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+ ctx->adata.keylen, ctx->adata.keylen_pad);
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
+skip_split_key:
+ ctx->cdata.keylen = keys.enckeylen;
+
+ ret = tls_set_sh_desc(tls);
+ if (ret)
+ goto badkey;
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ goto badkey;
+ }
+ }
+
+ if (ctx->drv_ctx[DECRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+ goto badkey;
+ }
+ }
+
+ return ret;
+badkey:
+ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
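crypto_authenc_extractkeys() above consumes a key blob in the generic authenc format: an rtattr header carrying the encryption key length, followed by the raw auth key bytes and then the enc key bytes. A sketch of a producer (the helper name is ours; the layout follows include/crypto/authenc.h):

#include <crypto/authenc.h>
#include <linux/rtnetlink.h>

/* Sketch only: build the blob that tls_setkey()/aead_setkey() expect.
 * Returns the total blob length; buf must be large enough.
 */
static unsigned int build_authenc_key(u8 *buf,
				      const u8 *authkey,
				      unsigned int authkeylen,
				      const u8 *enckey,
				      unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	/* auth key first, enc key immediately after it */
	memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen, enckey,
	       enckeylen);

	return RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
}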
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ return 0;
+}
+
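Both branches above make the same inline-versus-pointer decision; only the descriptor length constant differs. A sketch factoring it out to show the arithmetic (CAAM_DESC_BYTES_MAX is the 64-word, 256-byte descriptor-buffer ceiling per desc_constr.h; the helper name is ours):

/* Sketch only: decide where the GCM key lives. What remains of the
 * 256-byte descriptor buffer, after reserving DESC_JOB_IO_LEN bytes
 * for the job descriptor head and room for the key itself, must still
 * hold the shared-descriptor commands.
 */
static void gcm_choose_key_location(struct caam_ctx *ctx, int desc_len)
{
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (rem_bytes >= desc_len) {
		/* key fits: embed it in the shared descriptor */
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		/* too tight: reference the key by DMA address instead */
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}
}

With such a helper the two blocks would collapse to gcm_choose_key_location(ctx, DESC_QI_GCM_ENC_LEN) and gcm_choose_key_location(ctx, DESC_QI_GCM_DEC_LEN).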
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ gcm_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
+ ctx->cdata.keylen = keylen;
+
+ ret = gcm_set_sh_desc(aead);
+ if (ret)
+ return ret;
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+ return ret;
+ }
+ }