1 From ba8e92b322a3763880fdc4d19e9c7085f5504be7 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Tue, 23 Apr 2019 17:41:43 +0800
4 Subject: [PATCH] sec: support layerscape
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
9 This is an integrated patch of sec for layerscape
11 Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
12 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
13 Signed-off-by: Biwen Li <biwen.li@nxp.com>
14 Signed-off-by: Carmen Iorga <carmen.iorga@nxp.com>
15 Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
16 Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
17 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
18 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
19 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
20 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
23 Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
27 crypto/chacha20poly1305.c | 2 -
28 crypto/tcrypt.c | 27 +-
29 crypto/testmgr.c | 244 ++
30 crypto/testmgr.h | 219 ++
31 crypto/tls.c | 607 ++++
32 drivers/crypto/Makefile | 2 +-
33 drivers/crypto/caam/Kconfig | 85 +-
34 drivers/crypto/caam/Makefile | 26 +-
35 drivers/crypto/caam/caamalg.c | 468 +++-
36 drivers/crypto/caam/caamalg_desc.c | 903 +++++-
37 drivers/crypto/caam/caamalg_desc.h | 52 +-
38 drivers/crypto/caam/caamalg_qi.c | 1060 ++++++-
39 drivers/crypto/caam/caamalg_qi2.c | 5843 +++++++++++++++++++++++++++++++++++
40 drivers/crypto/caam/caamalg_qi2.h | 276 ++
41 drivers/crypto/caam/caamhash.c | 192 +-
42 drivers/crypto/caam/caamhash_desc.c | 108 +
43 drivers/crypto/caam/caamhash_desc.h | 49 +
44 drivers/crypto/caam/caampkc.c | 52 +-
45 drivers/crypto/caam/caamrng.c | 52 +-
46 drivers/crypto/caam/compat.h | 4 +
47 drivers/crypto/caam/ctrl.c | 194 +-
48 drivers/crypto/caam/desc.h | 89 +-
49 drivers/crypto/caam/desc_constr.h | 59 +-
50 drivers/crypto/caam/dpseci.c | 865 ++++++
51 drivers/crypto/caam/dpseci.h | 433 +++
52 drivers/crypto/caam/dpseci_cmd.h | 287 ++
53 drivers/crypto/caam/error.c | 81 +-
54 drivers/crypto/caam/error.h | 6 +-
55 drivers/crypto/caam/intern.h | 102 +-
56 drivers/crypto/caam/jr.c | 84 +
57 drivers/crypto/caam/jr.h | 2 +
58 drivers/crypto/caam/key_gen.c | 30 -
59 drivers/crypto/caam/key_gen.h | 30 +
60 drivers/crypto/caam/qi.c | 134 +-
61 drivers/crypto/caam/qi.h | 2 +-
62 drivers/crypto/caam/regs.h | 76 +-
63 drivers/crypto/caam/sg_sw_qm.h | 46 +-
64 drivers/crypto/talitos.c | 8 +
65 include/crypto/chacha20.h | 1 +
66 41 files changed, 12088 insertions(+), 733 deletions(-)
67 create mode 100644 crypto/tls.c
68 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
69 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
70 create mode 100644 drivers/crypto/caam/caamhash_desc.c
71 create mode 100644 drivers/crypto/caam/caamhash_desc.h
72 create mode 100644 drivers/crypto/caam/dpseci.c
73 create mode 100644 drivers/crypto/caam/dpseci.h
74 create mode 100644 drivers/crypto/caam/dpseci_cmd.h
78 @@ -312,6 +312,26 @@ config CRYPTO_ECHAINIV
79 a sequence number xored with a salt. This is the default
83 + tristate "TLS support"
85 + select CRYPTO_BLKCIPHER
86 + select CRYPTO_MANAGER
89 + select CRYPTO_AUTHENC
91 + Support for TLS 1.0 record encryption and decryption
93 + This module adds support for encryption/decryption of TLS 1.0 frames
94 + using blockcipher algorithms. The name of the resulting algorithm is
95 + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
96 + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
97 + accelerated versions will be used automatically if available.
99 + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
100 + operations through AF_ALG or cryptodev interfaces
102 comment "Block modes"
105 --- a/crypto/Makefile
106 +++ b/crypto/Makefile
107 @@ -118,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
108 obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
109 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
110 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
111 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
112 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
113 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
114 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
115 --- a/crypto/chacha20poly1305.c
116 +++ b/crypto/chacha20poly1305.c
119 #include "internal.h"
121 -#define CHACHAPOLY_IV_SIZE 12
123 struct chachapoly_instance_ctx {
124 struct crypto_skcipher_spawn chacha;
125 struct crypto_ahash_spawn poly;
126 --- a/crypto/tcrypt.c
127 +++ b/crypto/tcrypt.c
128 @@ -76,7 +76,7 @@ static char *check[] = {
129 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
130 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
131 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
136 struct tcrypt_result {
137 @@ -355,11 +355,13 @@ static void test_aead_speed(const char *
139 aead_request_set_ad(req, aad_size);
143 ret = test_aead_jiffies(req, enc, *b_size,
148 ret = test_aead_cycles(req, enc, *b_size);
152 pr_err("%s() failed return code=%d\n", e, ret);
153 @@ -736,12 +738,14 @@ static void test_ahash_speed_common(cons
155 ahash_request_set_crypt(req, sg, output, speed[i].plen);
159 ret = test_ahash_jiffies(req, speed[i].blen,
160 speed[i].plen, output, secs);
164 ret = test_ahash_cycles(req, speed[i].blen,
165 speed[i].plen, output);
169 pr_err("hashing failed ret=%d\n", ret);
170 @@ -959,12 +963,14 @@ static void test_skcipher_speed(const ch
172 skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
176 ret = test_acipher_jiffies(req, enc,
181 ret = test_acipher_cycles(req, enc,
186 pr_err("%s() failed flags=%x\n", e,
187 @@ -1336,6 +1342,10 @@ static int do_test(const char *alg, u32
188 ret += tcrypt_test("hmac(sha3-512)");
192 + ret += tcrypt_test("rsa");
196 ret += tcrypt_test("ansi_cprng");
198 @@ -1397,6 +1407,9 @@ static int do_test(const char *alg, u32
200 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
203 + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
206 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
207 speed_template_16_24_32);
208 --- a/crypto/testmgr.c
209 +++ b/crypto/testmgr.c
210 @@ -117,6 +117,13 @@ struct drbg_test_suite {
214 +struct tls_test_suite {
216 + struct tls_testvec *vecs;
217 + unsigned int count;
221 struct akcipher_test_suite {
222 const struct akcipher_testvec *vecs;
224 @@ -140,6 +147,7 @@ struct alg_test_desc {
225 struct hash_test_suite hash;
226 struct cprng_test_suite cprng;
227 struct drbg_test_suite drbg;
228 + struct tls_test_suite tls;
229 struct akcipher_test_suite akcipher;
230 struct kpp_test_suite kpp;
232 @@ -991,6 +999,233 @@ static int test_aead(struct crypto_aead
236 +static int __test_tls(struct crypto_aead *tfm, int enc,
237 + struct tls_testvec *template, unsigned int tcount,
238 + const bool diff_dst)
240 + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
241 + unsigned int i, k, authsize;
243 + struct aead_request *req;
244 + struct scatterlist *sg;
245 + struct scatterlist *sgout;
247 + struct tcrypt_result result;
253 + char *xbuf[XBUFSIZE];
254 + char *xoutbuf[XBUFSIZE];
255 + char *axbuf[XBUFSIZE];
258 + if (testmgr_alloc_buf(xbuf))
261 + if (diff_dst && testmgr_alloc_buf(xoutbuf))
264 + if (testmgr_alloc_buf(axbuf))
267 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
271 + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
275 + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
281 + d = diff_dst ? "-ddst" : "";
282 + e = enc ? "encryption" : "decryption";
284 + init_completion(&result.completion);
286 + req = aead_request_alloc(tfm, GFP_KERNEL);
288 + pr_err("alg: tls%s: Failed to allocate request for %s\n",
293 + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
294 + tcrypt_complete, &result);
296 + for (i = 0; i < tcount; i++) {
301 + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
302 + template[i].alen > PAGE_SIZE))
305 + memcpy(assoc, template[i].assoc, template[i].alen);
306 + memcpy(input, template[i].input, template[i].ilen);
308 + if (template[i].iv)
309 + memcpy(iv, template[i].iv, MAX_IVLEN);
311 + memset(iv, 0, MAX_IVLEN);
313 + crypto_aead_clear_flags(tfm, ~0);
315 + if (template[i].klen > MAX_KEYLEN) {
316 + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
317 + d, i, algo, template[i].klen, MAX_KEYLEN);
321 + memcpy(key, template[i].key, template[i].klen);
323 + ret = crypto_aead_setkey(tfm, key, template[i].klen);
324 + if (!ret == template[i].fail) {
325 + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
326 + d, i, algo, crypto_aead_get_flags(tfm));
332 + ret = crypto_aead_setauthsize(tfm, authsize);
334 + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
335 + d, authsize, i, algo);
339 + k = !!template[i].alen;
340 + sg_init_table(sg, k + 1);
341 + sg_set_buf(&sg[0], assoc, template[i].alen);
342 + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
343 + template[i].ilen));
347 + sg_init_table(sgout, k + 1);
348 + sg_set_buf(&sgout[0], assoc, template[i].alen);
350 + output = xoutbuf[0];
351 + sg_set_buf(&sgout[k], output,
352 + (enc ? template[i].rlen : template[i].ilen));
355 + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
356 + template[i].ilen, iv);
358 + aead_request_set_ad(req, template[i].alen);
360 + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
364 + if (template[i].novrfy) {
365 + /* verification was supposed to fail */
366 + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
368 + /* so really, we got a bad message */
375 + wait_for_completion(&result.completion);
376 + reinit_completion(&result.completion);
381 + /* verification failure was expected */
382 + if (template[i].novrfy)
386 + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
387 + d, e, i, algo, -ret);
392 + if (memcmp(q, template[i].result, template[i].rlen)) {
393 + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
395 + hexdump(q, template[i].rlen);
396 + pr_err("should be:\n");
397 + hexdump(template[i].result, template[i].rlen);
404 + aead_request_free(req);
412 + testmgr_free_buf(axbuf);
415 + testmgr_free_buf(xoutbuf);
417 + testmgr_free_buf(xbuf);
422 +static int test_tls(struct crypto_aead *tfm, int enc,
423 + struct tls_testvec *template, unsigned int tcount)
426 + /* test 'dst == src' case */
427 + ret = __test_tls(tfm, enc, template, tcount, false);
430 + /* test 'dst != src' case */
431 + return __test_tls(tfm, enc, template, tcount, true);
434 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
435 + u32 type, u32 mask)
437 + struct crypto_aead *tfm;
440 + tfm = crypto_alloc_aead(driver, type, mask);
442 + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
443 + driver, PTR_ERR(tfm));
444 + return PTR_ERR(tfm);
447 + if (desc->suite.tls.enc.vecs) {
448 + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
449 + desc->suite.tls.enc.count);
454 + if (!err && desc->suite.tls.dec.vecs)
455 + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
456 + desc->suite.tls.dec.count);
459 + crypto_free_aead(tfm);
463 static int test_cipher(struct crypto_cipher *tfm, int enc,
464 const struct cipher_testvec *template,
466 @@ -3524,6 +3759,15 @@ static const struct alg_test_desc alg_te
467 .hash = __VECS(tgr192_tv_template)
470 + .alg = "tls10(hmac(sha1),cbc(aes))",
471 + .test = alg_test_tls,
474 + .enc = __VECS(tls_enc_tv_template),
475 + .dec = __VECS(tls_dec_tv_template)
480 .test = alg_test_hash,
482 --- a/crypto/testmgr.h
483 +++ b/crypto/testmgr.h
484 @@ -125,6 +125,20 @@ struct drbg_testvec {
488 +struct tls_testvec {
489 + char *key; /* wrapped keys for encryption and authentication */
490 + char *iv; /* initialization vector */
491 + char *input; /* input data */
492 + char *assoc; /* associated data: seq num, type, version, input len */
493 + char *result; /* result data */
494 + unsigned char fail; /* the test failure is expected */
495 + unsigned char novrfy; /* dec verification failure expected */
496 + unsigned char klen; /* key length */
497 + unsigned short ilen; /* input data length */
498 + unsigned short alen; /* associated data length */
499 + unsigned short rlen; /* result length */
502 struct akcipher_testvec {
503 const unsigned char *key;
504 const unsigned char *m;
505 @@ -153,6 +167,211 @@ struct kpp_testvec {
506 static const char zeroed_string[48];
509 + * TLS1.0 synthetic test vectors
511 +static struct tls_testvec tls_enc_tv_template[] = {
513 +#ifdef __LITTLE_ENDIAN
514 + .key = "\x08\x00" /* rta length */
515 + "\x01\x00" /* rta type */
517 + .key = "\x00\x08" /* rta length */
518 + "\x00\x01" /* rta type */
520 + "\x00\x00\x00\x10" /* enc key length */
521 + "authenticationkey20benckeyis16_bytes",
522 + .klen = 8 + 20 + 16,
523 + .iv = "iv0123456789abcd",
524 + .input = "Single block msg",
526 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
527 + "\x00\x03\x01\x00\x10",
529 + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
530 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
531 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
532 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
533 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
534 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
535 + .rlen = 16 + 20 + 12,
537 +#ifdef __LITTLE_ENDIAN
538 + .key = "\x08\x00" /* rta length */
539 + "\x01\x00" /* rta type */
541 + .key = "\x00\x08" /* rta length */
542 + "\x00\x01" /* rta type */
544 + "\x00\x00\x00\x10" /* enc key length */
545 + "authenticationkey20benckeyis16_bytes",
546 + .klen = 8 + 20 + 16,
547 + .iv = "iv0123456789abcd",
550 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
551 + "\x00\x03\x01\x00\x00",
553 + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
554 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
555 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
556 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
559 +#ifdef __LITTLE_ENDIAN
560 + .key = "\x08\x00" /* rta length */
561 + "\x01\x00" /* rta type */
563 + .key = "\x00\x08" /* rta length */
564 + "\x00\x01" /* rta type */
566 + "\x00\x00\x00\x10" /* enc key length */
567 + "authenticationkey20benckeyis16_bytes",
568 + .klen = 8 + 20 + 16,
569 + .iv = "iv0123456789abcd",
570 + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
571 + " plaintext285 bytes plaintext285 bytes plaintext285"
572 + " bytes plaintext285 bytes plaintext285 bytes"
573 + " plaintext285 bytes plaintext285 bytes plaintext285"
574 + " bytes plaintext285 bytes plaintext285 bytes"
575 + " plaintext285 bytes plaintext285 bytes plaintext285"
576 + " bytes plaintext285 bytes plaintext",
578 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
579 + "\x00\x03\x01\x01\x1d",
581 + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
582 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
583 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
584 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
585 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
586 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
587 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
588 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
589 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
590 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
591 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
592 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
593 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
594 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
595 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
596 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
597 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
598 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
599 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
600 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
601 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
602 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
603 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
604 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
605 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
606 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
607 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
608 + .rlen = 285 + 20 + 15,
612 +static struct tls_testvec tls_dec_tv_template[] = {
614 +#ifdef __LITTLE_ENDIAN
615 + .key = "\x08\x00" /* rta length */
616 + "\x01\x00" /* rta type */
618 + .key = "\x00\x08" /* rta length */
619 + "\x00\x01" /* rta type */
621 + "\x00\x00\x00\x10" /* enc key length */
622 + "authenticationkey20benckeyis16_bytes",
623 + .klen = 8 + 20 + 16,
624 + .iv = "iv0123456789abcd",
625 + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
626 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
627 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
628 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
629 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
630 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
631 + .ilen = 16 + 20 + 12,
632 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
633 + "\x00\x03\x01\x00\x30",
635 + .result = "Single block msg",
638 +#ifdef __LITTLE_ENDIAN
639 + .key = "\x08\x00" /* rta length */
640 + "\x01\x00" /* rta type */
642 + .key = "\x00\x08" /* rta length */
643 + "\x00\x01" /* rta type */
645 + "\x00\x00\x00\x10" /* enc key length */
646 + "authenticationkey20benckeyis16_bytes",
647 + .klen = 8 + 20 + 16,
648 + .iv = "iv0123456789abcd",
649 + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
650 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
651 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
652 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
654 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
655 + "\x00\x03\x01\x00\x20",
660 +#ifdef __LITTLE_ENDIAN
661 + .key = "\x08\x00" /* rta length */
662 + "\x01\x00" /* rta type */
664 + .key = "\x00\x08" /* rta length */
665 + "\x00\x01" /* rta type */
667 + "\x00\x00\x00\x10" /* enc key length */
668 + "authenticationkey20benckeyis16_bytes",
669 + .klen = 8 + 20 + 16,
670 + .iv = "iv0123456789abcd",
671 + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
672 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
673 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
674 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
675 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
676 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
677 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
678 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
679 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
680 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
681 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
682 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
683 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
684 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
685 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
686 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
687 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
688 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
689 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
690 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
691 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
692 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
693 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
694 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
695 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
696 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
697 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
699 + .ilen = 285 + 20 + 15,
700 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
701 + "\x00\x03\x01\x01\x40",
703 + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
704 + " plaintext285 bytes plaintext285 bytes plaintext285"
705 + " bytes plaintext285 bytes plaintext285 bytes"
706 + " plaintext285 bytes plaintext285 bytes plaintext285"
707 + " bytes plaintext285 bytes plaintext285 bytes"
708 + " plaintext285 bytes plaintext285 bytes plaintext",
714 * RSA test vectors. Borrowed from openSSL.
716 static const struct akcipher_testvec rsa_tv_template[] = {
721 + * Copyright 2013 Freescale Semiconductor, Inc.
722 + * Copyright 2017 NXP Semiconductor, Inc.
724 + * This program is free software; you can redistribute it and/or modify it
725 + * under the terms of the GNU General Public License as published by the Free
726 + * Software Foundation; either version 2 of the License, or (at your option)
727 + * any later version.
731 +#include <crypto/internal/aead.h>
732 +#include <crypto/internal/hash.h>
733 +#include <crypto/internal/skcipher.h>
734 +#include <crypto/authenc.h>
735 +#include <crypto/null.h>
736 +#include <crypto/scatterwalk.h>
737 +#include <linux/err.h>
738 +#include <linux/init.h>
739 +#include <linux/module.h>
740 +#include <linux/rtnetlink.h>
742 +struct tls_instance_ctx {
743 + struct crypto_ahash_spawn auth;
744 + struct crypto_skcipher_spawn enc;
747 +struct crypto_tls_ctx {
748 + unsigned int reqoff;
749 + struct crypto_ahash *auth;
750 + struct crypto_skcipher *enc;
751 + struct crypto_skcipher *null;
754 +struct tls_request_ctx {
756 + * cryptlen holds the payload length in the case of encryption or
757 + * payload_len + icv_len + padding_len in case of decryption
759 + unsigned int cryptlen;
760 + /* working space for partial results */
761 + struct scatterlist tmp[2];
762 + struct scatterlist cipher[2];
763 + struct scatterlist dst[2];
768 + struct completion completion;
772 +static void tls_async_op_done(struct crypto_async_request *req, int err)
774 + struct async_op *areq = req->data;
776 + if (err == -EINPROGRESS)
780 + complete(&areq->completion);
783 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
784 + unsigned int keylen)
786 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
787 + struct crypto_ahash *auth = ctx->auth;
788 + struct crypto_skcipher *enc = ctx->enc;
789 + struct crypto_authenc_keys keys;
792 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
795 + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
796 + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
797 + CRYPTO_TFM_REQ_MASK);
798 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
799 + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
800 + CRYPTO_TFM_RES_MASK);
805 + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
806 + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
807 + CRYPTO_TFM_REQ_MASK);
808 + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
809 + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
810 + CRYPTO_TFM_RES_MASK);
816 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
821 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
822 + * @hash: (output) buffer to save the digest into
823 + * @src: (input) scatterlist with the assoc and payload data
824 + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
825 + * @req: (input) aead request
827 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
828 + unsigned int srclen, struct aead_request *req)
830 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
831 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
832 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
833 + struct async_op ahash_op;
834 + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
835 + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
836 + int err = -EBADMSG;
838 + /* Bail out if the request assoc len is 0 */
839 + if (!req->assoclen)
842 + init_completion(&ahash_op.completion);
844 + /* the hash transform to be executed comes from the original request */
845 + ahash_request_set_tfm(ahreq, ctx->auth);
846 + /* prepare the hash request with input data and result pointer */
847 + ahash_request_set_crypt(ahreq, src, hash, srclen);
848 + /* set the notifier for when the async hash function returns */
849 + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
850 + tls_async_op_done, &ahash_op);
852 + /* Calculate the digest on the given data. The result is put in hash */
853 + err = crypto_ahash_digest(ahreq);
854 + if (err == -EINPROGRESS) {
855 + err = wait_for_completion_interruptible(&ahash_op.completion);
857 + err = ahash_op.err;
864 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
865 + * @hash: (output) buffer to save the digest and padding into
866 + * @phashlen: (output) the size of digest + padding
867 + * @req: (input) aead request
869 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
870 + struct aead_request *req)
872 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
873 + unsigned int hash_size = crypto_aead_authsize(tls);
874 + unsigned int block_size = crypto_aead_blocksize(tls);
875 + unsigned int srclen = req->cryptlen + hash_size;
876 + unsigned int icvlen = req->cryptlen + req->assoclen;
877 + unsigned int padlen;
880 + err = crypto_tls_genicv(hash, req->src, icvlen, req);
884 + /* add padding after digest */
885 + padlen = block_size - (srclen % block_size);
886 + memset(hash + hash_size, padlen - 1, padlen);
888 + *phashlen = hash_size + padlen;
893 +static int crypto_tls_copy_data(struct aead_request *req,
894 + struct scatterlist *src,
895 + struct scatterlist *dst,
898 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
899 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
900 + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
902 + skcipher_request_set_tfm(skreq, ctx->null);
903 + skcipher_request_set_callback(skreq, aead_request_flags(req),
905 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
907 + return crypto_skcipher_encrypt(skreq);
910 +static int crypto_tls_encrypt(struct aead_request *req)
912 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
913 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
914 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
915 + struct skcipher_request *skreq;
916 + struct scatterlist *cipher = treq_ctx->cipher;
917 + struct scatterlist *tmp = treq_ctx->tmp;
918 + struct scatterlist *sg, *src, *dst;
919 + unsigned int cryptlen, phashlen;
920 + u8 *hash = treq_ctx->tail;
924 + * The hash result is saved at the beginning of the tls request ctx
925 + * and is aligned as required by the hash transform. Enough space was
926 + * allocated in crypto_tls_init_tfm to accommodate the difference. The
927 + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
928 + * the result is not overwritten by the second (cipher) request.
930 + hash = (u8 *)ALIGN((unsigned long)hash +
931 + crypto_ahash_alignmask(ctx->auth),
932 + crypto_ahash_alignmask(ctx->auth) + 1);
935 + * STEP 1: create ICV together with necessary padding
937 + err = crypto_tls_gen_padicv(hash, &phashlen, req);
942 + * STEP 2: Hash and padding are combined with the payload
943 + * depending on the form it arrives. Scatter tables must have at least
944 + * one page of data before chaining with another table and can't have
945 + * an empty data page. The following code addresses these requirements.
947 + * If the payload is empty, only the hash is encrypted, otherwise the
948 + * payload scatterlist is merged with the hash. A special merging case
949 + * is when the payload has only one page of data. In that case the
950 + * payload page is moved to another scatterlist and prepared there for
953 + if (req->cryptlen) {
954 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
956 + sg_init_table(cipher, 2);
957 + sg_set_buf(cipher + 1, hash, phashlen);
959 + if (sg_is_last(src)) {
960 + sg_set_page(cipher, sg_page(src), req->cryptlen,
964 + unsigned int rem_len = req->cryptlen;
966 + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
967 + rem_len -= min(rem_len, sg->length);
969 + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
970 + sg_chain(sg, 1, cipher);
973 + sg_init_one(cipher, hash, phashlen);
978 + * If src != dst copy the associated data from source to destination.
979 + * In both cases fast-forward passed the associated data in the dest.
981 + if (req->src != req->dst) {
982 + err = crypto_tls_copy_data(req, req->src, req->dst,
987 + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
990 + * STEP 3: encrypt the frame and return the result
992 + cryptlen = req->cryptlen + phashlen;
995 + * The hash and the cipher are applied at different times and their
996 + * requests can use the same memory space without interference
998 + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
999 + skcipher_request_set_tfm(skreq, ctx->enc);
1000 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
1001 + skcipher_request_set_callback(skreq, aead_request_flags(req),
1002 + req->base.complete, req->base.data);
1004 + * Apply the cipher transform. The result will be in req->dst when the
1005 + * asynchronuous call terminates
1007 + err = crypto_skcipher_encrypt(skreq);
1012 +static int crypto_tls_decrypt(struct aead_request *req)
1014 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
1015 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
1016 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
1017 + unsigned int cryptlen = req->cryptlen;
1018 + unsigned int hash_size = crypto_aead_authsize(tls);
1019 + unsigned int block_size = crypto_aead_blocksize(tls);
1020 + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
1021 + struct scatterlist *tmp = treq_ctx->tmp;
1022 + struct scatterlist *src, *dst;
1024 + u8 padding[255]; /* padding can be 0-255 bytes */
1027 + u8 *ihash, *hash = treq_ctx->tail;
1030 + int err = -EINVAL;
1032 + struct async_op ciph_op;
1035 + * Rule out bad packets. The input packet length must be at least one
1036 + * byte more than the hash_size
1038 + if (cryptlen <= hash_size || cryptlen % block_size)
1042 + * Step 1 - Decrypt the source. Fast-forward past the associated data
1043 + * to the encrypted data. The result will be overwritten in place so
1044 + * that the decrypted data will be adjacent to the associated data. The
1045 + * last step (computing the hash) will have it's input data already
1046 + * prepared and ready to be accessed at req->src.
1048 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
1051 + init_completion(&ciph_op.completion);
1052 + skcipher_request_set_tfm(skreq, ctx->enc);
1053 + skcipher_request_set_callback(skreq, aead_request_flags(req),
1054 + tls_async_op_done, &ciph_op);
1055 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
1056 + err = crypto_skcipher_decrypt(skreq);
1057 + if (err == -EINPROGRESS) {
1058 + err = wait_for_completion_interruptible(&ciph_op.completion);
1060 + err = ciph_op.err;
1066 + * Step 2 - Verify padding
1067 + * Retrieve the last byte of the payload; this is the padding size.
1070 + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
1072 + /* RFC recommendation for invalid padding size. */
1073 + if (cryptlen < pad_size + hash_size) {
1075 + paderr = -EBADMSG;
1077 + cryptlen -= pad_size;
1078 + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
1080 + /* Padding content must be equal with pad_size. We verify it all */
1081 + for (i = 0; i < pad_size; i++)
1082 + if (padding[i] != pad_size)
1083 + paderr = -EBADMSG;
1086 + * Step 3 - Verify hash
1087 + * Align the digest result as required by the hash transform. Enough
1088 + * space was allocated in crypto_tls_init_tfm
1090 + hash = (u8 *)ALIGN((unsigned long)hash +
1091 + crypto_ahash_alignmask(ctx->auth),
1092 + crypto_ahash_alignmask(ctx->auth) + 1);
1094 + * Two bytes at the end of the associated data make the length field.
1095 + * It must be updated with the length of the cleartext message before
1096 + * the hash is calculated.
1098 + len_field = sg_virt(req->src) + req->assoclen - 2;
1099 + cryptlen -= hash_size;
1100 + *len_field = htons(cryptlen);
1102 + /* This is the hash from the decrypted packet. Save it for later */
1103 + ihash = hash + hash_size;
1104 + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
1106 + /* Now compute and compare our ICV with the one from the packet */
1107 + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
1109 + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
1111 + if (req->src != req->dst) {
1112 + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
1118 + /* return the first found error */
1123 + aead_request_complete(req, err);
1127 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
1129 + struct aead_instance *inst = aead_alg_instance(tfm);
1130 + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
1131 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1132 + struct crypto_ahash *auth;
1133 + struct crypto_skcipher *enc;
1134 + struct crypto_skcipher *null;
1137 + auth = crypto_spawn_ahash(&ictx->auth);
1139 + return PTR_ERR(auth);
1141 + enc = crypto_spawn_skcipher(&ictx->enc);
1142 + err = PTR_ERR(enc);
1144 + goto err_free_ahash;
1146 + null = crypto_get_default_null_skcipher2();
1147 + err = PTR_ERR(null);
1149 + goto err_free_skcipher;
1156 + * Allow enough space for two digests. The two digests will be compared
1157 + * during the decryption phase. One will come from the decrypted packet
1158 + * and the other will be calculated. For encryption, one digest is
1159 + * padded (up to a cipher blocksize) and chained with the payload
1161 + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
1162 + crypto_ahash_alignmask(auth),
1163 + crypto_ahash_alignmask(auth) + 1) +
1164 + max(crypto_ahash_digestsize(auth),
1165 + crypto_skcipher_blocksize(enc));
1167 + crypto_aead_set_reqsize(tfm,
1168 + sizeof(struct tls_request_ctx) +
1170 + max_t(unsigned int,
1171 + crypto_ahash_reqsize(auth) +
1172 + sizeof(struct ahash_request),
1173 + crypto_skcipher_reqsize(enc) +
1174 + sizeof(struct skcipher_request)));
1179 + crypto_free_skcipher(enc);
1181 + crypto_free_ahash(auth);
1185 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
1187 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1189 + crypto_free_ahash(ctx->auth);
1190 + crypto_free_skcipher(ctx->enc);
1191 + crypto_put_default_null_skcipher2();
1194 +static void crypto_tls_free(struct aead_instance *inst)
1196 + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
1198 + crypto_drop_skcipher(&ctx->enc);
1199 + crypto_drop_ahash(&ctx->auth);
1203 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
1205 + struct crypto_attr_type *algt;
1206 + struct aead_instance *inst;
1207 + struct hash_alg_common *auth;
1208 + struct crypto_alg *auth_base;
1209 + struct skcipher_alg *enc;
1210 + struct tls_instance_ctx *ctx;
1211 + const char *enc_name;
1214 + algt = crypto_get_attr_type(tb);
1216 + return PTR_ERR(algt);
1218 + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
1221 + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
1222 + CRYPTO_ALG_TYPE_AHASH_MASK |
1223 + crypto_requires_sync(algt->type, algt->mask));
1225 + return PTR_ERR(auth);
1227 + auth_base = &auth->base;
1229 + enc_name = crypto_attr_alg_name(tb[2]);
1230 + err = PTR_ERR(enc_name);
1231 + if (IS_ERR(enc_name))
1232 + goto out_put_auth;
1234 + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
1237 + goto out_put_auth;
1239 + ctx = aead_instance_ctx(inst);
1241 + err = crypto_init_ahash_spawn(&ctx->auth, auth,
1242 + aead_crypto_instance(inst));
1244 + goto err_free_inst;
1246 + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
1247 + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
1248 + crypto_requires_sync(algt->type,
1251 + goto err_drop_auth;
1253 + enc = crypto_spawn_skcipher_alg(&ctx->enc);
1255 + err = -ENAMETOOLONG;
1256 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
1257 + "tls10(%s,%s)", auth_base->cra_name,
1258 + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
1259 + goto err_drop_enc;
1261 + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1262 + "tls10(%s,%s)", auth_base->cra_driver_name,
1263 + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
1264 + goto err_drop_enc;
1266 + inst->alg.base.cra_flags = (auth_base->cra_flags |
1267 + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
1268 + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
1269 + auth_base->cra_priority;
1270 + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
1271 + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
1272 + enc->base.cra_alignmask;
1273 + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
1275 + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
1276 + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
1277 + inst->alg.maxauthsize = auth->digestsize;
1279 + inst->alg.init = crypto_tls_init_tfm;
1280 + inst->alg.exit = crypto_tls_exit_tfm;
1282 + inst->alg.setkey = crypto_tls_setkey;
1283 + inst->alg.encrypt = crypto_tls_encrypt;
1284 + inst->alg.decrypt = crypto_tls_decrypt;
1286 + inst->free = crypto_tls_free;
1288 + err = aead_register_instance(tmpl, inst);
1290 + goto err_drop_enc;
1293 + crypto_mod_put(auth_base);
1297 + crypto_drop_skcipher(&ctx->enc);
1299 + crypto_drop_ahash(&ctx->auth);
1306 +static struct crypto_template crypto_tls_tmpl = {
1308 + .create = crypto_tls_create,
1309 + .module = THIS_MODULE,
1312 +static int __init crypto_tls_module_init(void)
1314 + return crypto_register_template(&crypto_tls_tmpl);
1317 +static void __exit crypto_tls_module_exit(void)
1319 + crypto_unregister_template(&crypto_tls_tmpl);
1322 +module_init(crypto_tls_module_init);
1323 +module_exit(crypto_tls_module_exit);
1325 +MODULE_LICENSE("GPL");
1326 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
1327 --- a/drivers/crypto/Makefile
1328 +++ b/drivers/crypto/Makefile
1329 @@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chel
1330 obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
1331 obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
1332 obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
1333 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
1334 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
1335 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
1336 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
1337 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
1338 --- a/drivers/crypto/caam/Kconfig
1339 +++ b/drivers/crypto/caam/Kconfig
1341 +config CRYPTO_DEV_FSL_CAAM_COMMON
1344 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1347 +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1350 config CRYPTO_DEV_FSL_CAAM
1351 - tristate "Freescale CAAM-Multicore driver backend"
1352 + tristate "Freescale CAAM-Multicore platform driver backend"
1353 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
1355 + select CRYPTO_DEV_FSL_CAAM_COMMON
1357 Enables the driver module for Freescale's Cryptographic Accelerator
1358 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
1359 @@ -12,9 +22,16 @@ config CRYPTO_DEV_FSL_CAAM
1360 To compile this driver as a module, choose M here: the module
1361 will be called caam.
1363 +if CRYPTO_DEV_FSL_CAAM
1365 +config CRYPTO_DEV_FSL_CAAM_DEBUG
1366 + bool "Enable debug output in CAAM driver"
1368 + Selecting this will enable printing of various debug
1369 + information in the CAAM driver.
1371 config CRYPTO_DEV_FSL_CAAM_JR
1372 tristate "Freescale CAAM Job Ring driver backend"
1373 - depends on CRYPTO_DEV_FSL_CAAM
1376 Enables the driver module for Job Rings which are part of
1377 @@ -25,9 +42,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
1378 To compile this driver as a module, choose M here: the module
1379 will be called caam_jr.
1381 +if CRYPTO_DEV_FSL_CAAM_JR
1383 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1385 - depends on CRYPTO_DEV_FSL_CAAM_JR
1389 @@ -45,7 +63,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1391 config CRYPTO_DEV_FSL_CAAM_INTC
1392 bool "Job Ring interrupt coalescing"
1393 - depends on CRYPTO_DEV_FSL_CAAM_JR
1395 Enable the Job Ring's interrupt coalescing feature.
1397 @@ -74,9 +91,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
1398 threshold. Range is 1-65535.
1400 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1401 - tristate "Register algorithm implementations with the Crypto API"
1402 - depends on CRYPTO_DEV_FSL_CAAM_JR
1403 + bool "Register algorithm implementations with the Crypto API"
1405 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1407 select CRYPTO_AUTHENC
1408 select CRYPTO_BLKCIPHER
1409 @@ -85,13 +102,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1410 scatterlist crypto API (such as the linux native IPSec
1411 stack) to the SEC4 via job ring.
1413 - To compile this as a module, choose M here: the module
1414 - will be called caamalg.
1416 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1417 - tristate "Queue Interface as Crypto API backend"
1418 - depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
1419 + bool "Queue Interface as Crypto API backend"
1420 + depends on FSL_SDK_DPA && NET
1422 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1423 select CRYPTO_AUTHENC
1424 select CRYPTO_BLKCIPHER
1426 @@ -102,36 +117,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1427 assigned to the kernel should also be more than the number of
1430 - To compile this as a module, choose M here: the module
1431 - will be called caamalg_qi.
1433 config CRYPTO_DEV_FSL_CAAM_AHASH_API
1434 - tristate "Register hash algorithm implementations with Crypto API"
1435 - depends on CRYPTO_DEV_FSL_CAAM_JR
1436 + bool "Register hash algorithm implementations with Crypto API"
1438 + select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1441 Selecting this will offload ahash for users of the
1442 scatterlist crypto API to the SEC4 via job ring.
1444 - To compile this as a module, choose M here: the module
1445 - will be called caamhash.
1447 config CRYPTO_DEV_FSL_CAAM_PKC_API
1448 - tristate "Register public key cryptography implementations with Crypto API"
1449 - depends on CRYPTO_DEV_FSL_CAAM_JR
1450 + bool "Register public key cryptography implementations with Crypto API"
1454 Selecting this will allow SEC Public key support for RSA.
1455 Supported cryptographic primitives: encryption, decryption,
1456 signature and verification.
1457 - To compile this as a module, choose M here: the module
1458 - will be called caam_pkc.
1460 config CRYPTO_DEV_FSL_CAAM_RNG_API
1461 - tristate "Register caam device for hwrng API"
1462 - depends on CRYPTO_DEV_FSL_CAAM_JR
1463 + bool "Register caam device for hwrng API"
1467 @@ -139,16 +144,24 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
1468 Selecting this will register the SEC4 hardware rng to
1469 the hw_random API for suppying the kernel entropy pool.
1471 - To compile this as a module, choose M here: the module
1472 - will be called caamrng.
1473 +endif # CRYPTO_DEV_FSL_CAAM_JR
1475 -config CRYPTO_DEV_FSL_CAAM_DEBUG
1476 - bool "Enable debug output in CAAM driver"
1477 - depends on CRYPTO_DEV_FSL_CAAM
1479 - Selecting this will enable printing of various debug
1480 - information in the CAAM driver.
1481 +endif # CRYPTO_DEV_FSL_CAAM
1483 -config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1484 - def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
1485 - CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
1486 +config CRYPTO_DEV_FSL_DPAA2_CAAM
1487 + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
1488 + depends on FSL_MC_DPIO
1489 + select CRYPTO_DEV_FSL_CAAM_COMMON
1490 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1491 + select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1492 + select CRYPTO_BLKCIPHER
1493 + select CRYPTO_AUTHENC
1494 + select CRYPTO_AEAD
1495 + select CRYPTO_HASH
1497 + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
1498 + It handles DPSECI DPAA2 objects that sit on the Management Complex
1501 + To compile this as a module, choose M here: the module
1502 + will be called dpaa2_caam.
1503 --- a/drivers/crypto/caam/Makefile
1504 +++ b/drivers/crypto/caam/Makefile
1505 @@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
1506 ccflags-y := -DDEBUG
1509 +ccflags-y += -DVERSION=\"\"
1511 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
1512 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
1513 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
1514 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
1515 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
1516 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
1517 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
1518 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
1519 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
1521 -caam-objs := ctrl.o
1522 -caam_jr-objs := jr.o key_gen.o error.o
1523 -caam_pkc-y := caampkc.o pkc_desc.o
1524 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
1527 +caam_jr-y := jr.o key_gen.o
1528 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
1529 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
1530 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
1531 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
1532 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
1534 +caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
1535 ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
1536 ccflags-y += -DCONFIG_CAAM_QI
1540 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
1542 +dpaa2_caam-y := caamalg_qi2.o dpseci.o
1543 --- a/drivers/crypto/caam/caamalg.c
1544 +++ b/drivers/crypto/caam/caamalg.c
1546 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
1549 +#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
1551 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
1552 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
1554 @@ -108,6 +110,7 @@ struct caam_ctx {
1555 dma_addr_t sh_desc_dec_dma;
1556 dma_addr_t sh_desc_givenc_dma;
1558 + enum dma_data_direction dir;
1559 struct device *jrdev;
1560 struct alginfo adata;
1561 struct alginfo cdata;
1562 @@ -118,6 +121,7 @@ static int aead_null_set_sh_desc(struct
1564 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1565 struct device *jrdev = ctx->jrdev;
1566 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1568 int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
1569 ctx->adata.keylen_pad;
1570 @@ -136,9 +140,10 @@ static int aead_null_set_sh_desc(struct
1572 /* aead_encrypt shared descriptor */
1573 desc = ctx->sh_desc_enc;
1574 - cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
1575 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
1577 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1578 - desc_bytes(desc), DMA_TO_DEVICE);
1579 + desc_bytes(desc), ctx->dir);
1582 * Job Descriptor and Shared Descriptors
1583 @@ -154,9 +159,10 @@ static int aead_null_set_sh_desc(struct
1585 /* aead_decrypt shared descriptor */
1586 desc = ctx->sh_desc_dec;
1587 - cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
1588 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
1590 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1591 - desc_bytes(desc), DMA_TO_DEVICE);
1592 + desc_bytes(desc), ctx->dir);
1596 @@ -168,6 +174,7 @@ static int aead_set_sh_desc(struct crypt
1597 unsigned int ivsize = crypto_aead_ivsize(aead);
1598 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1599 struct device *jrdev = ctx->jrdev;
1600 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1601 u32 ctx1_iv_off = 0;
1602 u32 *desc, *nonce = NULL;
1604 @@ -234,9 +241,9 @@ static int aead_set_sh_desc(struct crypt
1605 desc = ctx->sh_desc_enc;
1606 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
1607 ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
1609 + false, ctrlpriv->era);
1610 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1611 - desc_bytes(desc), DMA_TO_DEVICE);
1612 + desc_bytes(desc), ctx->dir);
1616 @@ -266,9 +273,9 @@ skip_enc:
1617 desc = ctx->sh_desc_dec;
1618 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
1619 ctx->authsize, alg->caam.geniv, is_rfc3686,
1620 - nonce, ctx1_iv_off, false);
1621 + nonce, ctx1_iv_off, false, ctrlpriv->era);
1622 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1623 - desc_bytes(desc), DMA_TO_DEVICE);
1624 + desc_bytes(desc), ctx->dir);
1626 if (!alg->caam.geniv)
1628 @@ -300,9 +307,9 @@ skip_enc:
1629 desc = ctx->sh_desc_enc;
1630 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
1631 ctx->authsize, is_rfc3686, nonce,
1632 - ctx1_iv_off, false);
1633 + ctx1_iv_off, false, ctrlpriv->era);
1634 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1635 - desc_bytes(desc), DMA_TO_DEVICE);
1636 + desc_bytes(desc), ctx->dir);
1640 @@ -323,6 +330,7 @@ static int gcm_set_sh_desc(struct crypto
1642 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1643 struct device *jrdev = ctx->jrdev;
1644 + unsigned int ivsize = crypto_aead_ivsize(aead);
1646 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1648 @@ -344,9 +352,9 @@ static int gcm_set_sh_desc(struct crypto
1651 desc = ctx->sh_desc_enc;
1652 - cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
1653 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1654 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1655 - desc_bytes(desc), DMA_TO_DEVICE);
1656 + desc_bytes(desc), ctx->dir);
1659 * Job Descriptor and Shared Descriptors
1660 @@ -361,9 +369,9 @@ static int gcm_set_sh_desc(struct crypto
1663 desc = ctx->sh_desc_dec;
1664 - cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
1665 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1666 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1667 - desc_bytes(desc), DMA_TO_DEVICE);
1668 + desc_bytes(desc), ctx->dir);
1672 @@ -382,6 +390,7 @@ static int rfc4106_set_sh_desc(struct cr
1674 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1675 struct device *jrdev = ctx->jrdev;
1676 + unsigned int ivsize = crypto_aead_ivsize(aead);
1678 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1680 @@ -403,9 +412,10 @@ static int rfc4106_set_sh_desc(struct cr
1683 desc = ctx->sh_desc_enc;
1684 - cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
1685 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1687 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1688 - desc_bytes(desc), DMA_TO_DEVICE);
1689 + desc_bytes(desc), ctx->dir);
1692 * Job Descriptor and Shared Descriptors
1693 @@ -420,9 +430,10 @@ static int rfc4106_set_sh_desc(struct cr
1696 desc = ctx->sh_desc_dec;
1697 - cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
1698 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1700 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1701 - desc_bytes(desc), DMA_TO_DEVICE);
1702 + desc_bytes(desc), ctx->dir);
1706 @@ -442,6 +453,7 @@ static int rfc4543_set_sh_desc(struct cr
1708 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1709 struct device *jrdev = ctx->jrdev;
1710 + unsigned int ivsize = crypto_aead_ivsize(aead);
1712 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1714 @@ -463,9 +475,10 @@ static int rfc4543_set_sh_desc(struct cr
1717 desc = ctx->sh_desc_enc;
1718 - cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
1719 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1721 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1722 - desc_bytes(desc), DMA_TO_DEVICE);
1723 + desc_bytes(desc), ctx->dir);
1726 * Job Descriptor and Shared Descriptors
1727 @@ -480,9 +493,10 @@ static int rfc4543_set_sh_desc(struct cr
1730 desc = ctx->sh_desc_dec;
1731 - cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
1732 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1734 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1735 - desc_bytes(desc), DMA_TO_DEVICE);
1736 + desc_bytes(desc), ctx->dir);
1740 @@ -498,11 +512,67 @@ static int rfc4543_setauthsize(struct cr
1744 +static int chachapoly_set_sh_desc(struct crypto_aead *aead)
1746 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
1747 + struct device *jrdev = ctx->jrdev;
1748 + unsigned int ivsize = crypto_aead_ivsize(aead);
1751 + if (!ctx->cdata.keylen || !ctx->authsize)
1754 + desc = ctx->sh_desc_enc;
1755 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
1756 + ctx->authsize, true, false);
1757 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1758 + desc_bytes(desc), ctx->dir);
1760 + desc = ctx->sh_desc_dec;
1761 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
1762 + ctx->authsize, false, false);
1763 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1764 + desc_bytes(desc), ctx->dir);
1769 +static int chachapoly_setauthsize(struct crypto_aead *aead,
1770 + unsigned int authsize)
1772 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
1774 + if (authsize != POLY1305_DIGEST_SIZE)
1777 + ctx->authsize = authsize;
1778 + return chachapoly_set_sh_desc(aead);
1781 +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
1782 + unsigned int keylen)
1784 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
1785 + unsigned int ivsize = crypto_aead_ivsize(aead);
1786 + unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
1788 + if (keylen != CHACHA20_KEY_SIZE + saltlen) {
1789 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1793 + ctx->cdata.key_virt = key;
1794 + ctx->cdata.keylen = keylen - saltlen;
1796 + return chachapoly_set_sh_desc(aead);
1799 static int aead_setkey(struct crypto_aead *aead,
1800 const u8 *key, unsigned int keylen)
1802 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1803 struct device *jrdev = ctx->jrdev;
1804 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1805 struct crypto_authenc_keys keys;
1808 @@ -517,6 +587,27 @@ static int aead_setkey(struct crypto_aea
1809 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1813 + * If DKP is supported, use it in the shared descriptor to generate
1816 + if (ctrlpriv->era >= 6) {
1817 + ctx->adata.keylen = keys.authkeylen;
1818 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
1819 + OP_ALG_ALGSEL_MASK);
1821 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1824 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
1825 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
1827 + dma_sync_single_for_device(jrdev, ctx->key_dma,
1828 + ctx->adata.keylen_pad +
1829 + keys.enckeylen, ctx->dir);
1830 + goto skip_split_key;
1833 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
1834 keys.authkeylen, CAAM_MAX_KEY_SIZE -
1836 @@ -527,12 +618,14 @@ static int aead_setkey(struct crypto_aea
1837 /* postpend encryption key to auth split key */
1838 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
1839 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
1840 - keys.enckeylen, DMA_TO_DEVICE);
1841 + keys.enckeylen, ctx->dir);
1843 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
1844 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
1845 ctx->adata.keylen_pad + keys.enckeylen, 1);
1849 ctx->cdata.keylen = keys.enckeylen;
1850 return aead_set_sh_desc(aead);
1852 @@ -552,7 +645,7 @@ static int gcm_setkey(struct crypto_aead
1855 memcpy(ctx->key, key, keylen);
1856 - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
1857 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
1858 ctx->cdata.keylen = keylen;
1860 return gcm_set_sh_desc(aead);
1861 @@ -580,7 +673,7 @@ static int rfc4106_setkey(struct crypto_
1863 ctx->cdata.keylen = keylen - 4;
1864 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1867 return rfc4106_set_sh_desc(aead);
1870 @@ -606,7 +699,7 @@ static int rfc4543_setkey(struct crypto_
1872 ctx->cdata.keylen = keylen - 4;
1873 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1876 return rfc4543_set_sh_desc(aead);
1879 @@ -658,21 +751,21 @@ static int ablkcipher_setkey(struct cryp
1880 cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
1882 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1883 - desc_bytes(desc), DMA_TO_DEVICE);
1884 + desc_bytes(desc), ctx->dir);
1886 /* ablkcipher_decrypt shared descriptor */
1887 desc = ctx->sh_desc_dec;
1888 cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
1890 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1891 - desc_bytes(desc), DMA_TO_DEVICE);
1892 + desc_bytes(desc), ctx->dir);
1894 /* ablkcipher_givencrypt shared descriptor */
1895 desc = ctx->sh_desc_givenc;
1896 cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
1898 dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
1899 - desc_bytes(desc), DMA_TO_DEVICE);
1900 + desc_bytes(desc), ctx->dir);
1904 @@ -701,13 +794,13 @@ static int xts_ablkcipher_setkey(struct
1905 desc = ctx->sh_desc_enc;
1906 cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
1907 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1908 - desc_bytes(desc), DMA_TO_DEVICE);
1909 + desc_bytes(desc), ctx->dir);
1911 /* xts_ablkcipher_decrypt shared descriptor */
1912 desc = ctx->sh_desc_dec;
1913 cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
1914 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1915 - desc_bytes(desc), DMA_TO_DEVICE);
1916 + desc_bytes(desc), ctx->dir);
1920 @@ -989,9 +1082,6 @@ static void init_aead_job(struct aead_re
1921 append_seq_out_ptr(desc, dst_dma,
1922 req->assoclen + req->cryptlen - authsize,
1925 - /* REG3 = assoclen */
1926 - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1929 static void init_gcm_job(struct aead_request *req,
1930 @@ -1006,6 +1096,7 @@ static void init_gcm_job(struct aead_req
1933 init_aead_job(req, edesc, all_contig, encrypt);
1934 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1936 /* BUG This should not be specific to generic GCM. */
1938 @@ -1023,6 +1114,40 @@ static void init_gcm_job(struct aead_req
1939 /* End of blank commands */
1942 +static void init_chachapoly_job(struct aead_request *req,
1943 + struct aead_edesc *edesc, bool all_contig,
1946 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
1947 + unsigned int ivsize = crypto_aead_ivsize(aead);
1948 + unsigned int assoclen = req->assoclen;
1949 + u32 *desc = edesc->hw_desc;
1950 + u32 ctx_iv_off = 4;
1952 + init_aead_job(req, edesc, all_contig, encrypt);
1954 + if (ivsize != CHACHAPOLY_IV_SIZE) {
1955 + /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
1959 + * The associated data comes already with the IV but we need
1960 + * to skip it when we authenticate or encrypt...
1962 + assoclen -= ivsize;
1965 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
1968 + * For IPsec load the IV further in the same register.
1969 + * For RFC7539 simply load the 12 bytes nonce in a single operation
1971 + append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
1972 + LDST_SRCDST_BYTE_CONTEXT |
1973 + ctx_iv_off << LDST_OFFSET_SHIFT);
1976 static void init_authenc_job(struct aead_request *req,
1977 struct aead_edesc *edesc,
1978 bool all_contig, bool encrypt)
1979 @@ -1032,6 +1157,7 @@ static void init_authenc_job(struct aead
1980 struct caam_aead_alg, aead);
1981 unsigned int ivsize = crypto_aead_ivsize(aead);
1982 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1983 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
1984 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
1985 OP_ALG_AAI_CTR_MOD128);
1986 const bool is_rfc3686 = alg->caam.rfc3686;
1987 @@ -1055,6 +1181,15 @@ static void init_authenc_job(struct aead
1989 init_aead_job(req, edesc, all_contig, encrypt);
1992 + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
1993 + * having DPOVRD as destination.
1995 + if (ctrlpriv->era < 3)
1996 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1998 + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
2000 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
2001 append_load_as_imm(desc, req->iv, ivsize,
2003 @@ -1227,8 +1362,16 @@ static struct aead_edesc *aead_edesc_all
2008 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
2009 + * the end of the table by allocating more S/G entries.
2011 sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
2012 - sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
2013 + if (mapped_dst_nents > 1)
2014 + sec4_sg_len += ALIGN(mapped_dst_nents, 4);
2016 + sec4_sg_len = ALIGN(sec4_sg_len, 4);
2018 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2020 /* allocate space for base edesc and hw desc commands, link tables */
2021 @@ -1309,6 +1452,72 @@ static int gcm_encrypt(struct aead_reque
2025 +static int chachapoly_encrypt(struct aead_request *req)
2027 + struct aead_edesc *edesc;
2028 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
2029 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
2030 + struct device *jrdev = ctx->jrdev;
2035 + edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
2037 + if (IS_ERR(edesc))
2038 + return PTR_ERR(edesc);
2040 + desc = edesc->hw_desc;
2042 + init_chachapoly_job(req, edesc, all_contig, true);
2043 + print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
2044 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2047 + ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2049 + ret = -EINPROGRESS;
2051 + aead_unmap(jrdev, edesc, req);
2058 +static int chachapoly_decrypt(struct aead_request *req)
2060 + struct aead_edesc *edesc;
2061 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
2062 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
2063 + struct device *jrdev = ctx->jrdev;
2068 + edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
2070 + if (IS_ERR(edesc))
2071 + return PTR_ERR(edesc);
2073 + desc = edesc->hw_desc;
2075 + init_chachapoly_job(req, edesc, all_contig, false);
2076 + print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
2077 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2080 + ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2082 + ret = -EINPROGRESS;
2084 + aead_unmap(jrdev, edesc, req);
2091 static int ipsec_gcm_encrypt(struct aead_request *req)
2093 if (req->assoclen < 8)
2094 @@ -1496,7 +1705,25 @@ static struct ablkcipher_edesc *ablkciph
2096 sec4_sg_ents = 1 + mapped_src_nents;
2097 dst_sg_idx = sec4_sg_ents;
2098 - sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
2101 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
2102 + * the end of the table by allocating more S/G entries. Logic:
2103 + * if (src != dst && output S/G)
2104 + * pad output S/G, if needed
2105 + * else if (src == dst && S/G)
2106 + * overlapping S/Gs; pad one of them
2107 + * else if (input S/G) ...
2108 + * pad input S/G, if needed
2110 + if (mapped_dst_nents > 1)
2111 + sec4_sg_ents += ALIGN(mapped_dst_nents, 4);
2112 + else if ((req->src == req->dst) && (mapped_src_nents > 1))
2113 + sec4_sg_ents = max(ALIGN(sec4_sg_ents, 4),
2114 + 1 + ALIGN(mapped_src_nents, 4));
2116 + sec4_sg_ents = ALIGN(sec4_sg_ents, 4);
2118 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
2121 @@ -3199,6 +3426,50 @@ static struct caam_aead_alg driver_aeads
2128 + .cra_name = "rfc7539(chacha20,poly1305)",
2129 + .cra_driver_name = "rfc7539-chacha20-poly1305-"
2131 + .cra_blocksize = 1,
2133 + .setkey = chachapoly_setkey,
2134 + .setauthsize = chachapoly_setauthsize,
2135 + .encrypt = chachapoly_encrypt,
2136 + .decrypt = chachapoly_decrypt,
2137 + .ivsize = CHACHAPOLY_IV_SIZE,
2138 + .maxauthsize = POLY1305_DIGEST_SIZE,
2141 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2143 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2150 + .cra_name = "rfc7539esp(chacha20,poly1305)",
2151 + .cra_driver_name = "rfc7539esp-chacha20-"
2153 + .cra_blocksize = 1,
2155 + .setkey = chachapoly_setkey,
2156 + .setauthsize = chachapoly_setauthsize,
2157 + .encrypt = chachapoly_encrypt,
2158 + .decrypt = chachapoly_decrypt,
2160 + .maxauthsize = POLY1305_DIGEST_SIZE,
2163 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2165 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2171 struct caam_crypto_alg {
2172 @@ -3207,9 +3478,11 @@ struct caam_crypto_alg {
2173 struct caam_alg_entry caam;
2176 -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
2177 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2180 dma_addr_t dma_addr;
2181 + struct caam_drv_private *priv;
2183 ctx->jrdev = caam_jr_alloc();
2184 if (IS_ERR(ctx->jrdev)) {
2185 @@ -3217,10 +3490,16 @@ static int caam_init_common(struct caam_
2186 return PTR_ERR(ctx->jrdev);
2189 + priv = dev_get_drvdata(ctx->jrdev->parent);
2190 + if (priv->era >= 6 && uses_dkp)
2191 + ctx->dir = DMA_BIDIRECTIONAL;
2193 + ctx->dir = DMA_TO_DEVICE;
2195 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
2196 offsetof(struct caam_ctx,
2198 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
2199 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
2200 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
2201 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
2202 caam_jr_free(ctx->jrdev);
2203 @@ -3248,7 +3527,7 @@ static int caam_cra_init(struct crypto_t
2204 container_of(alg, struct caam_crypto_alg, crypto_alg);
2205 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2207 - return caam_init_common(ctx, &caam_alg->caam);
2208 + return caam_init_common(ctx, &caam_alg->caam, false);
2211 static int caam_aead_init(struct crypto_aead *tfm)
2212 @@ -3258,14 +3537,15 @@ static int caam_aead_init(struct crypto_
2213 container_of(alg, struct caam_aead_alg, aead);
2214 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2216 - return caam_init_common(ctx, &caam_alg->caam);
2217 + return caam_init_common(ctx, &caam_alg->caam,
2218 + alg->setkey == aead_setkey);
2221 static void caam_exit_common(struct caam_ctx *ctx)
2223 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
2224 offsetof(struct caam_ctx, sh_desc_enc_dma),
2225 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
2226 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
2227 caam_jr_free(ctx->jrdev);
2230 @@ -3279,7 +3559,7 @@ static void caam_aead_exit(struct crypto
2231 caam_exit_common(crypto_aead_ctx(tfm));
2234 -static void __exit caam_algapi_exit(void)
2235 +void caam_algapi_exit(void)
2238 struct caam_crypto_alg *t_alg, *n;
2239 @@ -3358,56 +3638,52 @@ static void caam_aead_alg_init(struct ca
2240 alg->exit = caam_aead_exit;
2243 -static int __init caam_algapi_init(void)
2244 +int caam_algapi_init(struct device *ctrldev)
2246 - struct device_node *dev_node;
2247 - struct platform_device *pdev;
2248 - struct device *ctrldev;
2249 - struct caam_drv_private *priv;
2250 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
2252 - u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
2253 + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
2254 unsigned int md_limit = SHA512_DIGEST_SIZE;
2255 bool registered = false;
2257 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2259 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2264 - pdev = of_find_device_by_node(dev_node);
2266 - of_node_put(dev_node);
2270 - ctrldev = &pdev->dev;
2271 - priv = dev_get_drvdata(ctrldev);
2272 - of_node_put(dev_node);
2275 - * If priv is NULL, it's probably because the caam driver wasn't
2276 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2282 INIT_LIST_HEAD(&alg_list);
2285 * Register crypto algorithms the device supports.
2286 * First, detect presence and attributes of DES, AES, and MD blocks.
2288 - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2289 - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2290 - des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
2291 - aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
2292 - md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2293 + if (priv->era < 10) {
2294 + u32 cha_vid, cha_inst;
2296 + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2297 + aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
2298 + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2300 + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2301 + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
2302 + CHA_ID_LS_DES_SHIFT;
2303 + aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
2304 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2310 + aesa = rd_reg32(&priv->ctrl->vreg.aesa);
2311 + mdha = rd_reg32(&priv->ctrl->vreg.mdha);
2313 + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2314 + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2316 + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
2317 + aes_inst = aesa & CHA_VER_NUM_MASK;
2318 + md_inst = mdha & CHA_VER_NUM_MASK;
2319 + ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
2320 + ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
2323 /* If MD is present, limit digest size based on LP256 */
2324 - if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
2325 + if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
2326 md_limit = SHA256_DIGEST_SIZE;
2328 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2329 @@ -3429,10 +3705,10 @@ static int __init caam_algapi_init(void)
2330 * Check support for AES modes not available
2333 - if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
2334 - if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
2337 + if (aes_vid == CHA_VER_VID_AES_LP &&
2338 + (alg->class1_alg_type & OP_ALG_AAI_MASK) ==
2342 t_alg = caam_alg_alloc(alg);
2343 if (IS_ERR(t_alg)) {
2344 @@ -3471,21 +3747,28 @@ static int __init caam_algapi_init(void)
2345 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
2348 + /* Skip CHACHA20 algorithms if not supported by device */
2349 + if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
2352 + /* Skip POLY1305 algorithms if not supported by device */
2353 + if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
2357 * Check support for AES algorithms not available
2360 - if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
2361 - if (alg_aai == OP_ALG_AAI_GCM)
2363 + if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
2367 * Skip algorithms requiring message digests
2368 * if MD or MD size is not supported by device.
2371 - (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
2373 + if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
2374 + (!md_inst || t_alg->aead.maxauthsize > md_limit))
2377 caam_aead_alg_init(t_alg);
2379 @@ -3505,10 +3788,3 @@ static int __init caam_algapi_init(void)
2384 -module_init(caam_algapi_init);
2385 -module_exit(caam_algapi_exit);
2387 -MODULE_LICENSE("GPL");
2388 -MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2389 -MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
2390 --- a/drivers/crypto/caam/caamalg_desc.c
2391 +++ b/drivers/crypto/caam/caamalg_desc.c
2392 @@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *d
2393 * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
2394 * (non-protocol) with no (null) encryption.
2395 * @desc: pointer to buffer used for descriptor construction
2396 - * @adata: pointer to authentication transform definitions. Note that since a
2397 - * split key is to be used, the size of the split key itself is
2398 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2399 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2400 + * @adata: pointer to authentication transform definitions.
2401 + * A split key is required for SEC Era < 6; the size of the split key
2402 + * is specified in this case. Valid algorithm values - one of
2403 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2404 + * with OP_ALG_AAI_HMAC_PRECOMP.
2405 * @icvsize: integrity check value (ICV) size (truncated or full)
2407 - * Note: Requires an MDHA split key.
2410 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
2411 - unsigned int icvsize)
2412 + unsigned int icvsize, int era)
2414 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
2416 @@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * c
2417 /* Skip if already shared */
2418 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2420 - if (adata->key_inline)
2421 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2422 - adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
2425 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2426 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2428 + if (adata->key_inline)
2429 + append_key_as_imm(desc, adata->key_virt,
2430 + adata->keylen_pad, adata->keylen,
2431 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2434 + append_key(desc, adata->key_dma, adata->keylen,
2435 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2437 + append_proto_dkp(desc, adata);
2439 set_jump_tgt_here(desc, key_jump_cmd);
2441 /* assoclen + cryptlen = seqinlen */
2442 @@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_enca
2443 * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
2444 * (non-protocol) with no (null) decryption.
2445 * @desc: pointer to buffer used for descriptor construction
2446 - * @adata: pointer to authentication transform definitions. Note that since a
2447 - * split key is to be used, the size of the split key itself is
2448 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2449 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2450 + * @adata: pointer to authentication transform definitions.
2451 + * A split key is required for SEC Era < 6; the size of the split key
2452 + * is specified in this case. Valid algorithm values - one of
2453 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2454 + * with OP_ALG_AAI_HMAC_PRECOMP.
2455 * @icvsize: integrity check value (ICV) size (truncated or full)
2457 - * Note: Requires an MDHA split key.
2460 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
2461 - unsigned int icvsize)
2462 + unsigned int icvsize, int era)
2464 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
2466 @@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * c
2467 /* Skip if already shared */
2468 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2470 - if (adata->key_inline)
2471 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2472 - adata->keylen, CLASS_2 |
2473 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2475 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2476 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2478 + if (adata->key_inline)
2479 + append_key_as_imm(desc, adata->key_virt,
2480 + adata->keylen_pad, adata->keylen,
2481 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2484 + append_key(desc, adata->key_dma, adata->keylen,
2485 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2487 + append_proto_dkp(desc, adata);
2489 set_jump_tgt_here(desc, key_jump_cmd);
2491 /* Class 2 operation */
2492 @@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_deca
2493 static void init_sh_desc_key_aead(u32 * const desc,
2494 struct alginfo * const cdata,
2495 struct alginfo * const adata,
2496 - const bool is_rfc3686, u32 *nonce)
2497 + const bool is_rfc3686, u32 *nonce, int era)
2500 unsigned int enckeylen = cdata->keylen;
2501 @@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 *
2503 enckeylen -= CTR_RFC3686_NONCE_SIZE;
2505 - if (adata->key_inline)
2506 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2507 - adata->keylen, CLASS_2 |
2508 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2510 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2511 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2513 + if (adata->key_inline)
2514 + append_key_as_imm(desc, adata->key_virt,
2515 + adata->keylen_pad, adata->keylen,
2516 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2519 + append_key(desc, adata->key_dma, adata->keylen,
2520 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2522 + append_proto_dkp(desc, adata);
2525 if (cdata->key_inline)
2526 append_key_as_imm(desc, cdata->key_virt, enckeylen,
2527 @@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 *
2528 * @cdata: pointer to block cipher transform definitions
2529 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2530 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2531 - * @adata: pointer to authentication transform definitions. Note that since a
2532 - * split key is to be used, the size of the split key itself is
2533 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2534 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2535 + * @adata: pointer to authentication transform definitions.
2536 + * A split key is required for SEC Era < 6; the size of the split key
2537 + * is specified in this case. Valid algorithm values - one of
2538 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2539 + * with OP_ALG_AAI_HMAC_PRECOMP.
2540 * @ivsize: initialization vector size
2541 * @icvsize: integrity check value (ICV) size (truncated or full)
2542 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2543 * @nonce: pointer to rfc3686 nonce
2544 * @ctx1_iv_off: IV offset in CONTEXT1 register
2545 * @is_qi: true when called from caam/qi
2547 - * Note: Requires an MDHA split key.
2550 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
2551 struct alginfo *adata, unsigned int ivsize,
2552 unsigned int icvsize, const bool is_rfc3686,
2553 - u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
2554 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
2557 /* Note: Context registers are saved. */
2558 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2559 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2561 /* Class 2 operation */
2562 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2563 @@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const
2566 /* Read and write assoclen bytes */
2567 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2568 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2569 + if (is_qi || era < 3) {
2570 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2571 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2573 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2574 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2577 /* Skip assoc data */
2578 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2579 @@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
2580 * @cdata: pointer to block cipher transform definitions
2581 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2582 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2583 - * @adata: pointer to authentication transform definitions. Note that since a
2584 - * split key is to be used, the size of the split key itself is
2585 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2586 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2587 + * @adata: pointer to authentication transform definitions.
2588 + * A split key is required for SEC Era < 6; the size of the split key
2589 + * is specified in this case. Valid algorithm values - one of
2590 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2591 + * with OP_ALG_AAI_HMAC_PRECOMP.
2592 * @ivsize: initialization vector size
2593 * @icvsize: integrity check value (ICV) size (truncated or full)
2594 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2595 * @nonce: pointer to rfc3686 nonce
2596 * @ctx1_iv_off: IV offset in CONTEXT1 register
2597 * @is_qi: true when called from caam/qi
2599 - * Note: Requires an MDHA split key.
2602 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
2603 struct alginfo *adata, unsigned int ivsize,
2604 unsigned int icvsize, const bool geniv,
2605 const bool is_rfc3686, u32 *nonce,
2606 - const u32 ctx1_iv_off, const bool is_qi)
2607 + const u32 ctx1_iv_off, const bool is_qi, int era)
2609 /* Note: Context registers are saved. */
2610 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2611 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2613 /* Class 2 operation */
2614 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2615 @@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const
2618 /* Read and write assoclen bytes */
2619 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2621 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
2623 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2624 + if (is_qi || era < 3) {
2625 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2627 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
2630 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
2633 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2635 + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
2638 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
2642 /* Skip assoc data */
2643 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2644 @@ -456,29 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
2645 * @cdata: pointer to block cipher transform definitions
2646 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2647 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2648 - * @adata: pointer to authentication transform definitions. Note that since a
2649 - * split key is to be used, the size of the split key itself is
2650 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2651 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2652 + * @adata: pointer to authentication transform definitions.
2653 + * A split key is required for SEC Era < 6; the size of the split key
2654 + * is specified in this case. Valid algorithm values - one of
2655 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2656 + * with OP_ALG_AAI_HMAC_PRECOMP.
2657 * @ivsize: initialization vector size
2658 * @icvsize: integrity check value (ICV) size (truncated or full)
2659 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2660 * @nonce: pointer to rfc3686 nonce
2661 * @ctx1_iv_off: IV offset in CONTEXT1 register
2662 * @is_qi: true when called from caam/qi
2664 - * Note: Requires an MDHA split key.
2667 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
2668 struct alginfo *adata, unsigned int ivsize,
2669 unsigned int icvsize, const bool is_rfc3686,
2670 u32 *nonce, const u32 ctx1_iv_off,
2672 + const bool is_qi, int era)
2676 /* Note: Context registers are saved. */
2677 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2678 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2682 @@ -528,8 +561,13 @@ copy_iv:
2685 /* Read and write assoclen bytes */
2686 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2687 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2688 + if (is_qi || era < 3) {
2689 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2690 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2692 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2693 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2696 /* Skip assoc data */
2697 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2698 @@ -583,14 +621,431 @@ copy_iv:
2699 EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
2702 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
2703 + * @desc: pointer to buffer used for descriptor construction
2704 + * @cdata: pointer to block cipher transform definitions
2705 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
2706 + * with OP_ALG_AAI_CBC
2707 + * @adata: pointer to authentication transform definitions.
2708 + * A split key is required for SEC Era < 6; the size of the split key
2709 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
2710 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2711 + * @assoclen: associated data length
2712 + * @ivsize: initialization vector size
2713 + * @authsize: authentication data size
2714 + * @blocksize: block cipher size
2717 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
2718 + struct alginfo *adata, unsigned int assoclen,
2719 + unsigned int ivsize, unsigned int authsize,
2720 + unsigned int blocksize, int era)
2722 + u32 *key_jump_cmd, *zero_payload_jump_cmd;
2723 + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
2726 + * Compute the index (in bytes) for the LOAD with destination of
2727 + * Class 1 Data Size Register and for the LOAD that generates padding
2729 + if (adata->key_inline) {
2730 + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2731 + cdata->keylen - 4 * CAAM_CMD_SZ;
2732 + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2733 + cdata->keylen - 2 * CAAM_CMD_SZ;
2735 + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2737 + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2741 + stidx = 1 << HDR_START_IDX_SHIFT;
2742 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2744 + /* skip key loading if they are loaded due to sharing */
2745 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2749 + if (adata->key_inline)
2750 + append_key_as_imm(desc, adata->key_virt,
2751 + adata->keylen_pad, adata->keylen,
2752 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2755 + append_key(desc, adata->key_dma, adata->keylen,
2756 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2758 + append_proto_dkp(desc, adata);
2761 + if (cdata->key_inline)
2762 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
2763 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
2765 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2766 + KEY_DEST_CLASS_REG);
2768 + set_jump_tgt_here(desc, key_jump_cmd);
2770 + /* class 2 operation */
2771 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2773 + /* class 1 operation */
2774 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2777 + /* payloadlen = input data length - (assoclen + ivlen) */
2778 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
2780 + /* math1 = payloadlen + icvlen */
2781 + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
2783 + /* padlen = block_size - math1 % block_size */
2784 + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
2785 + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
2787 + /* cryptlen = payloadlen + icvlen + padlen */
2788 + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
2791 + * update immediate data with the padding length value
2792 + * for the LOAD in the class 1 data size register.
2794 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2795 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
2796 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2797 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
2799 + /* overwrite PL field for the padding iNFO FIFO entry */
2800 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2801 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
2802 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2803 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
2805 + /* store encrypted payload, icv and padding */
2806 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2808 + /* if payload length is zero, jump to zero-payload commands */
2809 + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
2810 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2811 + JUMP_COND_MATH_Z);
2813 + /* load iv in context1 */
2814 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2815 + LDST_CLASS_1_CCB | ivsize);
2817 + /* read assoc for authentication */
2818 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2820 + /* insnoop payload */
2821 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
2822 + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
2824 + /* jump the zero-payload commands */
2825 + append_jump(desc, JUMP_TEST_ALL | 3);
2827 + /* zero-payload commands */
2828 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
2830 + /* load iv in context1 */
2831 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2832 + LDST_CLASS_1_CCB | ivsize);
2834 + /* assoc data is the only data for authentication */
2835 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2836 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
2838 + /* send icv to encryption */
2839 + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
2842 + /* update class 1 data size register with padding length */
2843 + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
2844 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
2846 + /* generate padding and send it to encryption */
2847 + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
2848 + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
2849 + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
2850 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
2853 + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
2854 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
2855 + desc_bytes(desc), 1);
2858 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
2861 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
2862 + * @desc: pointer to buffer used for descriptor construction
2863 + * @cdata: pointer to block cipher transform definitions
2864 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
2865 + * with OP_ALG_AAI_CBC
2866 + * @adata: pointer to authentication transform definitions.
2867 + * A split key is required for SEC Era < 6; the size of the split key
2868 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
2869 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2870 + * @assoclen: associated data length
2871 + * @ivsize: initialization vector size
2872 + * @authsize: authentication data size
2873 + * @blocksize: block cipher size
2876 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
2877 + struct alginfo *adata, unsigned int assoclen,
2878 + unsigned int ivsize, unsigned int authsize,
2879 + unsigned int blocksize, int era)
2881 + u32 stidx, jumpback;
2882 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
2884 + * Pointer Size bool determines the size of address pointers.
2885 + * false - Pointers fit in one 32-bit word.
2886 + * true - Pointers fit in two 32-bit words.
2888 + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
2890 + stidx = 1 << HDR_START_IDX_SHIFT;
2891 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2893 + /* skip key loading if they are loaded due to sharing */
2894 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2898 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2899 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
2901 + append_proto_dkp(desc, adata);
2903 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2904 + KEY_DEST_CLASS_REG);
2906 + set_jump_tgt_here(desc, key_jump_cmd);
2908 + /* class 2 operation */
2909 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2910 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
2911 + /* class 1 operation */
2912 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2915 + /* VSIL = input data length - 2 * block_size */
2916 + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
2920 + * payloadlen + icvlen + padlen = input data length - (assoclen +
2923 + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
2925 + /* skip data to the last but one cipher block */
2926 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
2928 + /* load iv for the last cipher block */
2929 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2930 + LDST_CLASS_1_CCB | ivsize);
2932 + /* read last cipher block */
2933 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
2934 + FIFOLD_TYPE_LAST1 | blocksize);
2936 + /* move decrypted block into math0 and math1 */
2937 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
2940 + /* reset AES CHA */
2941 + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
2942 + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
2944 + /* rewind input sequence */
2945 + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
2947 + /* key1 is in decryption form */
2948 + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
2949 + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
2951 + /* load iv in context1 */
2952 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
2953 + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
2955 + /* read sequence number */
2956 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
2957 + /* load Type, Version and Len fields in math0 */
2958 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
2959 + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
2961 + /* compute (padlen - 1) */
2962 + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
2964 + /* math2 = icvlen + (padlen - 1) + 1 */
2965 + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
2967 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
2969 + /* VSOL = payloadlen + icvlen + padlen */
2970 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
2972 + if (caam_little_end)
2973 + append_moveb(desc, MOVE_WAITCOMP |
2974 + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
2976 + /* update Len field */
2977 + append_math_sub(desc, REG0, REG0, REG2, 8);
2979 + /* store decrypted payload, icv and padding */
2980 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2982 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/
2983 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
2985 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2986 + JUMP_COND_MATH_Z);
2988 + /* send Type, Version and Len(pre ICV) fields to authentication */
2989 + append_move(desc, MOVE_WAITCOMP |
2990 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
2991 + (3 << MOVE_OFFSET_SHIFT) | 5);
2993 + /* outsnooping payload */
2994 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
2995 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
2997 + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
2999 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
3000 + /* send Type, Version and Len(pre ICV) fields to authentication */
3001 + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
3002 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
3003 + (3 << MOVE_OFFSET_SHIFT) | 5);
3005 + set_jump_tgt_here(desc, skip_zero_jump_cmd);
3006 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
3008 + /* load icvlen and padlen */
3009 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
3010 + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
3012 + /* VSIL = (payloadlen + icvlen + padlen) - icvlen + padlen */
3013 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
3016 + * Start a new input sequence using the SEQ OUT PTR command options,
3017 + * pointer and length used when the current output sequence was defined.
3021 + * Move the lower 32 bits of Shared Descriptor address, the
3022 + * SEQ OUT PTR command, Output Pointer (2 words) and
3023 + * Output Length into math registers.
3025 + if (caam_little_end)
3026 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3028 + (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
3030 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3032 + (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
3034 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
3035 + append_math_and_imm_u32(desc, REG0, REG0, IMM,
3036 + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
3037 + /* Append a JUMP command after the copied fields */
3038 + jumpback = CMD_JUMP | (char)-9;
3039 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
3040 + LDST_SRCDST_WORD_DECO_MATH2 |
3041 + (4 << LDST_OFFSET_SHIFT));
3042 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
3043 + /* Move the updated fields back to the Job Descriptor */
3044 + if (caam_little_end)
3045 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3046 + MOVE_DEST_DESCBUF |
3047 + (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
3049 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3050 + MOVE_DEST_DESCBUF |
3051 + (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
3054 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
3055 + * and then jump back to the next command from the
3056 + * Shared Descriptor.
3058 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
3061 + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
3062 + * Output Length into math registers.
3064 + if (caam_little_end)
3065 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3067 + (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
3069 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3071 + (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
3073 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
3074 + append_math_and_imm_u64(desc, REG0, REG0, IMM,
3075 + ~(((u64)(CMD_SEQ_IN_PTR ^
3076 + CMD_SEQ_OUT_PTR)) << 32));
3077 + /* Append a JUMP command after the copied fields */
3078 + jumpback = CMD_JUMP | (char)-7;
3079 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
3080 + LDST_SRCDST_WORD_DECO_MATH1 |
3081 + (4 << LDST_OFFSET_SHIFT));
3082 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
3083 + /* Move the updated fields back to the Job Descriptor */
3084 + if (caam_little_end)
3085 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3086 + MOVE_DEST_DESCBUF |
3087 + (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
3089 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3090 + MOVE_DEST_DESCBUF |
3091 + (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
3094 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
3095 + * and then jump back to the next command from the
3096 + * Shared Descriptor.
3098 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
3101 + /* skip payload */
3102 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
3104 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
3105 + FIFOLD_TYPE_LAST2 | authsize);
3108 + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
3109 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
3110 + desc_bytes(desc), 1);
3113 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
3116 * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
3117 * @desc: pointer to buffer used for descriptor construction
3118 * @cdata: pointer to block cipher transform definitions
3119 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3120 + * @ivsize: initialization vector size
3121 * @icvsize: integrity check value (ICV) size (truncated or full)
3122 + * @is_qi: true when called from caam/qi
3124 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
3125 - unsigned int icvsize)
3126 + unsigned int ivsize, unsigned int icvsize,
3129 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
3130 *zero_assoc_jump_cmd2;
3131 @@ -612,11 +1067,35 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3132 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3136 + u32 *wait_load_cmd;
3138 + /* REG3 = assoclen */
3139 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3140 + LDST_SRCDST_WORD_DECO_MATH3 |
3141 + (4 << LDST_OFFSET_SHIFT));
3143 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3144 + JUMP_COND_CALM | JUMP_COND_NCP |
3145 + JUMP_COND_NOP | JUMP_COND_NIP |
3147 + set_jump_tgt_here(desc, wait_load_cmd);
3149 + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
3152 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
3156 /* if assoclen + cryptlen is ZERO, skip to ICV write */
3157 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3158 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
3162 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3163 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3165 /* if assoclen is ZERO, skip reading the assoc data */
3166 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3167 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
3168 @@ -648,8 +1127,11 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3169 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
3170 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
3172 - /* jump the zero-payload commands */
3173 - append_jump(desc, JUMP_TEST_ALL | 2);
3174 + /* jump to ICV writing */
3176 + append_jump(desc, JUMP_TEST_ALL | 4);
3178 + append_jump(desc, JUMP_TEST_ALL | 2);
3180 /* zero-payload commands */
3181 set_jump_tgt_here(desc, zero_payload_jump_cmd);
3182 @@ -657,10 +1139,18 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3183 /* read assoc data */
3184 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
3185 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
3187 + /* jump to ICV writing */
3188 + append_jump(desc, JUMP_TEST_ALL | 2);
3190 /* There is no input data */
3191 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
3194 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3195 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
3196 + FIFOLD_TYPE_LAST1);
3199 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
3200 LDST_SRCDST_BYTE_CONTEXT);
3201 @@ -677,10 +1167,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
3202 * @desc: pointer to buffer used for descriptor construction
3203 * @cdata: pointer to block cipher transform definitions
3204 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3205 + * @ivsize: initialization vector size
3206 * @icvsize: integrity check value (ICV) size (truncated or full)
3207 + * @is_qi: true when called from caam/qi
3209 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
3210 - unsigned int icvsize)
3211 + unsigned int ivsize, unsigned int icvsize,
3214 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
3216 @@ -701,6 +1194,24 @@ void cnstr_shdsc_gcm_decap(u32 * const d
3217 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3218 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3221 + u32 *wait_load_cmd;
3223 + /* REG3 = assoclen */
3224 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3225 + LDST_SRCDST_WORD_DECO_MATH3 |
3226 + (4 << LDST_OFFSET_SHIFT));
3228 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3229 + JUMP_COND_CALM | JUMP_COND_NCP |
3230 + JUMP_COND_NOP | JUMP_COND_NIP |
3232 + set_jump_tgt_here(desc, wait_load_cmd);
3234 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3235 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3238 /* if assoclen is ZERO, skip reading the assoc data */
3239 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3240 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
3241 @@ -753,10 +1264,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
3242 * @desc: pointer to buffer used for descriptor construction
3243 * @cdata: pointer to block cipher transform definitions
3244 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3245 + * @ivsize: initialization vector size
3246 * @icvsize: integrity check value (ICV) size (truncated or full)
3247 + * @is_qi: true when called from caam/qi
3249 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
3250 - unsigned int icvsize)
3251 + unsigned int ivsize, unsigned int icvsize,
3256 @@ -777,7 +1291,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
3257 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3260 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
3262 + u32 *wait_load_cmd;
3264 + /* REG3 = assoclen */
3265 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3266 + LDST_SRCDST_WORD_DECO_MATH3 |
3267 + (4 << LDST_OFFSET_SHIFT));
3269 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3270 + JUMP_COND_CALM | JUMP_COND_NCP |
3271 + JUMP_COND_NOP | JUMP_COND_NIP |
3273 + set_jump_tgt_here(desc, wait_load_cmd);
3275 + /* Read salt and IV */
3276 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3277 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3279 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3280 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3283 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
3284 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3286 /* Read assoc data */
3287 @@ -785,7 +1321,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
3288 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
3291 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
3292 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
3294 /* Will read cryptlen bytes */
3295 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3296 @@ -824,10 +1360,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap)
3297 * @desc: pointer to buffer used for descriptor construction
3298 * @cdata: pointer to block cipher transform definitions
3299 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3300 + * @ivsize: initialization vector size
3301 * @icvsize: integrity check value (ICV) size (truncated or full)
3302 + * @is_qi: true when called from caam/qi
3304 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
3305 - unsigned int icvsize)
3306 + unsigned int ivsize, unsigned int icvsize,
3311 @@ -849,7 +1388,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
3312 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3313 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3315 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
3317 + u32 *wait_load_cmd;
3319 + /* REG3 = assoclen */
3320 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3321 + LDST_SRCDST_WORD_DECO_MATH3 |
3322 + (4 << LDST_OFFSET_SHIFT));
3324 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3325 + JUMP_COND_CALM | JUMP_COND_NCP |
3326 + JUMP_COND_NOP | JUMP_COND_NIP |
3328 + set_jump_tgt_here(desc, wait_load_cmd);
3330 + /* Read salt and IV */
3331 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3332 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3334 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3335 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3338 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
3339 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3341 /* Read assoc data */
3342 @@ -857,7 +1418,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
3343 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
3346 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
3347 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
3349 /* Will read cryptlen bytes */
3350 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
3351 @@ -896,10 +1457,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap)
3352 * @desc: pointer to buffer used for descriptor construction
3353 * @cdata: pointer to block cipher transform definitions
3354 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3355 + * @ivsize: initialization vector size
3356 * @icvsize: integrity check value (ICV) size (truncated or full)
3357 + * @is_qi: true when called from caam/qi
3359 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
3360 - unsigned int icvsize)
3361 + unsigned int ivsize, unsigned int icvsize,
3364 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
3366 @@ -920,6 +1484,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
3367 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3371 + /* assoclen is not needed, skip it */
3372 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
3374 + /* Read salt and IV */
3375 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3376 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3378 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3379 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3382 /* assoclen + cryptlen = seqinlen */
3383 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
3385 @@ -931,7 +1507,7 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
3386 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
3387 (0x6 << MOVE_LEN_SHIFT));
3388 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
3389 - (0x8 << MOVE_LEN_SHIFT));
3390 + (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
3392 /* Will read assoclen + cryptlen bytes */
3393 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3394 @@ -966,10 +1542,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap)
3395 * @desc: pointer to buffer used for descriptor construction
3396 * @cdata: pointer to block cipher transform definitions
3397 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3398 + * @ivsize: initialization vector size
3399 * @icvsize: integrity check value (ICV) size (truncated or full)
3400 + * @is_qi: true when called from caam/qi
3402 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
3403 - unsigned int icvsize)
3404 + unsigned int ivsize, unsigned int icvsize,
3407 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
3409 @@ -990,6 +1569,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3410 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3411 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3414 + /* assoclen is not needed, skip it */
3415 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
3417 + /* Read salt and IV */
3418 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3419 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3421 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3422 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3425 /* assoclen + cryptlen = seqoutlen */
3426 append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3428 @@ -1001,7 +1592,7 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3429 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
3430 (0x6 << MOVE_LEN_SHIFT));
3431 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
3432 - (0x8 << MOVE_LEN_SHIFT));
3433 + (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
3435 /* Will read assoclen + cryptlen bytes */
3436 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3437 @@ -1035,6 +1626,138 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3439 EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
3442 + * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and
3443 + * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared
3444 + * descriptor (non-protocol).
3445 + * @desc: pointer to buffer used for descriptor construction
3446 + * @cdata: pointer to block cipher transform definitions
3447 + * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with
3448 + * OP_ALG_AAI_AEAD.
3449 + * @adata: pointer to authentication transform definitions
3450 + * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with
3451 + * OP_ALG_AAI_AEAD.
3452 + * @ivsize: initialization vector size
3453 + * @icvsize: integrity check value (ICV) size (truncated or full)
3454 + * @encap: true if encapsulation, false if decapsulation
3455 + * @is_qi: true when called from caam/qi
3457 +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
3458 + struct alginfo *adata, unsigned int ivsize,
3459 + unsigned int icvsize, const bool encap,
3462 + u32 *key_jump_cmd, *wait_cmd;
3464 + const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE);
3466 + /* Note: Context registers are saved. */
3467 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
3469 + /* skip key loading if they are loaded due to sharing */
3470 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3473 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen,
3474 + CLASS_1 | KEY_DEST_CLASS_REG);
3476 + /* For IPsec load the salt from keymat in the context register */
3478 + append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4,
3479 + LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
3480 + 4 << LDST_OFFSET_SHIFT);
3482 + set_jump_tgt_here(desc, key_jump_cmd);
3484 + /* Class 2 and 1 operations: Poly & ChaCha */
3486 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
3488 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3491 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
3492 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3493 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3498 + u32 *wait_load_cmd;
3499 + u32 ctx1_iv_off = is_ipsec ? 8 : 4;
3501 + /* REG3 = assoclen */
3502 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3503 + LDST_SRCDST_WORD_DECO_MATH3 |
3504 + 4 << LDST_OFFSET_SHIFT);
3506 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3507 + JUMP_COND_CALM | JUMP_COND_NCP |
3508 + JUMP_COND_NOP | JUMP_COND_NIP |
3510 + set_jump_tgt_here(desc, wait_load_cmd);
3512 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
3513 + LDST_SRCDST_BYTE_CONTEXT |
3514 + ctx1_iv_off << LDST_OFFSET_SHIFT);
3518 + * MAGIC with NFIFO
3519 + * Read associated data from the input and send them to class1 and
3520 + * class2 alignment blocks. From class1 send data to output fifo and
3521 + * then write it to memory since we don't need to encrypt AD.
3523 + nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 |
3524 + NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND;
3525 + append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB |
3526 + LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3);
3528 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3529 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3530 + append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO |
3531 + FIFOLD_CLASS_CLASS1 | LDST_VLF);
3532 + append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK |
3533 + MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3);
3534 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
3536 + /* IPsec - copy IV at the output */
3538 + append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA |
3541 + wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
3542 + JUMP_COND_NOP | JUMP_TEST_ALL);
3543 + set_jump_tgt_here(desc, wait_cmd);
3546 + /* Read and write cryptlen bytes */
3547 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3548 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0,
3550 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
3553 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
3554 + LDST_SRCDST_BYTE_CONTEXT);
3556 + /* Read and write cryptlen bytes */
3557 + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3558 + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0,
3560 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
3562 + /* Load ICV for verification */
3563 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
3564 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
3567 + print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ",
3568 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3571 +EXPORT_SYMBOL(cnstr_shdsc_chachapoly);
3574 * For ablkcipher encrypt and decrypt, read from req->src and
3576 @@ -1053,7 +1776,8 @@ static inline void ablkcipher_append_src
3577 * @desc: pointer to buffer used for descriptor construction
3578 * @cdata: pointer to block cipher transform definitions
3579 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
3580 - * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
3581 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
3582 + * - OP_ALG_ALGSEL_CHACHA20
3583 * @ivsize: initialization vector size
3584 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
3585 * @ctx1_iv_off: IV offset in CONTEXT1 register
3586 @@ -1075,7 +1799,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
3588 /* Load nonce into CONTEXT1 reg */
3590 - u8 *nonce = cdata->key_virt + cdata->keylen;
3591 + const u8 *nonce = cdata->key_virt + cdata->keylen;
3593 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3594 LDST_CLASS_IND_CCB |
3595 @@ -1118,7 +1842,8 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_enc
3596 * @desc: pointer to buffer used for descriptor construction
3597 * @cdata: pointer to block cipher transform definitions
3598 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
3599 - * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
3600 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
3601 + * - OP_ALG_ALGSEL_CHACHA20
3602 * @ivsize: initialization vector size
3603 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
3604 * @ctx1_iv_off: IV offset in CONTEXT1 register
3605 @@ -1140,7 +1865,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
3607 /* Load nonce into CONTEXT1 reg */
3609 - u8 *nonce = cdata->key_virt + cdata->keylen;
3610 + const u8 *nonce = cdata->key_virt + cdata->keylen;
3612 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3613 LDST_CLASS_IND_CCB |
3614 @@ -1209,7 +1934,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
3616 /* Load Nonce into CONTEXT1 reg */
3618 - u8 *nonce = cdata->key_virt + cdata->keylen;
3619 + const u8 *nonce = cdata->key_virt + cdata->keylen;
3621 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3622 LDST_CLASS_IND_CCB |
3623 --- a/drivers/crypto/caam/caamalg_desc.h
3624 +++ b/drivers/crypto/caam/caamalg_desc.h
3626 #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
3627 #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
3629 +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
3630 +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
3632 /* Note: Nonce is counted in cdata.keylen */
3633 #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
3636 #define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
3637 #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
3638 #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
3639 +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
3640 +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
3642 #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
3643 #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
3644 #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
3645 +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
3646 +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
3648 #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
3649 #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
3650 #define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
3651 +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
3652 +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
3654 #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
3655 #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
3659 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
3660 - unsigned int icvsize);
3661 + unsigned int icvsize, int era);
3663 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
3664 - unsigned int icvsize);
3665 + unsigned int icvsize, int era);
3667 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
3668 struct alginfo *adata, unsigned int ivsize,
3669 unsigned int icvsize, const bool is_rfc3686,
3670 u32 *nonce, const u32 ctx1_iv_off,
3671 - const bool is_qi);
3672 + const bool is_qi, int era);
3674 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
3675 struct alginfo *adata, unsigned int ivsize,
3676 unsigned int icvsize, const bool geniv,
3677 const bool is_rfc3686, u32 *nonce,
3678 - const u32 ctx1_iv_off, const bool is_qi);
3679 + const u32 ctx1_iv_off, const bool is_qi, int era);
3681 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
3682 struct alginfo *adata, unsigned int ivsize,
3683 unsigned int icvsize, const bool is_rfc3686,
3684 u32 *nonce, const u32 ctx1_iv_off,
3685 - const bool is_qi);
3686 + const bool is_qi, int era);
3688 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
3689 + struct alginfo *adata, unsigned int assoclen,
3690 + unsigned int ivsize, unsigned int authsize,
3691 + unsigned int blocksize, int era);
3693 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
3694 + struct alginfo *adata, unsigned int assoclen,
3695 + unsigned int ivsize, unsigned int authsize,
3696 + unsigned int blocksize, int era);
3698 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
3699 - unsigned int icvsize);
3700 + unsigned int ivsize, unsigned int icvsize,
3701 + const bool is_qi);
3703 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
3704 - unsigned int icvsize);
3705 + unsigned int ivsize, unsigned int icvsize,
3706 + const bool is_qi);
3708 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
3709 - unsigned int icvsize);
3710 + unsigned int ivsize, unsigned int icvsize,
3711 + const bool is_qi);
3713 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
3714 - unsigned int icvsize);
3715 + unsigned int ivsize, unsigned int icvsize,
3716 + const bool is_qi);
3718 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
3719 - unsigned int icvsize);
3720 + unsigned int ivsize, unsigned int icvsize,
3721 + const bool is_qi);
3723 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
3724 - unsigned int icvsize);
3725 + unsigned int ivsize, unsigned int icvsize,
3726 + const bool is_qi);
3728 +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
3729 + struct alginfo *adata, unsigned int ivsize,
3730 + unsigned int icvsize, const bool encap,
3731 + const bool is_qi);
3733 void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
3734 unsigned int ivsize, const bool is_rfc3686,
3735 --- a/drivers/crypto/caam/caamalg_qi.c
3736 +++ b/drivers/crypto/caam/caamalg_qi.c
3745 #include "desc_constr.h"
3746 @@ -53,6 +53,7 @@ struct caam_ctx {
3747 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
3748 u8 key[CAAM_MAX_KEY_SIZE];
3750 + enum dma_data_direction dir;
3751 struct alginfo adata;
3752 struct alginfo cdata;
3753 unsigned int authsize;
3754 @@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypt
3755 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
3756 OP_ALG_AAI_CTR_MOD128);
3757 const bool is_rfc3686 = alg->caam.rfc3686;
3758 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
3760 if (!ctx->cdata.keylen || !ctx->authsize)
3762 @@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypt
3764 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3765 ivsize, ctx->authsize, is_rfc3686, nonce,
3766 - ctx1_iv_off, true);
3767 + ctx1_iv_off, true, ctrlpriv->era);
3770 /* aead_decrypt shared descriptor */
3771 @@ -149,7 +151,8 @@ skip_enc:
3773 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
3774 ivsize, ctx->authsize, alg->caam.geniv,
3775 - is_rfc3686, nonce, ctx1_iv_off, true);
3776 + is_rfc3686, nonce, ctx1_iv_off, true,
3779 if (!alg->caam.geniv)
3781 @@ -176,7 +179,7 @@ skip_enc:
3783 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3784 ivsize, ctx->authsize, is_rfc3686, nonce,
3785 - ctx1_iv_off, true);
3786 + ctx1_iv_off, true, ctrlpriv->era);
3790 @@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aea
3792 struct caam_ctx *ctx = crypto_aead_ctx(aead);
3793 struct device *jrdev = ctx->jrdev;
3794 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
3795 struct crypto_authenc_keys keys;
3798 @@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aea
3799 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3803 + * If DKP is supported, use it in the shared descriptor to generate
3806 + if (ctrlpriv->era >= 6) {
3807 + ctx->adata.keylen = keys.authkeylen;
3808 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3809 + OP_ALG_ALGSEL_MASK);
3811 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
3814 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
3815 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
3817 + dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
3818 + ctx->adata.keylen_pad +
3819 + keys.enckeylen, ctx->dir);
3820 + goto skip_split_key;
3823 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
3824 keys.authkeylen, CAAM_MAX_KEY_SIZE -
3826 @@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aea
3827 /* postpend encryption key to auth split key */
3828 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
3829 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
3830 - keys.enckeylen, DMA_TO_DEVICE);
3831 + keys.enckeylen, ctx->dir);
3833 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
3834 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
3835 ctx->adata.keylen_pad + keys.enckeylen, 1);
3839 ctx->cdata.keylen = keys.enckeylen;
3841 ret = aead_set_sh_desc(aead);
3842 @@ -258,6 +284,468 @@ badkey:
3846 +static int tls_set_sh_desc(struct crypto_aead *tls)
3848 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3849 + unsigned int ivsize = crypto_aead_ivsize(tls);
3850 + unsigned int blocksize = crypto_aead_blocksize(tls);
3851 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
3852 + unsigned int data_len[2];
3854 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
3856 + if (!ctx->cdata.keylen || !ctx->authsize)
3860 + * TLS 1.0 encrypt shared descriptor
3861 + * Job Descriptor and Shared Descriptor
3862 + * must fit into the 64-word Descriptor h/w Buffer
3864 + data_len[0] = ctx->adata.keylen_pad;
3865 + data_len[1] = ctx->cdata.keylen;
3867 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
3868 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
3872 + ctx->adata.key_virt = ctx->key;
3874 + ctx->adata.key_dma = ctx->key_dma;
3877 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
3879 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
3881 + ctx->adata.key_inline = !!(inl_mask & 1);
3882 + ctx->cdata.key_inline = !!(inl_mask & 2);
3884 + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3885 + assoclen, ivsize, ctx->authsize, blocksize,
3889 + * TLS 1.0 decrypt shared descriptor
3890 + * Keys do not fit inline, regardless of algorithms used
3892 + ctx->adata.key_inline = false;
3893 + ctx->adata.key_dma = ctx->key_dma;
3894 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
3896 + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
3897 + assoclen, ivsize, ctx->authsize, blocksize,
3903 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
3905 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3907 + ctx->authsize = authsize;
3908 + tls_set_sh_desc(tls);
3913 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
3914 + unsigned int keylen)
3916 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3917 + struct device *jrdev = ctx->jrdev;
3918 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
3919 + struct crypto_authenc_keys keys;
3922 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3926 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
3927 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
3929 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
3930 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3934 + * If DKP is supported, use it in the shared descriptor to generate
3937 + if (ctrlpriv->era >= 6) {
3938 + ctx->adata.keylen = keys.authkeylen;
3939 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3940 + OP_ALG_ALGSEL_MASK);
3942 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
3945 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
3946 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
3948 + dma_sync_single_for_device(jrdev, ctx->key_dma,
3949 + ctx->adata.keylen_pad +
3950 + keys.enckeylen, ctx->dir);
3951 + goto skip_split_key;
3954 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
3955 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
3960 + /* postpend encryption key to auth split key */
3961 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
3962 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
3963 + keys.enckeylen, ctx->dir);
3966 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
3967 + ctx->adata.keylen, ctx->adata.keylen_pad);
3968 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
3969 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
3970 + ctx->adata.keylen_pad + keys.enckeylen, 1);
3974 + ctx->cdata.keylen = keys.enckeylen;
3976 + ret = tls_set_sh_desc(tls);
3980 + /* Now update the driver contexts with the new shared descriptor */
3981 + if (ctx->drv_ctx[ENCRYPT]) {
3982 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
3983 + ctx->sh_desc_enc);
3985 + dev_err(jrdev, "driver enc context update failed\n");
3990 + if (ctx->drv_ctx[DECRYPT]) {
3991 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
3992 + ctx->sh_desc_dec);
3994 + dev_err(jrdev, "driver dec context update failed\n");
4001 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
4005 +static int gcm_set_sh_desc(struct crypto_aead *aead)
4007 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4008 + unsigned int ivsize = crypto_aead_ivsize(aead);
4009 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
4010 + ctx->cdata.keylen;
4012 + if (!ctx->cdata.keylen || !ctx->authsize)
4016 + * Job Descriptor and Shared Descriptor
4017 + * must fit into the 64-word Descriptor h/w Buffer
4019 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
4020 + ctx->cdata.key_inline = true;
4021 + ctx->cdata.key_virt = ctx->key;
4023 + ctx->cdata.key_inline = false;
4024 + ctx->cdata.key_dma = ctx->key_dma;
4027 + cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
4028 + ctx->authsize, true);
4031 + * Job Descriptor and Shared Descriptor
4032 + * must fit into the 64-word Descriptor h/w Buffer
4034 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
4035 + ctx->cdata.key_inline = true;
4036 + ctx->cdata.key_virt = ctx->key;
4038 + ctx->cdata.key_inline = false;
4039 + ctx->cdata.key_dma = ctx->key_dma;
4042 + cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
4043 + ctx->authsize, true);
4048 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
4050 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4052 + ctx->authsize = authsize;
4053 + gcm_set_sh_desc(authenc);
4058 +static int gcm_setkey(struct crypto_aead *aead,
4059 + const u8 *key, unsigned int keylen)
4061 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4062 + struct device *jrdev = ctx->jrdev;
4066 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4067 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4070 + memcpy(ctx->key, key, keylen);
4071 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
4072 + ctx->cdata.keylen = keylen;
4074 + ret = gcm_set_sh_desc(aead);
4078 + /* Now update the driver contexts with the new shared descriptor */
4079 + if (ctx->drv_ctx[ENCRYPT]) {
4080 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
4081 + ctx->sh_desc_enc);
4083 + dev_err(jrdev, "driver enc context update failed\n");
4088 + if (ctx->drv_ctx[DECRYPT]) {
4089 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
4090 + ctx->sh_desc_dec);
4092 + dev_err(jrdev, "driver dec context update failed\n");
4100 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
4102 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4103 + unsigned int ivsize = crypto_aead_ivsize(aead);
4104 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
4105 + ctx->cdata.keylen;
4107 + if (!ctx->cdata.keylen || !ctx->authsize)
4110 + ctx->cdata.key_virt = ctx->key;
4113 + * Job Descriptor and Shared Descriptor
4114 + * must fit into the 64-word Descriptor h/w Buffer
4116 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
4117 + ctx->cdata.key_inline = true;
4119 + ctx->cdata.key_inline = false;
4120 + ctx->cdata.key_dma = ctx->key_dma;
4123 + cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
4124 + ctx->authsize, true);
4127 + * Job Descriptor and Shared Descriptor
4128 + * must fit into the 64-word Descriptor h/w Buffer
4130 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
4131 + ctx->cdata.key_inline = true;
4133 + ctx->cdata.key_inline = false;
4134 + ctx->cdata.key_dma = ctx->key_dma;
4137 + cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
4138 + ctx->authsize, true);
4143 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
4144 + unsigned int authsize)
4146 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4148 + ctx->authsize = authsize;
4149 + rfc4106_set_sh_desc(authenc);
4154 +static int rfc4106_setkey(struct crypto_aead *aead,
4155 + const u8 *key, unsigned int keylen)
4157 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4158 + struct device *jrdev = ctx->jrdev;
4165 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4166 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4169 + memcpy(ctx->key, key, keylen);
4171 + * The last four bytes of the key material are used as the salt value
4172 + * in the nonce. Update the AES key length.
4174 + ctx->cdata.keylen = keylen - 4;
4175 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
4178 + ret = rfc4106_set_sh_desc(aead);
4182 + /* Now update the driver contexts with the new shared descriptor */
4183 + if (ctx->drv_ctx[ENCRYPT]) {
4184 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
4185 + ctx->sh_desc_enc);
4187 + dev_err(jrdev, "driver enc context update failed\n");
4192 + if (ctx->drv_ctx[DECRYPT]) {
4193 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
4194 + ctx->sh_desc_dec);
4196 + dev_err(jrdev, "driver dec context update failed\n");
4204 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
4206 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4207 + unsigned int ivsize = crypto_aead_ivsize(aead);
4208 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
4209 + ctx->cdata.keylen;
4211 + if (!ctx->cdata.keylen || !ctx->authsize)
4214 + ctx->cdata.key_virt = ctx->key;
4217 + * Job Descriptor and Shared Descriptor
4218 + * must fit into the 64-word Descriptor h/w Buffer
4220 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
4221 + ctx->cdata.key_inline = true;
4223 + ctx->cdata.key_inline = false;
4224 + ctx->cdata.key_dma = ctx->key_dma;
4227 + cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
4228 + ctx->authsize, true);
4231 + * Job Descriptor and Shared Descriptor
4232 + * must fit into the 64-word Descriptor h/w Buffer
4234 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
4235 + ctx->cdata.key_inline = true;
4237 + ctx->cdata.key_inline = false;
4238 + ctx->cdata.key_dma = ctx->key_dma;
4241 + cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
4242 + ctx->authsize, true);
4247 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
4248 + unsigned int authsize)
4250 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4252 + ctx->authsize = authsize;
4253 + rfc4543_set_sh_desc(authenc);
4258 +static int rfc4543_setkey(struct crypto_aead *aead,
4259 + const u8 *key, unsigned int keylen)
4261 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4262 + struct device *jrdev = ctx->jrdev;
4269 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4270 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4273 + memcpy(ctx->key, key, keylen);
4275 + * The last four bytes of the key material are used as the salt value
4276 + * in the nonce. Update the AES key length.
4278 + ctx->cdata.keylen = keylen - 4;
4279 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
4282 + ret = rfc4543_set_sh_desc(aead);
4286 + /* Now update the driver contexts with the new shared descriptor */
4287 + if (ctx->drv_ctx[ENCRYPT]) {
4288 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
4289 + ctx->sh_desc_enc);
4291 + dev_err(jrdev, "driver enc context update failed\n");
4296 + if (ctx->drv_ctx[DECRYPT]) {
4297 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
4298 + ctx->sh_desc_dec);
4300 + dev_err(jrdev, "driver dec context update failed\n");
4308 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
4309 const u8 *key, unsigned int keylen)
4311 @@ -414,6 +902,29 @@ struct aead_edesc {
4315 + * tls_edesc - s/w-extended tls descriptor
4316 + * @src_nents: number of segments in input scatterlist
4317 + * @dst_nents: number of segments in output scatterlist
4318 + * @iv_dma: dma address of iv for checking continuity and link table
4319 + * @qm_sg_bytes: length of dma mapped h/w link table
4320 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
4321 + * @qm_sg_dma: bus physical mapped address of h/w link table
4322 + * @drv_req: driver-specific request structure
4323 + * @sgt: the h/w link table, followed by IV
4328 + dma_addr_t iv_dma;
4330 + dma_addr_t qm_sg_dma;
4331 + struct scatterlist tmp[2];
4332 + struct scatterlist *dst;
4333 + struct caam_drv_req drv_req;
4334 + struct qm_sg_entry sgt[0];
4338 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
4339 * @src_nents: number of segments in input scatterlist
4340 * @dst_nents: number of segments in output scatterlist
4341 @@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de
4342 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
4345 +static void tls_unmap(struct device *dev,
4346 + struct tls_edesc *edesc,
4347 + struct aead_request *req)
4349 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
4350 + int ivsize = crypto_aead_ivsize(aead);
4352 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
4353 + edesc->dst_nents, edesc->iv_dma, ivsize,
4354 + edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
4355 + edesc->qm_sg_bytes);
4358 static void ablkcipher_unmap(struct device *dev,
4359 struct ablkcipher_edesc *edesc,
4360 struct ablkcipher_request *req)
4361 @@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re
4362 qidev = caam_ctx->qidev;
4364 if (unlikely(status)) {
4365 + u32 ssrc = status & JRSTA_SSRC_MASK;
4366 + u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
4368 caam_jr_strstatus(qidev, status);
4371 + * verify hw auth check passed else return -EBADMSG
4373 + if (ssrc == JRSTA_SSRC_CCB_ERROR &&
4374 + err_id == JRSTA_CCBERR_ERRID_ICVCHK)
4380 edesc = container_of(drv_req, typeof(*edesc), drv_req);
4381 @@ -647,9 +1181,24 @@ static struct aead_edesc *aead_edesc_all
4383 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
4384 * Input is not contiguous.
4385 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
4386 + * the end of the table by allocating more S/G entries. Logic:
4387 + * if (src != dst && output S/G)
4388 + * pad output S/G, if needed
4389 + * else if (src == dst && S/G)
4390 + * overlapping S/Gs; pad one of them
4391 + * else if (input S/G) ...
4392 + * pad input S/G, if needed
4394 - qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
4395 - (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
4396 + qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
4397 + if (mapped_dst_nents > 1)
4398 + qm_sg_ents += ALIGN(mapped_dst_nents, 4);
4399 + else if ((req->src == req->dst) && (mapped_src_nents > 1))
4400 + qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
4401 + 1 + !!ivsize + ALIGN(mapped_src_nents, 4));
4403 + qm_sg_ents = ALIGN(qm_sg_ents, 4);
4405 sg_table = &edesc->sgt[0];
4406 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
4407 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
4408 @@ -785,6 +1334,260 @@ static int aead_decrypt(struct aead_requ
4409 return aead_crypt(req, false);
4412 +static int ipsec_gcm_encrypt(struct aead_request *req)
4414 + if (req->assoclen < 8)
4417 + return aead_crypt(req, true);
4420 +static int ipsec_gcm_decrypt(struct aead_request *req)
4422 + if (req->assoclen < 8)
4425 + return aead_crypt(req, false);
4428 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
4430 + struct device *qidev;
4431 + struct tls_edesc *edesc;
4432 + struct aead_request *aead_req = drv_req->app_ctx;
4433 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
4434 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
4437 + qidev = caam_ctx->qidev;
4439 + if (unlikely(status)) {
4440 + caam_jr_strstatus(qidev, status);
4444 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
4445 + tls_unmap(qidev, edesc, aead_req);
4447 + aead_request_complete(aead_req, ecode);
4448 + qi_cache_free(edesc);
4452 + * allocate and map the tls extended descriptor
4454 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
4456 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
4457 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4458 + unsigned int blocksize = crypto_aead_blocksize(aead);
4459 + unsigned int padsize, authsize;
4460 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
4461 + typeof(*alg), aead);
4462 + struct device *qidev = ctx->qidev;
4463 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4464 + GFP_KERNEL : GFP_ATOMIC;
4465 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
4466 + struct tls_edesc *edesc;
4467 + dma_addr_t qm_sg_dma, iv_dma = 0;
4470 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
4471 + int in_len, out_len;
4472 + struct qm_sg_entry *sg_table, *fd_sgt;
4473 + struct caam_drv_ctx *drv_ctx;
4474 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
4475 + struct scatterlist *dst;
4478 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
4480 + authsize = ctx->authsize + padsize;
4482 + authsize = ctx->authsize;
4485 + drv_ctx = get_drv_ctx(ctx, op_type);
4486 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
4487 + return (struct tls_edesc *)drv_ctx;
4489 + /* allocate space for base edesc, link tables and IV */
4490 + edesc = qi_cache_alloc(GFP_DMA | flags);
4491 + if (unlikely(!edesc)) {
4492 + dev_err(qidev, "could not allocate extended descriptor\n");
4493 + return ERR_PTR(-ENOMEM);
4496 + if (likely(req->src == req->dst)) {
4497 + src_nents = sg_nents_for_len(req->src, req->assoclen +
4499 + (encrypt ? authsize : 0));
4500 + if (unlikely(src_nents < 0)) {
4501 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
4502 + req->assoclen + req->cryptlen +
4503 + (encrypt ? authsize : 0));
4504 + qi_cache_free(edesc);
4505 + return ERR_PTR(src_nents);
4508 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
4509 + DMA_BIDIRECTIONAL);
4510 + if (unlikely(!mapped_src_nents)) {
4511 + dev_err(qidev, "unable to map source\n");
4512 + qi_cache_free(edesc);
4513 + return ERR_PTR(-ENOMEM);
4517 + src_nents = sg_nents_for_len(req->src, req->assoclen +
4519 + if (unlikely(src_nents < 0)) {
4520 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
4521 + req->assoclen + req->cryptlen);
4522 + qi_cache_free(edesc);
4523 + return ERR_PTR(src_nents);
4526 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
4527 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
4528 + (encrypt ? authsize : 0));
4529 + if (unlikely(dst_nents < 0)) {
4530 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
4532 + (encrypt ? authsize : 0));
4533 + qi_cache_free(edesc);
4534 + return ERR_PTR(dst_nents);
4538 + mapped_src_nents = dma_map_sg(qidev, req->src,
4539 + src_nents, DMA_TO_DEVICE);
4540 + if (unlikely(!mapped_src_nents)) {
4541 + dev_err(qidev, "unable to map source\n");
4542 + qi_cache_free(edesc);
4543 + return ERR_PTR(-ENOMEM);
4546 + mapped_src_nents = 0;
4549 + mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
4551 + if (unlikely(!mapped_dst_nents)) {
4552 + dev_err(qidev, "unable to map destination\n");
4553 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
4554 + qi_cache_free(edesc);
4555 + return ERR_PTR(-ENOMEM);
4560 + * Create S/G table: IV, src, dst.
4561 + * Input is not contiguous.
4563 + qm_sg_ents = 1 + mapped_src_nents +
4564 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
4565 + sg_table = &edesc->sgt[0];
4566 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
4568 + ivsize = crypto_aead_ivsize(aead);
4569 + iv = (u8 *)(sg_table + qm_sg_ents);
4570 + /* Make sure IV is located in a DMAable area */
4571 + memcpy(iv, req->iv, ivsize);
4572 + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
4573 + if (dma_mapping_error(qidev, iv_dma)) {
4574 + dev_err(qidev, "unable to map IV\n");
4575 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
4577 + qi_cache_free(edesc);
4578 + return ERR_PTR(-ENOMEM);
4581 + edesc->src_nents = src_nents;
4582 + edesc->dst_nents = dst_nents;
4584 + edesc->iv_dma = iv_dma;
4585 + edesc->drv_req.app_ctx = req;
4586 + edesc->drv_req.cbk = tls_done;
4587 + edesc->drv_req.drv_ctx = drv_ctx;
4589 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
4592 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
4593 + qm_sg_index += mapped_src_nents;
4595 + if (mapped_dst_nents > 1)
4596 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
4599 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
4600 + if (dma_mapping_error(qidev, qm_sg_dma)) {
4601 + dev_err(qidev, "unable to map S/G table\n");
4602 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
4603 + ivsize, op_type, 0, 0);
4604 + qi_cache_free(edesc);
4605 + return ERR_PTR(-ENOMEM);
4608 + edesc->qm_sg_dma = qm_sg_dma;
4609 + edesc->qm_sg_bytes = qm_sg_bytes;
4611 + out_len = req->cryptlen + (encrypt ? authsize : 0);
4612 + in_len = ivsize + req->assoclen + req->cryptlen;
4614 + fd_sgt = &edesc->drv_req.fd_sgt[0];
4616 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
4618 + if (req->dst == req->src)
4619 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
4620 + (sg_nents_for_len(req->src, req->assoclen) +
4621 + 1) * sizeof(*sg_table), out_len, 0);
4622 + else if (mapped_dst_nents == 1)
4623 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
4625 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
4626 + qm_sg_index, out_len, 0);
4631 +static int tls_crypt(struct aead_request *req, bool encrypt)
4633 + struct tls_edesc *edesc;
4634 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
4635 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4638 + if (unlikely(caam_congested))
4641 + edesc = tls_edesc_alloc(req, encrypt);
4642 + if (IS_ERR_OR_NULL(edesc))
4643 + return PTR_ERR(edesc);
4645 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
4647 + ret = -EINPROGRESS;
4649 + tls_unmap(ctx->qidev, edesc, req);
4650 + qi_cache_free(edesc);
4656 +static int tls_encrypt(struct aead_request *req)
4658 + return tls_crypt(req, true);
4661 +static int tls_decrypt(struct aead_request *req)
4663 + return tls_crypt(req, false);
4666 static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
4668 struct ablkcipher_edesc *edesc;
4669 @@ -900,7 +1703,24 @@ static struct ablkcipher_edesc *ablkciph
4670 qm_sg_ents = 1 + mapped_src_nents;
4671 dst_sg_idx = qm_sg_ents;
4673 - qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
4675 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
4676 + * the end of the table by allocating more S/G entries. Logic:
4677 + * if (src != dst && output S/G)
4678 + * pad output S/G, if needed
4679 + * else if (src == dst && S/G)
4680 + * overlapping S/Gs; pad one of them
4681 + * else if (input S/G) ...
4682 + * pad input S/G, if needed
4684 + if (mapped_dst_nents > 1)
4685 + qm_sg_ents += ALIGN(mapped_dst_nents, 4);
4686 + else if ((req->src == req->dst) && (mapped_src_nents > 1))
4687 + qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
4688 + 1 + ALIGN(mapped_src_nents, 4));
4690 + qm_sg_ents = ALIGN(qm_sg_ents, 4);
4692 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
4693 if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
4694 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
4695 @@ -1308,6 +2128,61 @@ static struct caam_alg_template driver_a
4698 static struct caam_aead_alg driver_aeads[] = {
4702 + .cra_name = "rfc4106(gcm(aes))",
4703 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
4704 + .cra_blocksize = 1,
4706 + .setkey = rfc4106_setkey,
4707 + .setauthsize = rfc4106_setauthsize,
4708 + .encrypt = ipsec_gcm_encrypt,
4709 + .decrypt = ipsec_gcm_decrypt,
4711 + .maxauthsize = AES_BLOCK_SIZE,
4714 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4720 + .cra_name = "rfc4543(gcm(aes))",
4721 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
4722 + .cra_blocksize = 1,
4724 + .setkey = rfc4543_setkey,
4725 + .setauthsize = rfc4543_setauthsize,
4726 + .encrypt = ipsec_gcm_encrypt,
4727 + .decrypt = ipsec_gcm_decrypt,
4729 + .maxauthsize = AES_BLOCK_SIZE,
4732 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4735 + /* Galois Counter Mode */
4739 + .cra_name = "gcm(aes)",
4740 + .cra_driver_name = "gcm-aes-caam-qi",
4741 + .cra_blocksize = 1,
4743 + .setkey = gcm_setkey,
4744 + .setauthsize = gcm_setauthsize,
4745 + .encrypt = aead_encrypt,
4746 + .decrypt = aead_decrypt,
4748 + .maxauthsize = AES_BLOCK_SIZE,
4751 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4754 /* single-pass ipsec_esp descriptor */
4757 @@ -2118,6 +2993,26 @@ static struct caam_aead_alg driver_aeads
4764 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
4765 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
4766 + .cra_blocksize = AES_BLOCK_SIZE,
4768 + .setkey = tls_setkey,
4769 + .setauthsize = tls_setauthsize,
4770 + .encrypt = tls_encrypt,
4771 + .decrypt = tls_decrypt,
4772 + .ivsize = AES_BLOCK_SIZE,
4773 + .maxauthsize = SHA1_DIGEST_SIZE,
4776 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
4777 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4778 + OP_ALG_AAI_HMAC_PRECOMP,
4783 struct caam_crypto_alg {
4784 @@ -2126,9 +3021,21 @@ struct caam_crypto_alg {
4785 struct caam_alg_entry caam;
4788 -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4789 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
4792 struct caam_drv_private *priv;
4793 + struct device *dev;
4794 + /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
4795 + static const u8 digest_size[] = {
4798 + SHA224_DIGEST_SIZE,
4799 + SHA256_DIGEST_SIZE,
4800 + SHA384_DIGEST_SIZE,
4801 + SHA512_DIGEST_SIZE
4806 * distribute tfms across job rings to ensure in-order
4807 @@ -2140,10 +3047,19 @@ static int caam_init_common(struct caam_
4808 return PTR_ERR(ctx->jrdev);
4811 - ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
4813 - if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
4814 - dev_err(ctx->jrdev, "unable to map key\n");
4815 + priv = dev_get_drvdata(ctx->jrdev->parent);
4816 + if (priv->era >= 6 && uses_dkp) {
4817 + ctx->dir = DMA_BIDIRECTIONAL;
4818 + dev = ctx->jrdev->parent;
4820 + ctx->dir = DMA_TO_DEVICE;
4824 + ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
4826 + if (dma_mapping_error(dev, ctx->key_dma)) {
4827 + dev_err(dev, "unable to map key\n");
4828 caam_jr_free(ctx->jrdev);
4831 @@ -2152,8 +3068,23 @@ static int caam_init_common(struct caam_
4832 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4833 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4835 - priv = dev_get_drvdata(ctx->jrdev->parent);
4836 - ctx->qidev = priv->qidev;
4837 + if (ctx->adata.algtype) {
4838 + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
4839 + >> OP_ALG_ALGSEL_SHIFT;
4840 + if (op_id < ARRAY_SIZE(digest_size)) {
4841 + ctx->authsize = digest_size[op_id];
4843 + dev_err(ctx->jrdev,
4844 + "incorrect op_id %d; must be less than %zu\n",
4845 + op_id, ARRAY_SIZE(digest_size));
4846 + caam_jr_free(ctx->jrdev);
4850 + ctx->authsize = 0;
4853 + ctx->qidev = ctx->jrdev->parent;
4855 spin_lock_init(&ctx->lock);
4856 ctx->drv_ctx[ENCRYPT] = NULL;
4857 @@ -2170,7 +3101,7 @@ static int caam_cra_init(struct crypto_t
4859 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
4861 - return caam_init_common(ctx, &caam_alg->caam);
4862 + return caam_init_common(ctx, &caam_alg->caam, false);
4865 static int caam_aead_init(struct crypto_aead *tfm)
4866 @@ -2180,17 +3111,25 @@ static int caam_aead_init(struct crypto_
4868 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
4870 - return caam_init_common(ctx, &caam_alg->caam);
4871 + return caam_init_common(ctx, &caam_alg->caam,
4872 + (alg->setkey == aead_setkey) ||
4873 + (alg->setkey == tls_setkey));
4876 static void caam_exit_common(struct caam_ctx *ctx)
4878 + struct device *dev;
4880 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
4881 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
4882 caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
4884 - dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
4886 + if (ctx->dir == DMA_BIDIRECTIONAL)
4887 + dev = ctx->jrdev->parent;
4891 + dma_unmap_single(dev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
4893 caam_jr_free(ctx->jrdev);
4895 @@ -2206,7 +3145,7 @@ static void caam_aead_exit(struct crypto
4898 static struct list_head alg_list;
4899 -static void __exit caam_qi_algapi_exit(void)
4900 +void caam_qi_algapi_exit(void)
4902 struct caam_crypto_alg *t_alg, *n;
4904 @@ -2282,53 +3221,48 @@ static void caam_aead_alg_init(struct ca
4905 alg->exit = caam_aead_exit;
4908 -static int __init caam_qi_algapi_init(void)
4909 +int caam_qi_algapi_init(struct device *ctrldev)
4911 - struct device_node *dev_node;
4912 - struct platform_device *pdev;
4913 - struct device *ctrldev;
4914 - struct caam_drv_private *priv;
4915 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
4917 - u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
4918 + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
4919 unsigned int md_limit = SHA512_DIGEST_SIZE;
4920 bool registered = false;
4922 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
4924 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
4929 - pdev = of_find_device_by_node(dev_node);
4930 - of_node_put(dev_node);
4934 - ctrldev = &pdev->dev;
4935 - priv = dev_get_drvdata(ctrldev);
4938 - * If priv is NULL, it's probably because the caam driver wasn't
4939 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
4941 - if (!priv || !priv->qi_present)
4944 INIT_LIST_HEAD(&alg_list);
4947 * Register crypto algorithms the device supports.
4948 * First, detect presence and attributes of DES, AES, and MD blocks.
4950 - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4951 - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4952 - des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
4953 - aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
4954 - md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4955 + if (priv->era < 10) {
4956 + u32 cha_vid, cha_inst;
4958 + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4959 + aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
4960 + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4962 + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4963 + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
4964 + CHA_ID_LS_DES_SHIFT;
4965 + aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
4966 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4970 + aesa = rd_reg32(&priv->ctrl->vreg.aesa);
4971 + mdha = rd_reg32(&priv->ctrl->vreg.mdha);
4973 + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
4974 + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
4976 + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
4977 + aes_inst = aesa & CHA_VER_NUM_MASK;
4978 + md_inst = mdha & CHA_VER_NUM_MASK;
4981 /* If MD is present, limit digest size based on LP256 */
4982 - if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
4983 + if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
4984 md_limit = SHA256_DIGEST_SIZE;
4986 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4987 @@ -2349,14 +3283,14 @@ static int __init caam_qi_algapi_init(vo
4988 t_alg = caam_alg_alloc(alg);
4989 if (IS_ERR(t_alg)) {
4990 err = PTR_ERR(t_alg);
4991 - dev_warn(priv->qidev, "%s alg allocation failed\n",
4992 + dev_warn(ctrldev, "%s alg allocation failed\n",
4997 err = crypto_register_alg(&t_alg->crypto_alg);
4999 - dev_warn(priv->qidev, "%s alg registration failed\n",
5000 + dev_warn(ctrldev, "%s alg registration failed\n",
5001 t_alg->crypto_alg.cra_driver_name);
5004 @@ -2388,8 +3322,7 @@ static int __init caam_qi_algapi_init(vo
5005 * Check support for AES algorithms not available
5008 - if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
5009 - (alg_aai == OP_ALG_AAI_GCM))
5010 + if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
5014 @@ -2414,14 +3347,7 @@ static int __init caam_qi_algapi_init(vo
5018 - dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
5019 + dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
5024 -module_init(caam_qi_algapi_init);
5025 -module_exit(caam_qi_algapi_exit);
5027 -MODULE_LICENSE("GPL");
5028 -MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
5029 -MODULE_AUTHOR("Freescale Semiconductor");
5031 +++ b/drivers/crypto/caam/caamalg_qi2.c
5033 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
5035 + * Copyright 2015-2016 Freescale Semiconductor Inc.
5036 + * Copyright 2017-2018 NXP
5039 +#include <linux/fsl/mc.h>
5040 +#include "compat.h"
5042 +#include "caamalg_qi2.h"
5043 +#include "dpseci_cmd.h"
5044 +#include "desc_constr.h"
5046 +#include "sg_sw_sec4.h"
5047 +#include "sg_sw_qm2.h"
5048 +#include "key_gen.h"
5049 +#include "caamalg_desc.h"
5050 +#include "caamhash_desc.h"
5051 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
5052 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
5054 +#define CAAM_CRA_PRIORITY 2000
5056 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
5057 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
5058 + SHA512_DIGEST_SIZE * 2)
5061 + * This is a cache of buffers, from which the users of CAAM QI driver
5062 + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
5063 + * NOTE: A more elegant solution would be to have some headroom in the frames
5064 + * being processed. This can be added by the dpaa2-eth driver. This would
5065 + * pose a problem for userspace application processing which cannot
5066 + * know of this limitation. So for now, this will work.
5067 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
5069 +static struct kmem_cache *qi_cache;
5071 +struct caam_alg_entry {
5072 + struct device *dev;
5073 + int class1_alg_type;
5074 + int class2_alg_type;
5079 +struct caam_aead_alg {
5080 + struct aead_alg aead;
5081 + struct caam_alg_entry caam;
5085 +struct caam_skcipher_alg {
5086 + struct skcipher_alg skcipher;
5087 + struct caam_alg_entry caam;
5092 + * caam_ctx - per-session context
5093 + * @flc: Flow Contexts array
5094 + * @key: virtual address of the key(s): [authentication key], encryption key
5095 + * @flc_dma: I/O virtual addresses of the Flow Contexts
5096 + * @key_dma: I/O virtual address of the key
5097 + * @dir: DMA direction for mapping key and Flow Contexts
5098 + * @dev: dpseci device
5099 + * @adata: authentication algorithm details
5100 + * @cdata: encryption algorithm details
5101 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
5104 + struct caam_flc flc[NUM_OP];
5105 + u8 key[CAAM_MAX_KEY_SIZE];
5106 + dma_addr_t flc_dma[NUM_OP];
5107 + dma_addr_t key_dma;
5108 + enum dma_data_direction dir;
5109 + struct device *dev;
5110 + struct alginfo adata;
5111 + struct alginfo cdata;
5112 + unsigned int authsize;
5115 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
5116 + dma_addr_t iova_addr)
5118 + phys_addr_t phys_addr;
5120 + phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
5123 + return phys_to_virt(phys_addr);
5127 + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
5129 + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
5130 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
5131 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
5132 + * hosting 16 SG entries.
5134 + * @flags - flags that would be used for the equivalent kmalloc(..) call
5136 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
5138 +static inline void *qi_cache_zalloc(gfp_t flags)
5140 + return kmem_cache_zalloc(qi_cache, flags);
5144 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
5146 + * @obj - buffer previously allocated by qi_cache_zalloc
5148 + * No checking is done; this is a passthrough call to
5149 + * kmem_cache_free(...)
5151 +static inline void qi_cache_free(void *obj)
5153 + kmem_cache_free(qi_cache, obj);
5156 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
5158 + switch (crypto_tfm_alg_type(areq->tfm)) {
5159 + case CRYPTO_ALG_TYPE_SKCIPHER:
5160 + return skcipher_request_ctx(skcipher_request_cast(areq));
5161 + case CRYPTO_ALG_TYPE_AEAD:
5162 + return aead_request_ctx(container_of(areq, struct aead_request,
5164 + case CRYPTO_ALG_TYPE_AHASH:
5165 + return ahash_request_ctx(ahash_request_cast(areq));
5167 + return ERR_PTR(-EINVAL);
5171 +static void caam_unmap(struct device *dev, struct scatterlist *src,
5172 + struct scatterlist *dst, int src_nents,
5173 + int dst_nents, dma_addr_t iv_dma, int ivsize,
5174 + dma_addr_t qm_sg_dma, int qm_sg_bytes)
5178 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
5179 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
5181 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
5185 + dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
5188 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
5191 +static int aead_set_sh_desc(struct crypto_aead *aead)
5193 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
5194 + typeof(*alg), aead);
5195 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5196 + unsigned int ivsize = crypto_aead_ivsize(aead);
5197 + struct device *dev = ctx->dev;
5198 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5199 + struct caam_flc *flc;
5201 + u32 ctx1_iv_off = 0;
5202 + u32 *nonce = NULL;
5203 + unsigned int data_len[2];
5205 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
5206 + OP_ALG_AAI_CTR_MOD128);
5207 + const bool is_rfc3686 = alg->caam.rfc3686;
5209 + if (!ctx->cdata.keylen || !ctx->authsize)
5213 + * AES-CTR needs to load IV in CONTEXT1 reg
5214 + * at an offset of 128bits (16bytes)
5215 + * CONTEXT1[255:128] = IV
5221 + * RFC3686 specific:
5222 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
5225 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
5226 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
5227 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
5230 + data_len[0] = ctx->adata.keylen_pad;
5231 + data_len[1] = ctx->cdata.keylen;
5233 + /* aead_encrypt shared descriptor */
5234 + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
5235 + DESC_QI_AEAD_ENC_LEN) +
5236 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
5237 + DESC_JOB_IO_LEN, data_len, &inl_mask,
5238 + ARRAY_SIZE(data_len)) < 0)
5242 + ctx->adata.key_virt = ctx->key;
5244 + ctx->adata.key_dma = ctx->key_dma;
5247 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
5249 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5251 + ctx->adata.key_inline = !!(inl_mask & 1);
5252 + ctx->cdata.key_inline = !!(inl_mask & 2);
5254 + flc = &ctx->flc[ENCRYPT];
5255 + desc = flc->sh_desc;
5257 + if (alg->caam.geniv)
5258 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
5259 + ivsize, ctx->authsize, is_rfc3686,
5260 + nonce, ctx1_iv_off, true,
5261 + priv->sec_attr.era);
5263 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
5264 + ivsize, ctx->authsize, is_rfc3686, nonce,
5265 + ctx1_iv_off, true, priv->sec_attr.era);
5267 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5268 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5269 + sizeof(flc->flc) + desc_bytes(desc),
5272 + /* aead_decrypt shared descriptor */
5273 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
5274 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
5275 + DESC_JOB_IO_LEN, data_len, &inl_mask,
5276 + ARRAY_SIZE(data_len)) < 0)
5280 + ctx->adata.key_virt = ctx->key;
5282 + ctx->adata.key_dma = ctx->key_dma;
5285 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
5287 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5289 + ctx->adata.key_inline = !!(inl_mask & 1);
5290 + ctx->cdata.key_inline = !!(inl_mask & 2);
5292 + flc = &ctx->flc[DECRYPT];
5293 + desc = flc->sh_desc;
5294 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
5295 + ivsize, ctx->authsize, alg->caam.geniv,
5296 + is_rfc3686, nonce, ctx1_iv_off, true,
5297 + priv->sec_attr.era);
5298 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5299 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5300 + sizeof(flc->flc) + desc_bytes(desc),
5306 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
5308 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
5310 + ctx->authsize = authsize;
5311 + aead_set_sh_desc(authenc);
5316 +struct split_key_sh_result {
5317 + struct completion completion;
5319 + struct device *dev;
5322 +static void split_key_sh_done(void *cbk_ctx, u32 err)
5324 + struct split_key_sh_result *res = cbk_ctx;
5327 + dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
5331 + caam_qi2_strstatus(res->dev, err);
5334 + complete(&res->completion);
5337 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
5338 + unsigned int keylen)
5340 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5341 + struct device *dev = ctx->dev;
5342 + struct crypto_authenc_keys keys;
5344 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
5348 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
5349 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
5351 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
5352 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
5355 + ctx->adata.keylen = keys.authkeylen;
5356 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
5357 + OP_ALG_ALGSEL_MASK);
5359 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
5362 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
5363 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
5364 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
5365 + keys.enckeylen, ctx->dir);
5367 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
5368 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
5369 + ctx->adata.keylen_pad + keys.enckeylen, 1);
5372 + ctx->cdata.keylen = keys.enckeylen;
5374 + return aead_set_sh_desc(aead);
5376 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
5380 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
5383 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
5384 + struct caam_request *req_ctx = aead_request_ctx(req);
5385 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
5386 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
5387 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5388 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
5389 + typeof(*alg), aead);
5390 + struct device *dev = ctx->dev;
5391 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
5392 + GFP_KERNEL : GFP_ATOMIC;
5393 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
5394 + struct aead_edesc *edesc;
5395 + dma_addr_t qm_sg_dma, iv_dma = 0;
5397 + unsigned int authsize = ctx->authsize;
5398 + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
5399 + int in_len, out_len;
5400 + struct dpaa2_sg_entry *sg_table;
5402 + /* allocate space for base edesc, link tables and IV */
5403 + edesc = qi_cache_zalloc(GFP_DMA | flags);
5404 + if (unlikely(!edesc)) {
5405 + dev_err(dev, "could not allocate extended descriptor\n");
5406 + return ERR_PTR(-ENOMEM);
5409 + if (unlikely(req->dst != req->src)) {
5410 + src_nents = sg_nents_for_len(req->src, req->assoclen +
5412 + if (unlikely(src_nents < 0)) {
5413 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5414 + req->assoclen + req->cryptlen);
5415 + qi_cache_free(edesc);
5416 + return ERR_PTR(src_nents);
5419 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
5421 + (encrypt ? authsize :
5423 + if (unlikely(dst_nents < 0)) {
5424 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
5425 + req->assoclen + req->cryptlen +
5426 + (encrypt ? authsize : (-authsize)));
5427 + qi_cache_free(edesc);
5428 + return ERR_PTR(dst_nents);
5432 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5434 + if (unlikely(!mapped_src_nents)) {
5435 + dev_err(dev, "unable to map source\n");
5436 + qi_cache_free(edesc);
5437 + return ERR_PTR(-ENOMEM);
5440 + mapped_src_nents = 0;
5443 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
5445 + if (unlikely(!mapped_dst_nents)) {
5446 + dev_err(dev, "unable to map destination\n");
5447 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
5448 + qi_cache_free(edesc);
5449 + return ERR_PTR(-ENOMEM);
5452 + src_nents = sg_nents_for_len(req->src, req->assoclen +
5454 + (encrypt ? authsize : 0));
5455 + if (unlikely(src_nents < 0)) {
5456 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5457 + req->assoclen + req->cryptlen +
5458 + (encrypt ? authsize : 0));
5459 + qi_cache_free(edesc);
5460 + return ERR_PTR(src_nents);
5463 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5464 + DMA_BIDIRECTIONAL);
5465 + if (unlikely(!mapped_src_nents)) {
5466 + dev_err(dev, "unable to map source\n");
5467 + qi_cache_free(edesc);
5468 + return ERR_PTR(-ENOMEM);
5472 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
5473 + ivsize = crypto_aead_ivsize(aead);
5476 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
5477 + * Input is not contiguous.
5479 + qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
5480 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
5481 + sg_table = &edesc->sgt[0];
5482 + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
5483 + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
5484 + CAAM_QI_MEMCACHE_SIZE)) {
5485 + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
5486 + qm_sg_nents, ivsize);
5487 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
5489 + qi_cache_free(edesc);
5490 + return ERR_PTR(-ENOMEM);
5494 + u8 *iv = (u8 *)(sg_table + qm_sg_nents);
5496 + /* Make sure IV is located in a DMAable area */
5497 + memcpy(iv, req->iv, ivsize);
5499 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
5500 + if (dma_mapping_error(dev, iv_dma)) {
5501 + dev_err(dev, "unable to map IV\n");
5502 + caam_unmap(dev, req->src, req->dst, src_nents,
5503 + dst_nents, 0, 0, 0, 0);
5504 + qi_cache_free(edesc);
5505 + return ERR_PTR(-ENOMEM);
5509 + edesc->src_nents = src_nents;
5510 + edesc->dst_nents = dst_nents;
5511 + edesc->iv_dma = iv_dma;
5513 + if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
5514 + OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
5516 + * The associated data comes already with the IV but we need
5517 + * to skip it when we authenticate or encrypt...
5519 + edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
5521 + edesc->assoclen = cpu_to_caam32(req->assoclen);
5522 + edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
5524 + if (dma_mapping_error(dev, edesc->assoclen_dma)) {
5525 + dev_err(dev, "unable to map assoclen\n");
5526 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
5527 + iv_dma, ivsize, 0, 0);
5528 + qi_cache_free(edesc);
5529 + return ERR_PTR(-ENOMEM);
5532 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
5535 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
5538 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
5539 + qm_sg_index += mapped_src_nents;
5541 + if (mapped_dst_nents > 1)
5542 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
5545 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
5546 + if (dma_mapping_error(dev, qm_sg_dma)) {
5547 + dev_err(dev, "unable to map S/G table\n");
5548 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
5549 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
5550 + iv_dma, ivsize, 0, 0);
5551 + qi_cache_free(edesc);
5552 + return ERR_PTR(-ENOMEM);
5555 + edesc->qm_sg_dma = qm_sg_dma;
5556 + edesc->qm_sg_bytes = qm_sg_bytes;
5558 + out_len = req->assoclen + req->cryptlen +
5559 + (encrypt ? ctx->authsize : (-ctx->authsize));
5560 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
5562 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
5563 + dpaa2_fl_set_final(in_fle, true);
5564 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
5565 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
5566 + dpaa2_fl_set_len(in_fle, in_len);
5568 + if (req->dst == req->src) {
5569 + if (mapped_src_nents == 1) {
5570 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5571 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
5573 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5574 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
5575 + (1 + !!ivsize) * sizeof(*sg_table));
5577 + } else if (mapped_dst_nents == 1) {
5578 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5579 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
5581 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5582 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
5583 + sizeof(*sg_table));
5586 + dpaa2_fl_set_len(out_fle, out_len);
5591 +static int chachapoly_set_sh_desc(struct crypto_aead *aead)
5593 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5594 + unsigned int ivsize = crypto_aead_ivsize(aead);
5595 + struct device *dev = ctx->dev;
5596 + struct caam_flc *flc;
5599 + if (!ctx->cdata.keylen || !ctx->authsize)
5602 + flc = &ctx->flc[ENCRYPT];
5603 + desc = flc->sh_desc;
5604 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
5605 + ctx->authsize, true, true);
5606 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5607 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5608 + sizeof(flc->flc) + desc_bytes(desc),
5611 + flc = &ctx->flc[DECRYPT];
5612 + desc = flc->sh_desc;
5613 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
5614 + ctx->authsize, false, true);
5615 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5616 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5617 + sizeof(flc->flc) + desc_bytes(desc),
5623 +static int chachapoly_setauthsize(struct crypto_aead *aead,
5624 + unsigned int authsize)
5626 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5628 + if (authsize != POLY1305_DIGEST_SIZE)
5631 + ctx->authsize = authsize;
5632 + return chachapoly_set_sh_desc(aead);
5635 +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
5636 + unsigned int keylen)
5638 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5639 + unsigned int ivsize = crypto_aead_ivsize(aead);
5640 + unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
5642 + if (keylen != CHACHA20_KEY_SIZE + saltlen) {
5643 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
5647 + ctx->cdata.key_virt = key;
5648 + ctx->cdata.keylen = keylen - saltlen;
5650 + return chachapoly_set_sh_desc(aead);
5653 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
5656 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
5657 + unsigned int blocksize = crypto_aead_blocksize(tls);
5658 + unsigned int padsize, authsize;
5659 + struct caam_request *req_ctx = aead_request_ctx(req);
5660 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
5661 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
5662 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5663 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
5664 + typeof(*alg), aead);
5665 + struct device *dev = ctx->dev;
5666 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
5667 + GFP_KERNEL : GFP_ATOMIC;
5668 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
5669 + struct tls_edesc *edesc;
5670 + dma_addr_t qm_sg_dma, iv_dma = 0;
5673 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
5674 + int in_len, out_len;
5675 + struct dpaa2_sg_entry *sg_table;
5676 + struct scatterlist *dst;
5679 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
5681 + authsize = ctx->authsize + padsize;
5683 + authsize = ctx->authsize;
5686 + /* allocate space for base edesc, link tables and IV */
5687 + edesc = qi_cache_zalloc(GFP_DMA | flags);
5688 + if (unlikely(!edesc)) {
5689 + dev_err(dev, "could not allocate extended descriptor\n");
5690 + return ERR_PTR(-ENOMEM);
5693 + if (likely(req->src == req->dst)) {
5694 + src_nents = sg_nents_for_len(req->src, req->assoclen +
5696 + (encrypt ? authsize : 0));
5697 + if (unlikely(src_nents < 0)) {
5698 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5699 + req->assoclen + req->cryptlen +
5700 + (encrypt ? authsize : 0));
5701 + qi_cache_free(edesc);
5702 + return ERR_PTR(src_nents);
5705 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5706 + DMA_BIDIRECTIONAL);
5707 + if (unlikely(!mapped_src_nents)) {
5708 + dev_err(dev, "unable to map source\n");
5709 + qi_cache_free(edesc);
5710 + return ERR_PTR(-ENOMEM);
5714 + src_nents = sg_nents_for_len(req->src, req->assoclen +
5716 + if (unlikely(src_nents < 0)) {
5717 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5718 + req->assoclen + req->cryptlen);
5719 + qi_cache_free(edesc);
5720 + return ERR_PTR(src_nents);
5723 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
5724 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
5725 + (encrypt ? authsize : 0));
5726 + if (unlikely(dst_nents < 0)) {
5727 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
5729 + (encrypt ? authsize : 0));
5730 + qi_cache_free(edesc);
5731 + return ERR_PTR(dst_nents);
5735 + mapped_src_nents = dma_map_sg(dev, req->src,
5736 + src_nents, DMA_TO_DEVICE);
5737 + if (unlikely(!mapped_src_nents)) {
5738 + dev_err(dev, "unable to map source\n");
5739 + qi_cache_free(edesc);
5740 + return ERR_PTR(-ENOMEM);
5743 + mapped_src_nents = 0;
5746 + mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
5748 + if (unlikely(!mapped_dst_nents)) {
5749 + dev_err(dev, "unable to map destination\n");
5750 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
5751 + qi_cache_free(edesc);
5752 + return ERR_PTR(-ENOMEM);
5757 + * Create S/G table: IV, src, dst.
5758 + * Input is not contiguous.
5760 + qm_sg_ents = 1 + mapped_src_nents +
5761 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
5762 + sg_table = &edesc->sgt[0];
5763 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
5765 + ivsize = crypto_aead_ivsize(tls);
5766 + iv = (u8 *)(sg_table + qm_sg_ents);
5767 + /* Make sure IV is located in a DMAable area */
5768 + memcpy(iv, req->iv, ivsize);
5769 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
5770 + if (dma_mapping_error(dev, iv_dma)) {
5771 + dev_err(dev, "unable to map IV\n");
5772 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
5774 + qi_cache_free(edesc);
5775 + return ERR_PTR(-ENOMEM);
5778 + edesc->src_nents = src_nents;
5779 + edesc->dst_nents = dst_nents;
5781 + edesc->iv_dma = iv_dma;
5783 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
5786 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
5787 + qm_sg_index += mapped_src_nents;
5789 + if (mapped_dst_nents > 1)
5790 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
5793 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
5794 + if (dma_mapping_error(dev, qm_sg_dma)) {
5795 + dev_err(dev, "unable to map S/G table\n");
5796 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
5798 + qi_cache_free(edesc);
5799 + return ERR_PTR(-ENOMEM);
5802 + edesc->qm_sg_dma = qm_sg_dma;
5803 + edesc->qm_sg_bytes = qm_sg_bytes;
5805 + out_len = req->cryptlen + (encrypt ? authsize : 0);
5806 + in_len = ivsize + req->assoclen + req->cryptlen;
5808 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
5809 + dpaa2_fl_set_final(in_fle, true);
5810 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
5811 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
5812 + dpaa2_fl_set_len(in_fle, in_len);
5814 + if (req->dst == req->src) {
5815 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5816 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
5817 + (sg_nents_for_len(req->src, req->assoclen) +
5818 + 1) * sizeof(*sg_table));
5819 + } else if (mapped_dst_nents == 1) {
5820 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5821 + dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
5823 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5824 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
5825 + sizeof(*sg_table));
5828 + dpaa2_fl_set_len(out_fle, out_len);
5833 +static int tls_set_sh_desc(struct crypto_aead *tls)
5835 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5836 + unsigned int ivsize = crypto_aead_ivsize(tls);
5837 + unsigned int blocksize = crypto_aead_blocksize(tls);
5838 + struct device *dev = ctx->dev;
5839 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5840 + struct caam_flc *flc;
5842 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
5843 + unsigned int data_len[2];
5846 + if (!ctx->cdata.keylen || !ctx->authsize)
5850 + * TLS 1.0 encrypt shared descriptor
5851 + * Job Descriptor and Shared Descriptor
5852 + * must fit into the 64-word Descriptor h/w Buffer
5854 + data_len[0] = ctx->adata.keylen_pad;
5855 + data_len[1] = ctx->cdata.keylen;
5857 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
5858 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
5862 + ctx->adata.key_virt = ctx->key;
5864 + ctx->adata.key_dma = ctx->key_dma;
5867 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
5869 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5871 + ctx->adata.key_inline = !!(inl_mask & 1);
5872 + ctx->cdata.key_inline = !!(inl_mask & 2);
5874 + flc = &ctx->flc[ENCRYPT];
5875 + desc = flc->sh_desc;
5876 + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
5877 + assoclen, ivsize, ctx->authsize, blocksize,
5878 + priv->sec_attr.era);
5879 + flc->flc[1] = cpu_to_caam32(desc_len(desc));
5880 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5881 + sizeof(flc->flc) + desc_bytes(desc),
5885 + * TLS 1.0 decrypt shared descriptor
5886 + * Keys do not fit inline, regardless of algorithms used
5888 + ctx->adata.key_inline = false;
5889 + ctx->adata.key_dma = ctx->key_dma;
5890 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5892 + flc = &ctx->flc[DECRYPT];
5893 + desc = flc->sh_desc;
5894 + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
5895 + ctx->authsize, blocksize, priv->sec_attr.era);
5896 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5897 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5898 + sizeof(flc->flc) + desc_bytes(desc),
5904 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
5905 + unsigned int keylen)
5907 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5908 + struct device *dev = ctx->dev;
5909 + struct crypto_authenc_keys keys;
5911 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
5915 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
5916 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
5918 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
5919 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
5922 + ctx->adata.keylen = keys.authkeylen;
5923 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
5924 + OP_ALG_ALGSEL_MASK);
5926 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
5929 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
5930 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
5931 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
5932 + keys.enckeylen, ctx->dir);
5934 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
5935 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
5936 + ctx->adata.keylen_pad + keys.enckeylen, 1);
5939 + ctx->cdata.keylen = keys.enckeylen;
5941 + return tls_set_sh_desc(tls);
5943 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
5947 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
5949 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5951 + ctx->authsize = authsize;
5952 + tls_set_sh_desc(tls);
5957 +static int gcm_set_sh_desc(struct crypto_aead *aead)
5959 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5960 + struct device *dev = ctx->dev;
5961 + unsigned int ivsize = crypto_aead_ivsize(aead);
5962 + struct caam_flc *flc;
5964 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
5965 + ctx->cdata.keylen;
5967 + if (!ctx->cdata.keylen || !ctx->authsize)
5971 + * AES GCM encrypt shared descriptor
5972 + * Job Descriptor and Shared Descriptor
5973 + * must fit into the 64-word Descriptor h/w Buffer
5975 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
5976 + ctx->cdata.key_inline = true;
5977 + ctx->cdata.key_virt = ctx->key;
5979 + ctx->cdata.key_inline = false;
5980 + ctx->cdata.key_dma = ctx->key_dma;
5983 + flc = &ctx->flc[ENCRYPT];
5984 + desc = flc->sh_desc;
5985 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
5986 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5987 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5988 + sizeof(flc->flc) + desc_bytes(desc),
5992 + * Job Descriptor and Shared Descriptors
5993 + * must all fit into the 64-word Descriptor h/w Buffer
5995 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
5996 + ctx->cdata.key_inline = true;
5997 + ctx->cdata.key_virt = ctx->key;
5999 + ctx->cdata.key_inline = false;
6000 + ctx->cdata.key_dma = ctx->key_dma;
6003 + flc = &ctx->flc[DECRYPT];
6004 + desc = flc->sh_desc;
6005 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
6006 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6007 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6008 + sizeof(flc->flc) + desc_bytes(desc),
6014 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
6016 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
6018 + ctx->authsize = authsize;
6019 + gcm_set_sh_desc(authenc);
6024 +static int gcm_setkey(struct crypto_aead *aead,
6025 + const u8 *key, unsigned int keylen)
6027 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6028 + struct device *dev = ctx->dev;
6031 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6032 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6035 + memcpy(ctx->key, key, keylen);
6036 + dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
6037 + ctx->cdata.keylen = keylen;
6039 + return gcm_set_sh_desc(aead);
6042 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
6044 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6045 + struct device *dev = ctx->dev;
6046 + unsigned int ivsize = crypto_aead_ivsize(aead);
6047 + struct caam_flc *flc;
6049 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
6050 + ctx->cdata.keylen;
6052 + if (!ctx->cdata.keylen || !ctx->authsize)
6055 + ctx->cdata.key_virt = ctx->key;
6058 + * RFC4106 encrypt shared descriptor
6059 + * Job Descriptor and Shared Descriptor
6060 + * must fit into the 64-word Descriptor h/w Buffer
6062 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
6063 + ctx->cdata.key_inline = true;
6065 + ctx->cdata.key_inline = false;
6066 + ctx->cdata.key_dma = ctx->key_dma;
6069 + flc = &ctx->flc[ENCRYPT];
6070 + desc = flc->sh_desc;
6071 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
6073 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6074 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6075 + sizeof(flc->flc) + desc_bytes(desc),
6079 + * Job Descriptor and Shared Descriptors
6080 + * must all fit into the 64-word Descriptor h/w Buffer
6082 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
6083 + ctx->cdata.key_inline = true;
6085 + ctx->cdata.key_inline = false;
6086 + ctx->cdata.key_dma = ctx->key_dma;
6089 + flc = &ctx->flc[DECRYPT];
6090 + desc = flc->sh_desc;
6091 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
6093 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6094 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6095 + sizeof(flc->flc) + desc_bytes(desc),
6101 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
6102 + unsigned int authsize)
6104 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
6106 + ctx->authsize = authsize;
6107 + rfc4106_set_sh_desc(authenc);
6112 +static int rfc4106_setkey(struct crypto_aead *aead,
6113 + const u8 *key, unsigned int keylen)
6115 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6116 + struct device *dev = ctx->dev;
6122 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6123 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6126 + memcpy(ctx->key, key, keylen);
6128 + * The last four bytes of the key material are used as the salt value
6129 + * in the nonce. Update the AES key length.
6131 + ctx->cdata.keylen = keylen - 4;
6132 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
6135 + return rfc4106_set_sh_desc(aead);
6138 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
6140 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6141 + struct device *dev = ctx->dev;
6142 + unsigned int ivsize = crypto_aead_ivsize(aead);
6143 + struct caam_flc *flc;
6145 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
6146 + ctx->cdata.keylen;
6148 + if (!ctx->cdata.keylen || !ctx->authsize)
6151 + ctx->cdata.key_virt = ctx->key;
6154 + * RFC4543 encrypt shared descriptor
6155 + * Job Descriptor and Shared Descriptor
6156 + * must fit into the 64-word Descriptor h/w Buffer
6158 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
6159 + ctx->cdata.key_inline = true;
6161 + ctx->cdata.key_inline = false;
6162 + ctx->cdata.key_dma = ctx->key_dma;
6165 + flc = &ctx->flc[ENCRYPT];
6166 + desc = flc->sh_desc;
6167 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
6169 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6170 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6171 + sizeof(flc->flc) + desc_bytes(desc),
6175 + * Job Descriptor and Shared Descriptors
6176 + * must all fit into the 64-word Descriptor h/w Buffer
6178 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
6179 + ctx->cdata.key_inline = true;
6181 + ctx->cdata.key_inline = false;
6182 + ctx->cdata.key_dma = ctx->key_dma;
6185 + flc = &ctx->flc[DECRYPT];
6186 + desc = flc->sh_desc;
6187 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
6189 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6190 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6191 + sizeof(flc->flc) + desc_bytes(desc),
6197 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
6198 + unsigned int authsize)
6200 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
6202 + ctx->authsize = authsize;
6203 + rfc4543_set_sh_desc(authenc);
6208 +static int rfc4543_setkey(struct crypto_aead *aead,
6209 + const u8 *key, unsigned int keylen)
6211 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6212 + struct device *dev = ctx->dev;
6218 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6219 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6222 + memcpy(ctx->key, key, keylen);
6224 + * The last four bytes of the key material are used as the salt value
6225 + * in the nonce. Update the AES key length.
6227 + ctx->cdata.keylen = keylen - 4;
6228 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
6231 + return rfc4543_set_sh_desc(aead);
6234 +static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
6235 + unsigned int keylen)
6237 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6238 + struct caam_skcipher_alg *alg =
6239 + container_of(crypto_skcipher_alg(skcipher),
6240 + struct caam_skcipher_alg, skcipher);
6241 + struct device *dev = ctx->dev;
6242 + struct caam_flc *flc;
6243 + unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
6245 + u32 ctx1_iv_off = 0;
6246 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
6247 + OP_ALG_AAI_CTR_MOD128) &&
6248 + ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
6249 + OP_ALG_ALGSEL_CHACHA20);
6250 + const bool is_rfc3686 = alg->caam.rfc3686;
6253 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6254 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6257 + * AES-CTR needs to load IV in CONTEXT1 reg
6258 + * at an offset of 128bits (16bytes)
6259 + * CONTEXT1[255:128] = IV
6265 + * RFC3686 specific:
6266 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
6267 + * | *key = {KEY, NONCE}
6270 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
6271 + keylen -= CTR_RFC3686_NONCE_SIZE;
6274 + ctx->cdata.keylen = keylen;
6275 + ctx->cdata.key_virt = key;
6276 + ctx->cdata.key_inline = true;
6278 + /* skcipher_encrypt shared descriptor */
6279 + flc = &ctx->flc[ENCRYPT];
6280 + desc = flc->sh_desc;
6281 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
6282 + is_rfc3686, ctx1_iv_off);
6283 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6284 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6285 + sizeof(flc->flc) + desc_bytes(desc),
6288 + /* skcipher_decrypt shared descriptor */
6289 + flc = &ctx->flc[DECRYPT];
6290 + desc = flc->sh_desc;
6291 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
6292 + is_rfc3686, ctx1_iv_off);
6293 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6294 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6295 + sizeof(flc->flc) + desc_bytes(desc),
6301 +static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
6302 + unsigned int keylen)
6304 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6305 + struct device *dev = ctx->dev;
6306 + struct caam_flc *flc;
6309 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
6310 + dev_err(dev, "key size mismatch\n");
6311 + crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
6315 + ctx->cdata.keylen = keylen;
6316 + ctx->cdata.key_virt = key;
6317 + ctx->cdata.key_inline = true;
6319 + /* xts_skcipher_encrypt shared descriptor */
6320 + flc = &ctx->flc[ENCRYPT];
6321 + desc = flc->sh_desc;
6322 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
6323 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6324 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6325 + sizeof(flc->flc) + desc_bytes(desc),
6328 + /* xts_skcipher_decrypt shared descriptor */
6329 + flc = &ctx->flc[DECRYPT];
6330 + desc = flc->sh_desc;
6331 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
6332 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6333 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6334 + sizeof(flc->flc) + desc_bytes(desc),
6340 +static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
6342 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6343 + struct caam_request *req_ctx = skcipher_request_ctx(req);
6344 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
6345 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
6346 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6347 + struct device *dev = ctx->dev;
6348 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
6349 + GFP_KERNEL : GFP_ATOMIC;
6350 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
6351 + struct skcipher_edesc *edesc;
6352 + dma_addr_t iv_dma;
6354 + int ivsize = crypto_skcipher_ivsize(skcipher);
6355 + int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
6356 + struct dpaa2_sg_entry *sg_table;
6358 + src_nents = sg_nents_for_len(req->src, req->cryptlen);
6359 + if (unlikely(src_nents < 0)) {
6360 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
6362 + return ERR_PTR(src_nents);
6365 + if (unlikely(req->dst != req->src)) {
6366 + dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
6367 + if (unlikely(dst_nents < 0)) {
6368 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
6370 + return ERR_PTR(dst_nents);
6373 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
6375 + if (unlikely(!mapped_src_nents)) {
6376 + dev_err(dev, "unable to map source\n");
6377 + return ERR_PTR(-ENOMEM);
6380 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
6382 + if (unlikely(!mapped_dst_nents)) {
6383 + dev_err(dev, "unable to map destination\n");
6384 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
6385 + return ERR_PTR(-ENOMEM);
6388 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
6389 + DMA_BIDIRECTIONAL);
6390 + if (unlikely(!mapped_src_nents)) {
6391 + dev_err(dev, "unable to map source\n");
6392 + return ERR_PTR(-ENOMEM);
6396 + qm_sg_ents = 1 + mapped_src_nents;
6397 + dst_sg_idx = qm_sg_ents;
6399 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
6400 + qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
6401 + if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
6402 + ivsize > CAAM_QI_MEMCACHE_SIZE)) {
6403 + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
6404 + qm_sg_ents, ivsize);
6405 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
6407 + return ERR_PTR(-ENOMEM);
6410 + /* allocate space for base edesc, link tables and IV */
6411 + edesc = qi_cache_zalloc(GFP_DMA | flags);
6412 + if (unlikely(!edesc)) {
6413 + dev_err(dev, "could not allocate extended descriptor\n");
6414 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
6416 + return ERR_PTR(-ENOMEM);
6419 + /* Make sure IV is located in a DMAable area */
6420 + sg_table = &edesc->sgt[0];
6421 + iv = (u8 *)(sg_table + qm_sg_ents);
6422 + memcpy(iv, req->iv, ivsize);
6424 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
6425 + if (dma_mapping_error(dev, iv_dma)) {
6426 + dev_err(dev, "unable to map IV\n");
6427 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
6429 + qi_cache_free(edesc);
6430 + return ERR_PTR(-ENOMEM);
6433 + edesc->src_nents = src_nents;
6434 + edesc->dst_nents = dst_nents;
6435 + edesc->iv_dma = iv_dma;
6436 + edesc->qm_sg_bytes = qm_sg_bytes;
6438 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
6439 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
6441 + if (mapped_dst_nents > 1)
6442 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
6445 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
6447 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
6448 + dev_err(dev, "unable to map S/G table\n");
6449 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
6450 + iv_dma, ivsize, 0, 0);
6451 + qi_cache_free(edesc);
6452 + return ERR_PTR(-ENOMEM);
6455 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
6456 + dpaa2_fl_set_final(in_fle, true);
6457 + dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
6458 + dpaa2_fl_set_len(out_fle, req->cryptlen);
6460 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
6461 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
6463 + if (req->src == req->dst) {
6464 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
6465 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
6466 + sizeof(*sg_table));
6467 + } else if (mapped_dst_nents > 1) {
6468 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
6469 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
6470 + sizeof(*sg_table));
6472 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
6473 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
6479 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
6480 + struct aead_request *req)
6482 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6483 + int ivsize = crypto_aead_ivsize(aead);
6485 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
6486 + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
6487 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
6490 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
6491 + struct aead_request *req)
6493 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6494 + int ivsize = crypto_aead_ivsize(tls);
6496 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
6497 + edesc->dst_nents, edesc->iv_dma, ivsize, edesc->qm_sg_dma,
6498 + edesc->qm_sg_bytes);
6501 +static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
6502 + struct skcipher_request *req)
6504 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6505 + int ivsize = crypto_skcipher_ivsize(skcipher);
6507 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
6508 + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
6511 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
6513 + struct crypto_async_request *areq = cbk_ctx;
6514 + struct aead_request *req = container_of(areq, struct aead_request,
6516 + struct caam_request *req_ctx = to_caam_req(areq);
6517 + struct aead_edesc *edesc = req_ctx->edesc;
6518 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6519 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6523 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6526 + if (unlikely(status)) {
6527 + caam_qi2_strstatus(ctx->dev, status);
6531 + aead_unmap(ctx->dev, edesc, req);
6532 + qi_cache_free(edesc);
6533 + aead_request_complete(req, ecode);
6536 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
6538 + struct crypto_async_request *areq = cbk_ctx;
6539 + struct aead_request *req = container_of(areq, struct aead_request,
6541 + struct caam_request *req_ctx = to_caam_req(areq);
6542 + struct aead_edesc *edesc = req_ctx->edesc;
6543 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6544 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6548 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6551 + if (unlikely(status)) {
6552 + caam_qi2_strstatus(ctx->dev, status);
6554 + * verify hw auth check passed else return -EBADMSG
6556 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
6557 + JRSTA_CCBERR_ERRID_ICVCHK)
6563 + aead_unmap(ctx->dev, edesc, req);
6564 + qi_cache_free(edesc);
6565 + aead_request_complete(req, ecode);
6568 +static int aead_encrypt(struct aead_request *req)
6570 + struct aead_edesc *edesc;
6571 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6572 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6573 + struct caam_request *caam_req = aead_request_ctx(req);
6576 + /* allocate extended descriptor */
6577 + edesc = aead_edesc_alloc(req, true);
6578 + if (IS_ERR(edesc))
6579 + return PTR_ERR(edesc);
6581 + caam_req->flc = &ctx->flc[ENCRYPT];
6582 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
6583 + caam_req->cbk = aead_encrypt_done;
6584 + caam_req->ctx = &req->base;
6585 + caam_req->edesc = edesc;
6586 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6587 + if (ret != -EINPROGRESS &&
6588 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6589 + aead_unmap(ctx->dev, edesc, req);
6590 + qi_cache_free(edesc);
6596 +static int aead_decrypt(struct aead_request *req)
6598 + struct aead_edesc *edesc;
6599 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6600 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6601 + struct caam_request *caam_req = aead_request_ctx(req);
6604 + /* allocate extended descriptor */
6605 + edesc = aead_edesc_alloc(req, false);
6606 + if (IS_ERR(edesc))
6607 + return PTR_ERR(edesc);
6609 + caam_req->flc = &ctx->flc[DECRYPT];
6610 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
6611 + caam_req->cbk = aead_decrypt_done;
6612 + caam_req->ctx = &req->base;
6613 + caam_req->edesc = edesc;
6614 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6615 + if (ret != -EINPROGRESS &&
6616 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6617 + aead_unmap(ctx->dev, edesc, req);
6618 + qi_cache_free(edesc);
6624 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
6626 + struct crypto_async_request *areq = cbk_ctx;
6627 + struct aead_request *req = container_of(areq, struct aead_request,
6629 + struct caam_request *req_ctx = to_caam_req(areq);
6630 + struct tls_edesc *edesc = req_ctx->edesc;
6631 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6632 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
6636 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6639 + if (unlikely(status)) {
6640 + caam_qi2_strstatus(ctx->dev, status);
6644 + tls_unmap(ctx->dev, edesc, req);
6645 + qi_cache_free(edesc);
6646 + aead_request_complete(req, ecode);
6649 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
6651 + struct crypto_async_request *areq = cbk_ctx;
6652 + struct aead_request *req = container_of(areq, struct aead_request,
6654 + struct caam_request *req_ctx = to_caam_req(areq);
6655 + struct tls_edesc *edesc = req_ctx->edesc;
6656 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6657 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
6661 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6664 + if (unlikely(status)) {
6665 + caam_qi2_strstatus(ctx->dev, status);
6667 + * verify hw auth check passed else return -EBADMSG
6669 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
6670 + JRSTA_CCBERR_ERRID_ICVCHK)
6676 + tls_unmap(ctx->dev, edesc, req);
6677 + qi_cache_free(edesc);
6678 + aead_request_complete(req, ecode);
6681 +static int tls_encrypt(struct aead_request *req)
6683 + struct tls_edesc *edesc;
6684 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6685 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
6686 + struct caam_request *caam_req = aead_request_ctx(req);
6689 + /* allocate extended descriptor */
6690 + edesc = tls_edesc_alloc(req, true);
6691 + if (IS_ERR(edesc))
6692 + return PTR_ERR(edesc);
6694 + caam_req->flc = &ctx->flc[ENCRYPT];
6695 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
6696 + caam_req->cbk = tls_encrypt_done;
6697 + caam_req->ctx = &req->base;
6698 + caam_req->edesc = edesc;
6699 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6700 + if (ret != -EINPROGRESS &&
6701 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6702 + tls_unmap(ctx->dev, edesc, req);
6703 + qi_cache_free(edesc);
6709 +static int tls_decrypt(struct aead_request *req)
6711 + struct tls_edesc *edesc;
6712 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6713 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
6714 + struct caam_request *caam_req = aead_request_ctx(req);
6717 + /* allocate extended descriptor */
6718 + edesc = tls_edesc_alloc(req, false);
6719 + if (IS_ERR(edesc))
6720 + return PTR_ERR(edesc);
6722 + caam_req->flc = &ctx->flc[DECRYPT];
6723 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
6724 + caam_req->cbk = tls_decrypt_done;
6725 + caam_req->ctx = &req->base;
6726 + caam_req->edesc = edesc;
6727 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6728 + if (ret != -EINPROGRESS &&
6729 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6730 + tls_unmap(ctx->dev, edesc, req);
6731 + qi_cache_free(edesc);
6737 +static int ipsec_gcm_encrypt(struct aead_request *req)
6739 + if (req->assoclen < 8)
6742 + return aead_encrypt(req);
6745 +static int ipsec_gcm_decrypt(struct aead_request *req)
6747 + if (req->assoclen < 8)
6750 + return aead_decrypt(req);
6753 +static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
6755 + struct crypto_async_request *areq = cbk_ctx;
6756 + struct skcipher_request *req = skcipher_request_cast(areq);
6757 + struct caam_request *req_ctx = to_caam_req(areq);
6758 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6759 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6760 + struct skcipher_edesc *edesc = req_ctx->edesc;
6762 + int ivsize = crypto_skcipher_ivsize(skcipher);
6765 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6768 + if (unlikely(status)) {
6769 + caam_qi2_strstatus(ctx->dev, status);
6774 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
6775 + DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
6776 + edesc->src_nents > 1 ? 100 : ivsize, 1);
6777 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
6778 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
6779 + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
6782 + skcipher_unmap(ctx->dev, edesc, req);
6785 + * The crypto API expects us to set the IV (req->iv) to the last
6786 + * ciphertext block. This is used e.g. by the CTS mode.
6788 + scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
6791 + qi_cache_free(edesc);
6792 + skcipher_request_complete(req, ecode);
6795 +static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
6797 + struct crypto_async_request *areq = cbk_ctx;
6798 + struct skcipher_request *req = skcipher_request_cast(areq);
6799 + struct caam_request *req_ctx = to_caam_req(areq);
6800 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6801 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6802 + struct skcipher_edesc *edesc = req_ctx->edesc;
6805 + int ivsize = crypto_skcipher_ivsize(skcipher);
6807 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6810 + if (unlikely(status)) {
6811 + caam_qi2_strstatus(ctx->dev, status);
6816 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
6817 + DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
6818 + edesc->src_nents > 1 ? 100 : ivsize, 1);
6819 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
6820 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
6821 + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
6824 + skcipher_unmap(ctx->dev, edesc, req);
6825 + qi_cache_free(edesc);
6826 + skcipher_request_complete(req, ecode);
6829 +static int skcipher_encrypt(struct skcipher_request *req)
6831 + struct skcipher_edesc *edesc;
6832 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6833 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6834 + struct caam_request *caam_req = skcipher_request_ctx(req);
6837 + /* allocate extended descriptor */
6838 + edesc = skcipher_edesc_alloc(req);
6839 + if (IS_ERR(edesc))
6840 + return PTR_ERR(edesc);
6842 + caam_req->flc = &ctx->flc[ENCRYPT];
6843 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
6844 + caam_req->cbk = skcipher_encrypt_done;
6845 + caam_req->ctx = &req->base;
6846 + caam_req->edesc = edesc;
6847 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6848 + if (ret != -EINPROGRESS &&
6849 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6850 + skcipher_unmap(ctx->dev, edesc, req);
6851 + qi_cache_free(edesc);
6857 +static int skcipher_decrypt(struct skcipher_request *req)
6859 + struct skcipher_edesc *edesc;
6860 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6861 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6862 + struct caam_request *caam_req = skcipher_request_ctx(req);
6863 + int ivsize = crypto_skcipher_ivsize(skcipher);
6866 + /* allocate extended descriptor */
6867 + edesc = skcipher_edesc_alloc(req);
6868 + if (IS_ERR(edesc))
6869 + return PTR_ERR(edesc);
6872 + * The crypto API expects us to set the IV (req->iv) to the last
6873 + * ciphertext block.
6875 + scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
6878 + caam_req->flc = &ctx->flc[DECRYPT];
6879 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
6880 + caam_req->cbk = skcipher_decrypt_done;
6881 + caam_req->ctx = &req->base;
6882 + caam_req->edesc = edesc;
6883 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6884 + if (ret != -EINPROGRESS &&
6885 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6886 + skcipher_unmap(ctx->dev, edesc, req);
6887 + qi_cache_free(edesc);
6893 +static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
6896 + dma_addr_t dma_addr;
6899 + /* copy descriptor header template value */
6900 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
6901 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
6903 + ctx->dev = caam->dev;
6904 + ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
6906 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
6907 + offsetof(struct caam_ctx, flc_dma),
6908 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
6909 + if (dma_mapping_error(ctx->dev, dma_addr)) {
6910 + dev_err(ctx->dev, "unable to map key, shared descriptors\n");
6914 + for (i = 0; i < NUM_OP; i++)
6915 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
6916 + ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
6921 +static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
6923 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
6924 + struct caam_skcipher_alg *caam_alg =
6925 + container_of(alg, typeof(*caam_alg), skcipher);
6927 + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
6928 + return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
6931 +static int caam_cra_init_aead(struct crypto_aead *tfm)
6933 + struct aead_alg *alg = crypto_aead_alg(tfm);
6934 + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
6937 + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
6938 + return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
6939 + (alg->setkey == aead_setkey) ||
6940 + (alg->setkey == tls_setkey));
6943 +static void caam_exit_common(struct caam_ctx *ctx)
6945 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
6946 + offsetof(struct caam_ctx, flc_dma), ctx->dir,
6947 + DMA_ATTR_SKIP_CPU_SYNC);
6950 +static void caam_cra_exit(struct crypto_skcipher *tfm)
6952 + caam_exit_common(crypto_skcipher_ctx(tfm));
6955 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
6957 + caam_exit_common(crypto_aead_ctx(tfm));
6960 +static struct caam_skcipher_alg driver_algs[] = {
6964 + .cra_name = "cbc(aes)",
6965 + .cra_driver_name = "cbc-aes-caam-qi2",
6966 + .cra_blocksize = AES_BLOCK_SIZE,
6968 + .setkey = skcipher_setkey,
6969 + .encrypt = skcipher_encrypt,
6970 + .decrypt = skcipher_decrypt,
6971 + .min_keysize = AES_MIN_KEY_SIZE,
6972 + .max_keysize = AES_MAX_KEY_SIZE,
6973 + .ivsize = AES_BLOCK_SIZE,
6975 + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6980 + .cra_name = "cbc(des3_ede)",
6981 + .cra_driver_name = "cbc-3des-caam-qi2",
6982 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6984 + .setkey = skcipher_setkey,
6985 + .encrypt = skcipher_encrypt,
6986 + .decrypt = skcipher_decrypt,
6987 + .min_keysize = DES3_EDE_KEY_SIZE,
6988 + .max_keysize = DES3_EDE_KEY_SIZE,
6989 + .ivsize = DES3_EDE_BLOCK_SIZE,
6991 + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6996 + .cra_name = "cbc(des)",
6997 + .cra_driver_name = "cbc-des-caam-qi2",
6998 + .cra_blocksize = DES_BLOCK_SIZE,
7000 + .setkey = skcipher_setkey,
7001 + .encrypt = skcipher_encrypt,
7002 + .decrypt = skcipher_decrypt,
7003 + .min_keysize = DES_KEY_SIZE,
7004 + .max_keysize = DES_KEY_SIZE,
7005 + .ivsize = DES_BLOCK_SIZE,
7007 + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7012 + .cra_name = "ctr(aes)",
7013 + .cra_driver_name = "ctr-aes-caam-qi2",
7014 + .cra_blocksize = 1,
7016 + .setkey = skcipher_setkey,
7017 + .encrypt = skcipher_encrypt,
7018 + .decrypt = skcipher_decrypt,
7019 + .min_keysize = AES_MIN_KEY_SIZE,
7020 + .max_keysize = AES_MAX_KEY_SIZE,
7021 + .ivsize = AES_BLOCK_SIZE,
7022 + .chunksize = AES_BLOCK_SIZE,
7024 + .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
7025 + OP_ALG_AAI_CTR_MOD128,
7030 + .cra_name = "rfc3686(ctr(aes))",
7031 + .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
7032 + .cra_blocksize = 1,
7034 + .setkey = skcipher_setkey,
7035 + .encrypt = skcipher_encrypt,
7036 + .decrypt = skcipher_decrypt,
7037 + .min_keysize = AES_MIN_KEY_SIZE +
7038 + CTR_RFC3686_NONCE_SIZE,
7039 + .max_keysize = AES_MAX_KEY_SIZE +
7040 + CTR_RFC3686_NONCE_SIZE,
7041 + .ivsize = CTR_RFC3686_IV_SIZE,
7042 + .chunksize = AES_BLOCK_SIZE,
7045 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7046 + OP_ALG_AAI_CTR_MOD128,
7053 + .cra_name = "xts(aes)",
7054 + .cra_driver_name = "xts-aes-caam-qi2",
7055 + .cra_blocksize = AES_BLOCK_SIZE,
7057 + .setkey = xts_skcipher_setkey,
7058 + .encrypt = skcipher_encrypt,
7059 + .decrypt = skcipher_decrypt,
7060 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
7061 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
7062 + .ivsize = AES_BLOCK_SIZE,
7064 + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
7069 + .cra_name = "chacha20",
7070 + .cra_driver_name = "chacha20-caam-qi2",
7071 + .cra_blocksize = 1,
7073 + .setkey = skcipher_setkey,
7074 + .encrypt = skcipher_encrypt,
7075 + .decrypt = skcipher_decrypt,
7076 + .min_keysize = CHACHA20_KEY_SIZE,
7077 + .max_keysize = CHACHA20_KEY_SIZE,
7078 + .ivsize = CHACHA20_IV_SIZE,
7080 + .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
7084 +static struct caam_aead_alg driver_aeads[] = {
7088 + .cra_name = "rfc4106(gcm(aes))",
7089 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
7090 + .cra_blocksize = 1,
7092 + .setkey = rfc4106_setkey,
7093 + .setauthsize = rfc4106_setauthsize,
7094 + .encrypt = ipsec_gcm_encrypt,
7095 + .decrypt = ipsec_gcm_decrypt,
7097 + .maxauthsize = AES_BLOCK_SIZE,
7100 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
7106 + .cra_name = "rfc4543(gcm(aes))",
7107 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
7108 + .cra_blocksize = 1,
7110 + .setkey = rfc4543_setkey,
7111 + .setauthsize = rfc4543_setauthsize,
7112 + .encrypt = ipsec_gcm_encrypt,
7113 + .decrypt = ipsec_gcm_decrypt,
7115 + .maxauthsize = AES_BLOCK_SIZE,
7118 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
7121 + /* Galois Counter Mode */
7125 + .cra_name = "gcm(aes)",
7126 + .cra_driver_name = "gcm-aes-caam-qi2",
7127 + .cra_blocksize = 1,
7129 + .setkey = gcm_setkey,
7130 + .setauthsize = gcm_setauthsize,
7131 + .encrypt = aead_encrypt,
7132 + .decrypt = aead_decrypt,
7134 + .maxauthsize = AES_BLOCK_SIZE,
7137 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
7140 + /* single-pass ipsec_esp descriptor */
7144 + .cra_name = "authenc(hmac(md5),cbc(aes))",
7145 + .cra_driver_name = "authenc-hmac-md5-"
7146 + "cbc-aes-caam-qi2",
7147 + .cra_blocksize = AES_BLOCK_SIZE,
7149 + .setkey = aead_setkey,
7150 + .setauthsize = aead_setauthsize,
7151 + .encrypt = aead_encrypt,
7152 + .decrypt = aead_decrypt,
7153 + .ivsize = AES_BLOCK_SIZE,
7154 + .maxauthsize = MD5_DIGEST_SIZE,
7157 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7158 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7159 + OP_ALG_AAI_HMAC_PRECOMP,
7165 + .cra_name = "echainiv(authenc(hmac(md5),"
7167 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
7168 + "cbc-aes-caam-qi2",
7169 + .cra_blocksize = AES_BLOCK_SIZE,
7171 + .setkey = aead_setkey,
7172 + .setauthsize = aead_setauthsize,
7173 + .encrypt = aead_encrypt,
7174 + .decrypt = aead_decrypt,
7175 + .ivsize = AES_BLOCK_SIZE,
7176 + .maxauthsize = MD5_DIGEST_SIZE,
7179 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7180 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7181 + OP_ALG_AAI_HMAC_PRECOMP,
7188 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
7189 + .cra_driver_name = "authenc-hmac-sha1-"
7190 + "cbc-aes-caam-qi2",
7191 + .cra_blocksize = AES_BLOCK_SIZE,
7193 + .setkey = aead_setkey,
7194 + .setauthsize = aead_setauthsize,
7195 + .encrypt = aead_encrypt,
7196 + .decrypt = aead_decrypt,
7197 + .ivsize = AES_BLOCK_SIZE,
7198 + .maxauthsize = SHA1_DIGEST_SIZE,
7201 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7202 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7203 + OP_ALG_AAI_HMAC_PRECOMP,
7209 + .cra_name = "echainiv(authenc(hmac(sha1),"
7211 + .cra_driver_name = "echainiv-authenc-"
7212 + "hmac-sha1-cbc-aes-caam-qi2",
7213 + .cra_blocksize = AES_BLOCK_SIZE,
7215 + .setkey = aead_setkey,
7216 + .setauthsize = aead_setauthsize,
7217 + .encrypt = aead_encrypt,
7218 + .decrypt = aead_decrypt,
7219 + .ivsize = AES_BLOCK_SIZE,
7220 + .maxauthsize = SHA1_DIGEST_SIZE,
7223 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7224 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7225 + OP_ALG_AAI_HMAC_PRECOMP,
7232 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
7233 + .cra_driver_name = "authenc-hmac-sha224-"
7234 + "cbc-aes-caam-qi2",
7235 + .cra_blocksize = AES_BLOCK_SIZE,
7237 + .setkey = aead_setkey,
7238 + .setauthsize = aead_setauthsize,
7239 + .encrypt = aead_encrypt,
7240 + .decrypt = aead_decrypt,
7241 + .ivsize = AES_BLOCK_SIZE,
7242 + .maxauthsize = SHA224_DIGEST_SIZE,
7245 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7246 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7247 + OP_ALG_AAI_HMAC_PRECOMP,
7253 + .cra_name = "echainiv(authenc(hmac(sha224),"
7255 + .cra_driver_name = "echainiv-authenc-"
7256 + "hmac-sha224-cbc-aes-caam-qi2",
7257 + .cra_blocksize = AES_BLOCK_SIZE,
7259 + .setkey = aead_setkey,
7260 + .setauthsize = aead_setauthsize,
7261 + .encrypt = aead_encrypt,
7262 + .decrypt = aead_decrypt,
7263 + .ivsize = AES_BLOCK_SIZE,
7264 + .maxauthsize = SHA224_DIGEST_SIZE,
7267 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7268 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7269 + OP_ALG_AAI_HMAC_PRECOMP,
7276 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
7277 + .cra_driver_name = "authenc-hmac-sha256-"
7278 + "cbc-aes-caam-qi2",
7279 + .cra_blocksize = AES_BLOCK_SIZE,
7281 + .setkey = aead_setkey,
7282 + .setauthsize = aead_setauthsize,
7283 + .encrypt = aead_encrypt,
7284 + .decrypt = aead_decrypt,
7285 + .ivsize = AES_BLOCK_SIZE,
7286 + .maxauthsize = SHA256_DIGEST_SIZE,
7289 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7290 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7291 + OP_ALG_AAI_HMAC_PRECOMP,
7297 + .cra_name = "echainiv(authenc(hmac(sha256),"
7299 + .cra_driver_name = "echainiv-authenc-"
7300 + "hmac-sha256-cbc-aes-"
7302 + .cra_blocksize = AES_BLOCK_SIZE,
7304 + .setkey = aead_setkey,
7305 + .setauthsize = aead_setauthsize,
7306 + .encrypt = aead_encrypt,
7307 + .decrypt = aead_decrypt,
7308 + .ivsize = AES_BLOCK_SIZE,
7309 + .maxauthsize = SHA256_DIGEST_SIZE,
7312 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7313 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7314 + OP_ALG_AAI_HMAC_PRECOMP,
7321 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
7322 + .cra_driver_name = "authenc-hmac-sha384-"
7323 + "cbc-aes-caam-qi2",
7324 + .cra_blocksize = AES_BLOCK_SIZE,
7326 + .setkey = aead_setkey,
7327 + .setauthsize = aead_setauthsize,
7328 + .encrypt = aead_encrypt,
7329 + .decrypt = aead_decrypt,
7330 + .ivsize = AES_BLOCK_SIZE,
7331 + .maxauthsize = SHA384_DIGEST_SIZE,
7334 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7335 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7336 + OP_ALG_AAI_HMAC_PRECOMP,
7342 + .cra_name = "echainiv(authenc(hmac(sha384),"
7344 + .cra_driver_name = "echainiv-authenc-"
7345 + "hmac-sha384-cbc-aes-"
7347 + .cra_blocksize = AES_BLOCK_SIZE,
7349 + .setkey = aead_setkey,
7350 + .setauthsize = aead_setauthsize,
7351 + .encrypt = aead_encrypt,
7352 + .decrypt = aead_decrypt,
7353 + .ivsize = AES_BLOCK_SIZE,
7354 + .maxauthsize = SHA384_DIGEST_SIZE,
7357 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7358 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7359 + OP_ALG_AAI_HMAC_PRECOMP,
7366 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
7367 + .cra_driver_name = "authenc-hmac-sha512-"
7368 + "cbc-aes-caam-qi2",
7369 + .cra_blocksize = AES_BLOCK_SIZE,
7371 + .setkey = aead_setkey,
7372 + .setauthsize = aead_setauthsize,
7373 + .encrypt = aead_encrypt,
7374 + .decrypt = aead_decrypt,
7375 + .ivsize = AES_BLOCK_SIZE,
7376 + .maxauthsize = SHA512_DIGEST_SIZE,
7379 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7380 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7381 + OP_ALG_AAI_HMAC_PRECOMP,
7387 + .cra_name = "echainiv(authenc(hmac(sha512),"
7389 + .cra_driver_name = "echainiv-authenc-"
7390 + "hmac-sha512-cbc-aes-"
7392 + .cra_blocksize = AES_BLOCK_SIZE,
7394 + .setkey = aead_setkey,
7395 + .setauthsize = aead_setauthsize,
7396 + .encrypt = aead_encrypt,
7397 + .decrypt = aead_decrypt,
7398 + .ivsize = AES_BLOCK_SIZE,
7399 + .maxauthsize = SHA512_DIGEST_SIZE,
7402 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7403 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7404 + OP_ALG_AAI_HMAC_PRECOMP,
7411 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
7412 + .cra_driver_name = "authenc-hmac-md5-"
7413 + "cbc-des3_ede-caam-qi2",
7414 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7416 + .setkey = aead_setkey,
7417 + .setauthsize = aead_setauthsize,
7418 + .encrypt = aead_encrypt,
7419 + .decrypt = aead_decrypt,
7420 + .ivsize = DES3_EDE_BLOCK_SIZE,
7421 + .maxauthsize = MD5_DIGEST_SIZE,
7424 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7425 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7426 + OP_ALG_AAI_HMAC_PRECOMP,
7432 + .cra_name = "echainiv(authenc(hmac(md5),"
7433 + "cbc(des3_ede)))",
7434 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
7435 + "cbc-des3_ede-caam-qi2",
7436 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7438 + .setkey = aead_setkey,
7439 + .setauthsize = aead_setauthsize,
7440 + .encrypt = aead_encrypt,
7441 + .decrypt = aead_decrypt,
7442 + .ivsize = DES3_EDE_BLOCK_SIZE,
7443 + .maxauthsize = MD5_DIGEST_SIZE,
7446 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7447 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7448 + OP_ALG_AAI_HMAC_PRECOMP,
7455 + .cra_name = "authenc(hmac(sha1),"
7457 + .cra_driver_name = "authenc-hmac-sha1-"
7458 + "cbc-des3_ede-caam-qi2",
7459 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7461 + .setkey = aead_setkey,
7462 + .setauthsize = aead_setauthsize,
7463 + .encrypt = aead_encrypt,
7464 + .decrypt = aead_decrypt,
7465 + .ivsize = DES3_EDE_BLOCK_SIZE,
7466 + .maxauthsize = SHA1_DIGEST_SIZE,
7469 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7470 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7471 + OP_ALG_AAI_HMAC_PRECOMP,
7477 + .cra_name = "echainiv(authenc(hmac(sha1),"
7478 + "cbc(des3_ede)))",
7479 + .cra_driver_name = "echainiv-authenc-"
7481 + "cbc-des3_ede-caam-qi2",
7482 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7484 + .setkey = aead_setkey,
7485 + .setauthsize = aead_setauthsize,
7486 + .encrypt = aead_encrypt,
7487 + .decrypt = aead_decrypt,
7488 + .ivsize = DES3_EDE_BLOCK_SIZE,
7489 + .maxauthsize = SHA1_DIGEST_SIZE,
7492 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7493 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7494 + OP_ALG_AAI_HMAC_PRECOMP,
7501 + .cra_name = "authenc(hmac(sha224),"
7503 + .cra_driver_name = "authenc-hmac-sha224-"
7504 + "cbc-des3_ede-caam-qi2",
7505 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7507 + .setkey = aead_setkey,
7508 + .setauthsize = aead_setauthsize,
7509 + .encrypt = aead_encrypt,
7510 + .decrypt = aead_decrypt,
7511 + .ivsize = DES3_EDE_BLOCK_SIZE,
7512 + .maxauthsize = SHA224_DIGEST_SIZE,
7515 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7516 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7517 + OP_ALG_AAI_HMAC_PRECOMP,
7523 + .cra_name = "echainiv(authenc(hmac(sha224),"
7524 + "cbc(des3_ede)))",
7525 + .cra_driver_name = "echainiv-authenc-"
7527 + "cbc-des3_ede-caam-qi2",
7528 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7530 + .setkey = aead_setkey,
7531 + .setauthsize = aead_setauthsize,
7532 + .encrypt = aead_encrypt,
7533 + .decrypt = aead_decrypt,
7534 + .ivsize = DES3_EDE_BLOCK_SIZE,
7535 + .maxauthsize = SHA224_DIGEST_SIZE,
7538 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7539 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7540 + OP_ALG_AAI_HMAC_PRECOMP,
7547 + .cra_name = "authenc(hmac(sha256),"
7549 + .cra_driver_name = "authenc-hmac-sha256-"
7550 + "cbc-des3_ede-caam-qi2",
7551 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7553 + .setkey = aead_setkey,
7554 + .setauthsize = aead_setauthsize,
7555 + .encrypt = aead_encrypt,
7556 + .decrypt = aead_decrypt,
7557 + .ivsize = DES3_EDE_BLOCK_SIZE,
7558 + .maxauthsize = SHA256_DIGEST_SIZE,
7561 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7562 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7563 + OP_ALG_AAI_HMAC_PRECOMP,
7569 + .cra_name = "echainiv(authenc(hmac(sha256),"
7570 + "cbc(des3_ede)))",
7571 + .cra_driver_name = "echainiv-authenc-"
7573 + "cbc-des3_ede-caam-qi2",
7574 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7576 + .setkey = aead_setkey,
7577 + .setauthsize = aead_setauthsize,
7578 + .encrypt = aead_encrypt,
7579 + .decrypt = aead_decrypt,
7580 + .ivsize = DES3_EDE_BLOCK_SIZE,
7581 + .maxauthsize = SHA256_DIGEST_SIZE,
7584 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7585 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7586 + OP_ALG_AAI_HMAC_PRECOMP,
7593 + .cra_name = "authenc(hmac(sha384),"
7595 + .cra_driver_name = "authenc-hmac-sha384-"
7596 + "cbc-des3_ede-caam-qi2",
7597 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7599 + .setkey = aead_setkey,
7600 + .setauthsize = aead_setauthsize,
7601 + .encrypt = aead_encrypt,
7602 + .decrypt = aead_decrypt,
7603 + .ivsize = DES3_EDE_BLOCK_SIZE,
7604 + .maxauthsize = SHA384_DIGEST_SIZE,
7607 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7608 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7609 + OP_ALG_AAI_HMAC_PRECOMP,
7615 + .cra_name = "echainiv(authenc(hmac(sha384),"
7616 + "cbc(des3_ede)))",
7617 + .cra_driver_name = "echainiv-authenc-"
7619 + "cbc-des3_ede-caam-qi2",
7620 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7622 + .setkey = aead_setkey,
7623 + .setauthsize = aead_setauthsize,
7624 + .encrypt = aead_encrypt,
7625 + .decrypt = aead_decrypt,
7626 + .ivsize = DES3_EDE_BLOCK_SIZE,
7627 + .maxauthsize = SHA384_DIGEST_SIZE,
7630 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7631 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7632 + OP_ALG_AAI_HMAC_PRECOMP,
7639 + .cra_name = "authenc(hmac(sha512),"
7641 + .cra_driver_name = "authenc-hmac-sha512-"
7642 + "cbc-des3_ede-caam-qi2",
7643 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7645 + .setkey = aead_setkey,
7646 + .setauthsize = aead_setauthsize,
7647 + .encrypt = aead_encrypt,
7648 + .decrypt = aead_decrypt,
7649 + .ivsize = DES3_EDE_BLOCK_SIZE,
7650 + .maxauthsize = SHA512_DIGEST_SIZE,
7653 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7654 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7655 + OP_ALG_AAI_HMAC_PRECOMP,
7661 + .cra_name = "echainiv(authenc(hmac(sha512),"
7662 + "cbc(des3_ede)))",
7663 + .cra_driver_name = "echainiv-authenc-"
7665 + "cbc-des3_ede-caam-qi2",
7666 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7668 + .setkey = aead_setkey,
7669 + .setauthsize = aead_setauthsize,
7670 + .encrypt = aead_encrypt,
7671 + .decrypt = aead_decrypt,
7672 + .ivsize = DES3_EDE_BLOCK_SIZE,
7673 + .maxauthsize = SHA512_DIGEST_SIZE,
7676 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7677 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7678 + OP_ALG_AAI_HMAC_PRECOMP,
7685 + .cra_name = "authenc(hmac(md5),cbc(des))",
7686 + .cra_driver_name = "authenc-hmac-md5-"
7687 + "cbc-des-caam-qi2",
7688 + .cra_blocksize = DES_BLOCK_SIZE,
7690 + .setkey = aead_setkey,
7691 + .setauthsize = aead_setauthsize,
7692 + .encrypt = aead_encrypt,
7693 + .decrypt = aead_decrypt,
7694 + .ivsize = DES_BLOCK_SIZE,
7695 + .maxauthsize = MD5_DIGEST_SIZE,
7698 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7699 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7700 + OP_ALG_AAI_HMAC_PRECOMP,
7706 + .cra_name = "echainiv(authenc(hmac(md5),"
7708 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
7709 + "cbc-des-caam-qi2",
7710 + .cra_blocksize = DES_BLOCK_SIZE,
7712 + .setkey = aead_setkey,
7713 + .setauthsize = aead_setauthsize,
7714 + .encrypt = aead_encrypt,
7715 + .decrypt = aead_decrypt,
7716 + .ivsize = DES_BLOCK_SIZE,
7717 + .maxauthsize = MD5_DIGEST_SIZE,
7720 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7721 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7722 + OP_ALG_AAI_HMAC_PRECOMP,
7729 + .cra_name = "authenc(hmac(sha1),cbc(des))",
7730 + .cra_driver_name = "authenc-hmac-sha1-"
7731 + "cbc-des-caam-qi2",
7732 + .cra_blocksize = DES_BLOCK_SIZE,
7734 + .setkey = aead_setkey,
7735 + .setauthsize = aead_setauthsize,
7736 + .encrypt = aead_encrypt,
7737 + .decrypt = aead_decrypt,
7738 + .ivsize = DES_BLOCK_SIZE,
7739 + .maxauthsize = SHA1_DIGEST_SIZE,
7742 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7743 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7744 + OP_ALG_AAI_HMAC_PRECOMP,
7750 + .cra_name = "echainiv(authenc(hmac(sha1),"
7752 + .cra_driver_name = "echainiv-authenc-"
7753 + "hmac-sha1-cbc-des-caam-qi2",
7754 + .cra_blocksize = DES_BLOCK_SIZE,
7756 + .setkey = aead_setkey,
7757 + .setauthsize = aead_setauthsize,
7758 + .encrypt = aead_encrypt,
7759 + .decrypt = aead_decrypt,
7760 + .ivsize = DES_BLOCK_SIZE,
7761 + .maxauthsize = SHA1_DIGEST_SIZE,
7764 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7765 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7766 + OP_ALG_AAI_HMAC_PRECOMP,
7773 + .cra_name = "authenc(hmac(sha224),cbc(des))",
7774 + .cra_driver_name = "authenc-hmac-sha224-"
7775 + "cbc-des-caam-qi2",
7776 + .cra_blocksize = DES_BLOCK_SIZE,
7778 + .setkey = aead_setkey,
7779 + .setauthsize = aead_setauthsize,
7780 + .encrypt = aead_encrypt,
7781 + .decrypt = aead_decrypt,
7782 + .ivsize = DES_BLOCK_SIZE,
7783 + .maxauthsize = SHA224_DIGEST_SIZE,
7786 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7787 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7788 + OP_ALG_AAI_HMAC_PRECOMP,
7794 + .cra_name = "echainiv(authenc(hmac(sha224),"
7796 + .cra_driver_name = "echainiv-authenc-"
7797 + "hmac-sha224-cbc-des-"
7799 + .cra_blocksize = DES_BLOCK_SIZE,
7801 + .setkey = aead_setkey,
7802 + .setauthsize = aead_setauthsize,
7803 + .encrypt = aead_encrypt,
7804 + .decrypt = aead_decrypt,
7805 + .ivsize = DES_BLOCK_SIZE,
7806 + .maxauthsize = SHA224_DIGEST_SIZE,
7809 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7810 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7811 + OP_ALG_AAI_HMAC_PRECOMP,
7818 + .cra_name = "authenc(hmac(sha256),cbc(des))",
7819 + .cra_driver_name = "authenc-hmac-sha256-"
7820 + "cbc-des-caam-qi2",
7821 + .cra_blocksize = DES_BLOCK_SIZE,
7823 + .setkey = aead_setkey,
7824 + .setauthsize = aead_setauthsize,
7825 + .encrypt = aead_encrypt,
7826 + .decrypt = aead_decrypt,
7827 + .ivsize = DES_BLOCK_SIZE,
7828 + .maxauthsize = SHA256_DIGEST_SIZE,
7831 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7832 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7833 + OP_ALG_AAI_HMAC_PRECOMP,
7839 + .cra_name = "echainiv(authenc(hmac(sha256),"
7841 + .cra_driver_name = "echainiv-authenc-"
7842 + "hmac-sha256-cbc-des-"
7844 + .cra_blocksize = DES_BLOCK_SIZE,
7846 + .setkey = aead_setkey,
7847 + .setauthsize = aead_setauthsize,
7848 + .encrypt = aead_encrypt,
7849 + .decrypt = aead_decrypt,
7850 + .ivsize = DES_BLOCK_SIZE,
7851 + .maxauthsize = SHA256_DIGEST_SIZE,
7854 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7855 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7856 + OP_ALG_AAI_HMAC_PRECOMP,
7863 + .cra_name = "authenc(hmac(sha384),cbc(des))",
7864 + .cra_driver_name = "authenc-hmac-sha384-"
7865 + "cbc-des-caam-qi2",
7866 + .cra_blocksize = DES_BLOCK_SIZE,
7868 + .setkey = aead_setkey,
7869 + .setauthsize = aead_setauthsize,
7870 + .encrypt = aead_encrypt,
7871 + .decrypt = aead_decrypt,
7872 + .ivsize = DES_BLOCK_SIZE,
7873 + .maxauthsize = SHA384_DIGEST_SIZE,
7876 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7877 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7878 + OP_ALG_AAI_HMAC_PRECOMP,
7884 + .cra_name = "echainiv(authenc(hmac(sha384),"
7886 + .cra_driver_name = "echainiv-authenc-"
7887 + "hmac-sha384-cbc-des-"
7889 + .cra_blocksize = DES_BLOCK_SIZE,
7891 + .setkey = aead_setkey,
7892 + .setauthsize = aead_setauthsize,
7893 + .encrypt = aead_encrypt,
7894 + .decrypt = aead_decrypt,
7895 + .ivsize = DES_BLOCK_SIZE,
7896 + .maxauthsize = SHA384_DIGEST_SIZE,
7899 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7900 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7901 + OP_ALG_AAI_HMAC_PRECOMP,
7908 + .cra_name = "authenc(hmac(sha512),cbc(des))",
7909 + .cra_driver_name = "authenc-hmac-sha512-"
7910 + "cbc-des-caam-qi2",
7911 + .cra_blocksize = DES_BLOCK_SIZE,
7913 + .setkey = aead_setkey,
7914 + .setauthsize = aead_setauthsize,
7915 + .encrypt = aead_encrypt,
7916 + .decrypt = aead_decrypt,
7917 + .ivsize = DES_BLOCK_SIZE,
7918 + .maxauthsize = SHA512_DIGEST_SIZE,
7921 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7922 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7923 + OP_ALG_AAI_HMAC_PRECOMP,
7929 + .cra_name = "echainiv(authenc(hmac(sha512),"
7931 + .cra_driver_name = "echainiv-authenc-"
7932 + "hmac-sha512-cbc-des-"
7934 + .cra_blocksize = DES_BLOCK_SIZE,
7936 + .setkey = aead_setkey,
7937 + .setauthsize = aead_setauthsize,
7938 + .encrypt = aead_encrypt,
7939 + .decrypt = aead_decrypt,
7940 + .ivsize = DES_BLOCK_SIZE,
7941 + .maxauthsize = SHA512_DIGEST_SIZE,
7944 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7945 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7946 + OP_ALG_AAI_HMAC_PRECOMP,
7953 + .cra_name = "authenc(hmac(md5),"
7954 + "rfc3686(ctr(aes)))",
7955 + .cra_driver_name = "authenc-hmac-md5-"
7956 + "rfc3686-ctr-aes-caam-qi2",
7957 + .cra_blocksize = 1,
7959 + .setkey = aead_setkey,
7960 + .setauthsize = aead_setauthsize,
7961 + .encrypt = aead_encrypt,
7962 + .decrypt = aead_decrypt,
7963 + .ivsize = CTR_RFC3686_IV_SIZE,
7964 + .maxauthsize = MD5_DIGEST_SIZE,
7967 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7968 + OP_ALG_AAI_CTR_MOD128,
7969 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7970 + OP_ALG_AAI_HMAC_PRECOMP,
7977 + .cra_name = "seqiv(authenc("
7978 + "hmac(md5),rfc3686(ctr(aes))))",
7979 + .cra_driver_name = "seqiv-authenc-hmac-md5-"
7980 + "rfc3686-ctr-aes-caam-qi2",
7981 + .cra_blocksize = 1,
7983 + .setkey = aead_setkey,
7984 + .setauthsize = aead_setauthsize,
7985 + .encrypt = aead_encrypt,
7986 + .decrypt = aead_decrypt,
7987 + .ivsize = CTR_RFC3686_IV_SIZE,
7988 + .maxauthsize = MD5_DIGEST_SIZE,
7991 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7992 + OP_ALG_AAI_CTR_MOD128,
7993 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7994 + OP_ALG_AAI_HMAC_PRECOMP,
8002 + .cra_name = "authenc(hmac(sha1),"
8003 + "rfc3686(ctr(aes)))",
8004 + .cra_driver_name = "authenc-hmac-sha1-"
8005 + "rfc3686-ctr-aes-caam-qi2",
8006 + .cra_blocksize = 1,
8008 + .setkey = aead_setkey,
8009 + .setauthsize = aead_setauthsize,
8010 + .encrypt = aead_encrypt,
8011 + .decrypt = aead_decrypt,
8012 + .ivsize = CTR_RFC3686_IV_SIZE,
8013 + .maxauthsize = SHA1_DIGEST_SIZE,
8016 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8017 + OP_ALG_AAI_CTR_MOD128,
8018 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
8019 + OP_ALG_AAI_HMAC_PRECOMP,
8026 + .cra_name = "seqiv(authenc("
8027 + "hmac(sha1),rfc3686(ctr(aes))))",
8028 + .cra_driver_name = "seqiv-authenc-hmac-sha1-"
8029 + "rfc3686-ctr-aes-caam-qi2",
8030 + .cra_blocksize = 1,
8032 + .setkey = aead_setkey,
8033 + .setauthsize = aead_setauthsize,
8034 + .encrypt = aead_encrypt,
8035 + .decrypt = aead_decrypt,
8036 + .ivsize = CTR_RFC3686_IV_SIZE,
8037 + .maxauthsize = SHA1_DIGEST_SIZE,
8040 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8041 + OP_ALG_AAI_CTR_MOD128,
8042 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
8043 + OP_ALG_AAI_HMAC_PRECOMP,
8051 + .cra_name = "authenc(hmac(sha224),"
8052 + "rfc3686(ctr(aes)))",
8053 + .cra_driver_name = "authenc-hmac-sha224-"
8054 + "rfc3686-ctr-aes-caam-qi2",
8055 + .cra_blocksize = 1,
8057 + .setkey = aead_setkey,
8058 + .setauthsize = aead_setauthsize,
8059 + .encrypt = aead_encrypt,
8060 + .decrypt = aead_decrypt,
8061 + .ivsize = CTR_RFC3686_IV_SIZE,
8062 + .maxauthsize = SHA224_DIGEST_SIZE,
8065 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8066 + OP_ALG_AAI_CTR_MOD128,
8067 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
8068 + OP_ALG_AAI_HMAC_PRECOMP,
8075 + .cra_name = "seqiv(authenc("
8076 + "hmac(sha224),rfc3686(ctr(aes))))",
8077 + .cra_driver_name = "seqiv-authenc-hmac-sha224-"
8078 + "rfc3686-ctr-aes-caam-qi2",
8079 + .cra_blocksize = 1,
8081 + .setkey = aead_setkey,
8082 + .setauthsize = aead_setauthsize,
8083 + .encrypt = aead_encrypt,
8084 + .decrypt = aead_decrypt,
8085 + .ivsize = CTR_RFC3686_IV_SIZE,
8086 + .maxauthsize = SHA224_DIGEST_SIZE,
8089 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8090 + OP_ALG_AAI_CTR_MOD128,
8091 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
8092 + OP_ALG_AAI_HMAC_PRECOMP,
8100 + .cra_name = "authenc(hmac(sha256),"
8101 + "rfc3686(ctr(aes)))",
8102 + .cra_driver_name = "authenc-hmac-sha256-"
8103 + "rfc3686-ctr-aes-caam-qi2",
8104 + .cra_blocksize = 1,
8106 + .setkey = aead_setkey,
8107 + .setauthsize = aead_setauthsize,
8108 + .encrypt = aead_encrypt,
8109 + .decrypt = aead_decrypt,
8110 + .ivsize = CTR_RFC3686_IV_SIZE,
8111 + .maxauthsize = SHA256_DIGEST_SIZE,
8114 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8115 + OP_ALG_AAI_CTR_MOD128,
8116 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
8117 + OP_ALG_AAI_HMAC_PRECOMP,
8124 + .cra_name = "seqiv(authenc(hmac(sha256),"
8125 + "rfc3686(ctr(aes))))",
8126 + .cra_driver_name = "seqiv-authenc-hmac-sha256-"
8127 + "rfc3686-ctr-aes-caam-qi2",
8128 + .cra_blocksize = 1,
8130 + .setkey = aead_setkey,
8131 + .setauthsize = aead_setauthsize,
8132 + .encrypt = aead_encrypt,
8133 + .decrypt = aead_decrypt,
8134 + .ivsize = CTR_RFC3686_IV_SIZE,
8135 + .maxauthsize = SHA256_DIGEST_SIZE,
8138 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8139 + OP_ALG_AAI_CTR_MOD128,
8140 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
8141 + OP_ALG_AAI_HMAC_PRECOMP,
8149 + .cra_name = "authenc(hmac(sha384),"
8150 + "rfc3686(ctr(aes)))",
8151 + .cra_driver_name = "authenc-hmac-sha384-"
8152 + "rfc3686-ctr-aes-caam-qi2",
8153 + .cra_blocksize = 1,
8155 + .setkey = aead_setkey,
8156 + .setauthsize = aead_setauthsize,
8157 + .encrypt = aead_encrypt,
8158 + .decrypt = aead_decrypt,
8159 + .ivsize = CTR_RFC3686_IV_SIZE,
8160 + .maxauthsize = SHA384_DIGEST_SIZE,
8163 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8164 + OP_ALG_AAI_CTR_MOD128,
8165 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
8166 + OP_ALG_AAI_HMAC_PRECOMP,
8173 + .cra_name = "seqiv(authenc(hmac(sha384),"
8174 + "rfc3686(ctr(aes))))",
8175 + .cra_driver_name = "seqiv-authenc-hmac-sha384-"
8176 + "rfc3686-ctr-aes-caam-qi2",
8177 + .cra_blocksize = 1,
8179 + .setkey = aead_setkey,
8180 + .setauthsize = aead_setauthsize,
8181 + .encrypt = aead_encrypt,
8182 + .decrypt = aead_decrypt,
8183 + .ivsize = CTR_RFC3686_IV_SIZE,
8184 + .maxauthsize = SHA384_DIGEST_SIZE,
8187 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8188 + OP_ALG_AAI_CTR_MOD128,
8189 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
8190 + OP_ALG_AAI_HMAC_PRECOMP,
8198 + .cra_name = "rfc7539(chacha20,poly1305)",
8199 + .cra_driver_name = "rfc7539-chacha20-poly1305-"
8201 + .cra_blocksize = 1,
8203 + .setkey = chachapoly_setkey,
8204 + .setauthsize = chachapoly_setauthsize,
8205 + .encrypt = aead_encrypt,
8206 + .decrypt = aead_decrypt,
8207 + .ivsize = CHACHAPOLY_IV_SIZE,
8208 + .maxauthsize = POLY1305_DIGEST_SIZE,
8211 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
8213 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
8220 + .cra_name = "rfc7539esp(chacha20,poly1305)",
8221 + .cra_driver_name = "rfc7539esp-chacha20-"
8222 + "poly1305-caam-qi2",
8223 + .cra_blocksize = 1,
8225 + .setkey = chachapoly_setkey,
8226 + .setauthsize = chachapoly_setauthsize,
8227 + .encrypt = aead_encrypt,
8228 + .decrypt = aead_decrypt,
8230 + .maxauthsize = POLY1305_DIGEST_SIZE,
8233 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
8235 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
8242 + .cra_name = "authenc(hmac(sha512),"
8243 + "rfc3686(ctr(aes)))",
8244 + .cra_driver_name = "authenc-hmac-sha512-"
8245 + "rfc3686-ctr-aes-caam-qi2",
8246 + .cra_blocksize = 1,
8248 + .setkey = aead_setkey,
8249 + .setauthsize = aead_setauthsize,
8250 + .encrypt = aead_encrypt,
8251 + .decrypt = aead_decrypt,
8252 + .ivsize = CTR_RFC3686_IV_SIZE,
8253 + .maxauthsize = SHA512_DIGEST_SIZE,
8256 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8257 + OP_ALG_AAI_CTR_MOD128,
8258 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
8259 + OP_ALG_AAI_HMAC_PRECOMP,
8266 + .cra_name = "seqiv(authenc(hmac(sha512),"
8267 + "rfc3686(ctr(aes))))",
8268 + .cra_driver_name = "seqiv-authenc-hmac-sha512-"
8269 + "rfc3686-ctr-aes-caam-qi2",
8270 + .cra_blocksize = 1,
8272 + .setkey = aead_setkey,
8273 + .setauthsize = aead_setauthsize,
8274 + .encrypt = aead_encrypt,
8275 + .decrypt = aead_decrypt,
8276 + .ivsize = CTR_RFC3686_IV_SIZE,
8277 + .maxauthsize = SHA512_DIGEST_SIZE,
8280 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8281 + OP_ALG_AAI_CTR_MOD128,
8282 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
8283 + OP_ALG_AAI_HMAC_PRECOMP,
8291 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
8292 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
8293 + .cra_blocksize = AES_BLOCK_SIZE,
8295 + .setkey = tls_setkey,
8296 + .setauthsize = tls_setauthsize,
8297 + .encrypt = tls_encrypt,
8298 + .decrypt = tls_decrypt,
8299 + .ivsize = AES_BLOCK_SIZE,
8300 + .maxauthsize = SHA1_DIGEST_SIZE,
8303 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
8304 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
8305 + OP_ALG_AAI_HMAC_PRECOMP,
8310 +static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
8312 + struct skcipher_alg *alg = &t_alg->skcipher;
8314 + alg->base.cra_module = THIS_MODULE;
8315 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
8316 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
8317 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
8319 + alg->init = caam_cra_init_skcipher;
8320 + alg->exit = caam_cra_exit;
8323 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
8325 + struct aead_alg *alg = &t_alg->aead;
8327 + alg->base.cra_module = THIS_MODULE;
8328 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
8329 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
8330 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
8332 + alg->init = caam_cra_init_aead;
8333 + alg->exit = caam_cra_exit_aead;
8336 +/* max hash key is max split key size */
8337 +#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
8339 +#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
8340 +#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
8342 +#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
8343 + CAAM_MAX_HASH_KEY_SIZE)
8344 +#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
8346 +/* caam context sizes for hashes: running digest + 8 */
8347 +#define HASH_MSG_LEN 8
8348 +#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
8359 + * caam_hash_ctx - ahash per-session context
8360 + * @flc: Flow Contexts array
8361 + * @flc_dma: I/O virtual addresses of the Flow Contexts
8362 + * @key: virtual address of the authentication key
8363 + * @dev: dpseci device
8364 + * @ctx_len: size of Context Register
8365 + * @adata: hashing algorithm details
8367 +struct caam_hash_ctx {
8368 + struct caam_flc flc[HASH_NUM_OP];
8369 + dma_addr_t flc_dma[HASH_NUM_OP];
8370 + u8 key[CAAM_MAX_HASH_KEY_SIZE];
8371 + struct device *dev;
8373 + struct alginfo adata;
8377 +struct caam_hash_state {
8378 + struct caam_request caam_req;
8379 + dma_addr_t buf_dma;
8380 + dma_addr_t ctx_dma;
8381 + u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
8383 + u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
8385 + u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
8386 + int (*update)(struct ahash_request *req);
8387 + int (*final)(struct ahash_request *req);
8388 + int (*finup)(struct ahash_request *req);
8392 +struct caam_export_state {
8393 + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
8394 + u8 caam_ctx[MAX_CTX_LEN];
8396 + int (*update)(struct ahash_request *req);
8397 + int (*final)(struct ahash_request *req);
8398 + int (*finup)(struct ahash_request *req);
8401 +static inline void switch_buf(struct caam_hash_state *state)
8403 + state->current_buf ^= 1;
8406 +static inline u8 *current_buf(struct caam_hash_state *state)
8408 + return state->current_buf ? state->buf_1 : state->buf_0;
8411 +static inline u8 *alt_buf(struct caam_hash_state *state)
8413 + return state->current_buf ? state->buf_0 : state->buf_1;
8416 +static inline int *current_buflen(struct caam_hash_state *state)
8418 + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
8421 +static inline int *alt_buflen(struct caam_hash_state *state)
8423 + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
8426 +/* Map current buffer in state (if length > 0) and put it in link table */
8427 +static inline int buf_map_to_qm_sg(struct device *dev,
8428 + struct dpaa2_sg_entry *qm_sg,
8429 + struct caam_hash_state *state)
8431 + int buflen = *current_buflen(state);
8436 + state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
8438 + if (dma_mapping_error(dev, state->buf_dma)) {
8439 + dev_err(dev, "unable to map buf\n");
8440 + state->buf_dma = 0;
8444 + dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
8449 +/* Map state->caam_ctx, and add it to link table */
8450 +static inline int ctx_map_to_qm_sg(struct device *dev,
8451 + struct caam_hash_state *state, int ctx_len,
8452 + struct dpaa2_sg_entry *qm_sg, u32 flag)
8454 + state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
8455 + if (dma_mapping_error(dev, state->ctx_dma)) {
8456 + dev_err(dev, "unable to map ctx\n");
8457 + state->ctx_dma = 0;
8461 + dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
8466 +static int ahash_set_sh_desc(struct crypto_ahash *ahash)
8468 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8469 + int digestsize = crypto_ahash_digestsize(ahash);
8470 + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
8471 + struct caam_flc *flc;
8474 + ctx->adata.key_virt = ctx->key;
8475 + ctx->adata.key_inline = true;
8477 + /* ahash_update shared descriptor */
8478 + flc = &ctx->flc[UPDATE];
8479 + desc = flc->sh_desc;
8480 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
8481 + ctx->ctx_len, true, priv->sec_attr.era);
8482 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8483 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
8484 + desc_bytes(desc), DMA_BIDIRECTIONAL);
8486 + print_hex_dump(KERN_ERR,
8487 + "ahash update shdesc@" __stringify(__LINE__)": ",
8488 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8491 + /* ahash_update_first shared descriptor */
8492 + flc = &ctx->flc[UPDATE_FIRST];
8493 + desc = flc->sh_desc;
8494 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
8495 + ctx->ctx_len, false, priv->sec_attr.era);
8496 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8497 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
8498 + desc_bytes(desc), DMA_BIDIRECTIONAL);
8500 + print_hex_dump(KERN_ERR,
8501 + "ahash update first shdesc@" __stringify(__LINE__)": ",
8502 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8505 + /* ahash_final shared descriptor */
8506 + flc = &ctx->flc[FINALIZE];
8507 + desc = flc->sh_desc;
8508 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
8509 + ctx->ctx_len, true, priv->sec_attr.era);
8510 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8511 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
8512 + desc_bytes(desc), DMA_BIDIRECTIONAL);
8514 + print_hex_dump(KERN_ERR,
8515 + "ahash final shdesc@" __stringify(__LINE__)": ",
8516 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8519 + /* ahash_digest shared descriptor */
8520 + flc = &ctx->flc[DIGEST];
8521 + desc = flc->sh_desc;
8522 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
8523 + ctx->ctx_len, false, priv->sec_attr.era);
8524 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8525 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
8526 + desc_bytes(desc), DMA_BIDIRECTIONAL);
8528 + print_hex_dump(KERN_ERR,
8529 + "ahash digest shdesc@" __stringify(__LINE__)": ",
8530 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8536 +/* Digest hash size if it is too large */
8537 +static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
8538 + u32 *keylen, u8 *key_out, u32 digestsize)
8540 + struct caam_request *req_ctx;
8542 + struct split_key_sh_result result;
8543 + dma_addr_t src_dma, dst_dma;
8544 + struct caam_flc *flc;
8545 + dma_addr_t flc_dma;
8546 + int ret = -ENOMEM;
8547 + struct dpaa2_fl_entry *in_fle, *out_fle;
8549 + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
8553 + in_fle = &req_ctx->fd_flt[1];
8554 + out_fle = &req_ctx->fd_flt[0];
8556 + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
8560 + src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
8562 + if (dma_mapping_error(ctx->dev, src_dma)) {
8563 + dev_err(ctx->dev, "unable to map key input memory\n");
8566 + dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
8568 + if (dma_mapping_error(ctx->dev, dst_dma)) {
8569 + dev_err(ctx->dev, "unable to map key output memory\n");
8573 + desc = flc->sh_desc;
8575 + init_sh_desc(desc, 0);
8577 + /* descriptor to perform unkeyed hash on key_in */
8578 + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
8579 + OP_ALG_AS_INITFINAL);
8580 + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
8581 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
8582 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
8583 + LDST_SRCDST_BYTE_CONTEXT);
8585 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8586 + flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
8587 + desc_bytes(desc), DMA_TO_DEVICE);
8588 + if (dma_mapping_error(ctx->dev, flc_dma)) {
8589 + dev_err(ctx->dev, "unable to map shared descriptor\n");
8593 + dpaa2_fl_set_final(in_fle, true);
8594 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
8595 + dpaa2_fl_set_addr(in_fle, src_dma);
8596 + dpaa2_fl_set_len(in_fle, *keylen);
8597 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8598 + dpaa2_fl_set_addr(out_fle, dst_dma);
8599 + dpaa2_fl_set_len(out_fle, digestsize);
8602 + print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
8603 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
8604 + print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
8605 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8609 + init_completion(&result.completion);
8610 + result.dev = ctx->dev;
8612 + req_ctx->flc = flc;
8613 + req_ctx->flc_dma = flc_dma;
8614 + req_ctx->cbk = split_key_sh_done;
8615 + req_ctx->ctx = &result;
8617 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8618 + if (ret == -EINPROGRESS) {
8620 + wait_for_completion(&result.completion);
8623 + print_hex_dump(KERN_ERR,
8624 + "digested key@" __stringify(__LINE__)": ",
8625 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize,
8630 + dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
8633 + dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
8635 + dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
8641 + *keylen = digestsize;
8646 +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
8647 + unsigned int keylen)
8649 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8650 + unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
8651 + unsigned int digestsize = crypto_ahash_digestsize(ahash);
8653 + u8 *hashed_key = NULL;
8656 + dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
8659 + if (keylen > blocksize) {
8660 + hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
8661 + GFP_KERNEL | GFP_DMA);
8664 + ret = hash_digest_key(ctx, key, &keylen, hashed_key,
8667 + goto bad_free_key;
8671 + ctx->adata.keylen = keylen;
8672 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
8673 + OP_ALG_ALGSEL_MASK);
8674 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
8675 + goto bad_free_key;
8677 + memcpy(ctx->key, key, keylen);
8679 + kfree(hashed_key);
8680 + return ahash_set_sh_desc(ahash);
8682 + kfree(hashed_key);
8683 + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
8687 +static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
8688 + struct ahash_request *req, int dst_len)
8690 + struct caam_hash_state *state = ahash_request_ctx(req);
8692 + if (edesc->src_nents)
8693 + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
8694 + if (edesc->dst_dma)
8695 + dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
8697 + if (edesc->qm_sg_bytes)
8698 + dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
8701 + if (state->buf_dma) {
8702 + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
8704 + state->buf_dma = 0;
8708 +static inline void ahash_unmap_ctx(struct device *dev,
8709 + struct ahash_edesc *edesc,
8710 + struct ahash_request *req, int dst_len,
8713 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8714 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8715 + struct caam_hash_state *state = ahash_request_ctx(req);
8717 + if (state->ctx_dma) {
8718 + dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
8719 + state->ctx_dma = 0;
8721 + ahash_unmap(dev, edesc, req, dst_len);
8724 +static void ahash_done(void *cbk_ctx, u32 status)
8726 + struct crypto_async_request *areq = cbk_ctx;
8727 + struct ahash_request *req = ahash_request_cast(areq);
8728 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8729 + struct caam_hash_state *state = ahash_request_ctx(req);
8730 + struct ahash_edesc *edesc = state->caam_req.edesc;
8731 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8732 + int digestsize = crypto_ahash_digestsize(ahash);
8736 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8739 + if (unlikely(status)) {
8740 + caam_qi2_strstatus(ctx->dev, status);
8744 + ahash_unmap(ctx->dev, edesc, req, digestsize);
8745 + qi_cache_free(edesc);
8748 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8749 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8752 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8753 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8757 + req->base.complete(&req->base, ecode);
8760 +static void ahash_done_bi(void *cbk_ctx, u32 status)
8762 + struct crypto_async_request *areq = cbk_ctx;
8763 + struct ahash_request *req = ahash_request_cast(areq);
8764 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8765 + struct caam_hash_state *state = ahash_request_ctx(req);
8766 + struct ahash_edesc *edesc = state->caam_req.edesc;
8767 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8770 + int digestsize = crypto_ahash_digestsize(ahash);
8772 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8775 + if (unlikely(status)) {
8776 + caam_qi2_strstatus(ctx->dev, status);
8780 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
8781 + switch_buf(state);
8782 + qi_cache_free(edesc);
8785 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8786 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8789 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8790 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8794 + req->base.complete(&req->base, ecode);
8797 +static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
8799 + struct crypto_async_request *areq = cbk_ctx;
8800 + struct ahash_request *req = ahash_request_cast(areq);
8801 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8802 + struct caam_hash_state *state = ahash_request_ctx(req);
8803 + struct ahash_edesc *edesc = state->caam_req.edesc;
8804 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8805 + int digestsize = crypto_ahash_digestsize(ahash);
8809 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8812 + if (unlikely(status)) {
8813 + caam_qi2_strstatus(ctx->dev, status);
8817 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
8818 + qi_cache_free(edesc);
8821 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8822 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8825 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8826 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8830 + req->base.complete(&req->base, ecode);
8833 +static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
8835 + struct crypto_async_request *areq = cbk_ctx;
8836 + struct ahash_request *req = ahash_request_cast(areq);
8837 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8838 + struct caam_hash_state *state = ahash_request_ctx(req);
8839 + struct ahash_edesc *edesc = state->caam_req.edesc;
8840 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8843 + int digestsize = crypto_ahash_digestsize(ahash);
8845 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8848 + if (unlikely(status)) {
8849 + caam_qi2_strstatus(ctx->dev, status);
8853 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
8854 + switch_buf(state);
8855 + qi_cache_free(edesc);
8858 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8859 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8862 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8863 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8867 + req->base.complete(&req->base, ecode);
8870 +static int ahash_update_ctx(struct ahash_request *req)
8872 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8873 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8874 + struct caam_hash_state *state = ahash_request_ctx(req);
8875 + struct caam_request *req_ctx = &state->caam_req;
8876 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
8877 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
8878 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
8879 + GFP_KERNEL : GFP_ATOMIC;
8880 + u8 *buf = current_buf(state);
8881 + int *buflen = current_buflen(state);
8882 + u8 *next_buf = alt_buf(state);
8883 + int *next_buflen = alt_buflen(state), last_buflen;
8884 + int in_len = *buflen + req->nbytes, to_hash;
8885 + int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
8886 + struct ahash_edesc *edesc;
8889 + last_buflen = *next_buflen;
8890 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
8891 + to_hash = in_len - *next_buflen;
8894 + struct dpaa2_sg_entry *sg_table;
8896 + src_nents = sg_nents_for_len(req->src,
8897 + req->nbytes - (*next_buflen));
8898 + if (src_nents < 0) {
8899 + dev_err(ctx->dev, "Invalid number of src SG.\n");
8904 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
8906 + if (!mapped_nents) {
8907 + dev_err(ctx->dev, "unable to DMA map source\n");
8914 + /* allocate space for base edesc and link tables */
8915 + edesc = qi_cache_zalloc(GFP_DMA | flags);
8917 + dma_unmap_sg(ctx->dev, req->src, src_nents,
8922 + edesc->src_nents = src_nents;
8923 + qm_sg_src_index = 1 + (*buflen ? 1 : 0);
8924 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
8925 + sizeof(*sg_table);
8926 + sg_table = &edesc->sgt[0];
8928 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
8929 + DMA_BIDIRECTIONAL);
8933 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
8937 + if (mapped_nents) {
8938 + sg_to_qm_sg_last(req->src, mapped_nents,
8939 + sg_table + qm_sg_src_index, 0);
8941 + scatterwalk_map_and_copy(next_buf, req->src,
8942 + to_hash - *buflen,
8945 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
8949 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
8950 + qm_sg_bytes, DMA_TO_DEVICE);
8951 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
8952 + dev_err(ctx->dev, "unable to map S/G table\n");
8956 + edesc->qm_sg_bytes = qm_sg_bytes;
8958 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
8959 + dpaa2_fl_set_final(in_fle, true);
8960 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
8961 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
8962 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
8963 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8964 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
8965 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
8967 + req_ctx->flc = &ctx->flc[UPDATE];
8968 + req_ctx->flc_dma = ctx->flc_dma[UPDATE];
8969 + req_ctx->cbk = ahash_done_bi;
8970 + req_ctx->ctx = &req->base;
8971 + req_ctx->edesc = edesc;
8973 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8974 + if (ret != -EINPROGRESS &&
8975 + !(ret == -EBUSY &&
8976 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
8978 + } else if (*next_buflen) {
8979 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
8981 + *buflen = *next_buflen;
8982 + *next_buflen = last_buflen;
8985 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
8986 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
8987 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
8988 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
8994 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
8995 + qi_cache_free(edesc);
8999 +static int ahash_final_ctx(struct ahash_request *req)
9001 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9002 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9003 + struct caam_hash_state *state = ahash_request_ctx(req);
9004 + struct caam_request *req_ctx = &state->caam_req;
9005 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9006 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9007 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9008 + GFP_KERNEL : GFP_ATOMIC;
9009 + int buflen = *current_buflen(state);
9010 + int qm_sg_bytes, qm_sg_src_index;
9011 + int digestsize = crypto_ahash_digestsize(ahash);
9012 + struct ahash_edesc *edesc;
9013 + struct dpaa2_sg_entry *sg_table;
9016 + /* allocate space for base edesc and link tables */
9017 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9021 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
9022 + qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
9023 + sg_table = &edesc->sgt[0];
9025 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
9030 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
9034 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
9036 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
9038 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9039 + dev_err(ctx->dev, "unable to map S/G table\n");
9043 + edesc->qm_sg_bytes = qm_sg_bytes;
9045 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9047 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9048 + dev_err(ctx->dev, "unable to map dst\n");
9049 + edesc->dst_dma = 0;
9054 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9055 + dpaa2_fl_set_final(in_fle, true);
9056 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9057 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9058 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
9059 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9060 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9061 + dpaa2_fl_set_len(out_fle, digestsize);
9063 + req_ctx->flc = &ctx->flc[FINALIZE];
9064 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
9065 + req_ctx->cbk = ahash_done_ctx_src;
9066 + req_ctx->ctx = &req->base;
9067 + req_ctx->edesc = edesc;
9069 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9070 + if (ret == -EINPROGRESS ||
9071 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9075 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
9076 + qi_cache_free(edesc);
9080 +static int ahash_finup_ctx(struct ahash_request *req)
9082 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9083 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9084 + struct caam_hash_state *state = ahash_request_ctx(req);
9085 + struct caam_request *req_ctx = &state->caam_req;
9086 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9087 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9088 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9089 + GFP_KERNEL : GFP_ATOMIC;
9090 + int buflen = *current_buflen(state);
9091 + int qm_sg_bytes, qm_sg_src_index;
9092 + int src_nents, mapped_nents;
9093 + int digestsize = crypto_ahash_digestsize(ahash);
9094 + struct ahash_edesc *edesc;
9095 + struct dpaa2_sg_entry *sg_table;
9098 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9099 + if (src_nents < 0) {
9100 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9105 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9107 + if (!mapped_nents) {
9108 + dev_err(ctx->dev, "unable to DMA map source\n");
9115 + /* allocate space for base edesc and link tables */
9116 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9118 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
9122 + edesc->src_nents = src_nents;
9123 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
9124 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
9125 + sg_table = &edesc->sgt[0];
9127 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
9132 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
9136 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
9138 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
9140 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9141 + dev_err(ctx->dev, "unable to map S/G table\n");
9145 + edesc->qm_sg_bytes = qm_sg_bytes;
9147 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9149 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9150 + dev_err(ctx->dev, "unable to map dst\n");
9151 + edesc->dst_dma = 0;
9156 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9157 + dpaa2_fl_set_final(in_fle, true);
9158 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9159 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9160 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
9161 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9162 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9163 + dpaa2_fl_set_len(out_fle, digestsize);
9165 + req_ctx->flc = &ctx->flc[FINALIZE];
9166 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
9167 + req_ctx->cbk = ahash_done_ctx_src;
9168 + req_ctx->ctx = &req->base;
9169 + req_ctx->edesc = edesc;
9171 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9172 + if (ret == -EINPROGRESS ||
9173 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9177 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
9178 + qi_cache_free(edesc);
9182 +static int ahash_digest(struct ahash_request *req)
9184 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9185 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9186 + struct caam_hash_state *state = ahash_request_ctx(req);
9187 + struct caam_request *req_ctx = &state->caam_req;
9188 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9189 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9190 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9191 + GFP_KERNEL : GFP_ATOMIC;
9192 + int digestsize = crypto_ahash_digestsize(ahash);
9193 + int src_nents, mapped_nents;
9194 + struct ahash_edesc *edesc;
9195 + int ret = -ENOMEM;
9197 + state->buf_dma = 0;
9199 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9200 + if (src_nents < 0) {
9201 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9206 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9208 + if (!mapped_nents) {
9209 + dev_err(ctx->dev, "unable to map source for DMA\n");
9216 + /* allocate space for base edesc and link tables */
9217 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9219 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
9223 + edesc->src_nents = src_nents;
9224 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9226 + if (mapped_nents > 1) {
9228 + struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
9230 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
9231 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
9232 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
9233 + qm_sg_bytes, DMA_TO_DEVICE);
9234 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9235 + dev_err(ctx->dev, "unable to map S/G table\n");
9238 + edesc->qm_sg_bytes = qm_sg_bytes;
9239 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9240 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9242 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
9243 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
9246 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9248 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9249 + dev_err(ctx->dev, "unable to map dst\n");
9250 + edesc->dst_dma = 0;
9254 + dpaa2_fl_set_final(in_fle, true);
9255 + dpaa2_fl_set_len(in_fle, req->nbytes);
9256 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9257 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9258 + dpaa2_fl_set_len(out_fle, digestsize);
9260 + req_ctx->flc = &ctx->flc[DIGEST];
9261 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
9262 + req_ctx->cbk = ahash_done;
9263 + req_ctx->ctx = &req->base;
9264 + req_ctx->edesc = edesc;
9265 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9266 + if (ret == -EINPROGRESS ||
9267 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9271 + ahash_unmap(ctx->dev, edesc, req, digestsize);
9272 + qi_cache_free(edesc);
9276 +static int ahash_final_no_ctx(struct ahash_request *req)
9278 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9279 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9280 + struct caam_hash_state *state = ahash_request_ctx(req);
9281 + struct caam_request *req_ctx = &state->caam_req;
9282 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9283 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9284 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9285 + GFP_KERNEL : GFP_ATOMIC;
9286 + u8 *buf = current_buf(state);
9287 + int buflen = *current_buflen(state);
9288 + int digestsize = crypto_ahash_digestsize(ahash);
9289 + struct ahash_edesc *edesc;
9290 + int ret = -ENOMEM;
9292 + /* allocate space for base edesc and link tables */
9293 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9297 + state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
9298 + if (dma_mapping_error(ctx->dev, state->buf_dma)) {
9299 + dev_err(ctx->dev, "unable to map src\n");
9303 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9305 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9306 + dev_err(ctx->dev, "unable to map dst\n");
9307 + edesc->dst_dma = 0;
9311 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9312 + dpaa2_fl_set_final(in_fle, true);
9313 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
9314 + dpaa2_fl_set_addr(in_fle, state->buf_dma);
9315 + dpaa2_fl_set_len(in_fle, buflen);
9316 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9317 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9318 + dpaa2_fl_set_len(out_fle, digestsize);
9320 + req_ctx->flc = &ctx->flc[DIGEST];
9321 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
9322 + req_ctx->cbk = ahash_done;
9323 + req_ctx->ctx = &req->base;
9324 + req_ctx->edesc = edesc;
9326 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9327 + if (ret == -EINPROGRESS ||
9328 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9332 + ahash_unmap(ctx->dev, edesc, req, digestsize);
9333 + qi_cache_free(edesc);
9337 +static int ahash_update_no_ctx(struct ahash_request *req)
9339 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9340 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9341 + struct caam_hash_state *state = ahash_request_ctx(req);
9342 + struct caam_request *req_ctx = &state->caam_req;
9343 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9344 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9345 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9346 + GFP_KERNEL : GFP_ATOMIC;
9347 + u8 *buf = current_buf(state);
9348 + int *buflen = current_buflen(state);
9349 + u8 *next_buf = alt_buf(state);
9350 + int *next_buflen = alt_buflen(state);
9351 + int in_len = *buflen + req->nbytes, to_hash;
9352 + int qm_sg_bytes, src_nents, mapped_nents;
9353 + struct ahash_edesc *edesc;
9356 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
9357 + to_hash = in_len - *next_buflen;
9360 + struct dpaa2_sg_entry *sg_table;
9362 + src_nents = sg_nents_for_len(req->src,
9363 + req->nbytes - *next_buflen);
9364 + if (src_nents < 0) {
9365 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9370 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9372 + if (!mapped_nents) {
9373 + dev_err(ctx->dev, "unable to DMA map source\n");
9380 + /* allocate space for base edesc and link tables */
9381 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9383 + dma_unmap_sg(ctx->dev, req->src, src_nents,
9388 + edesc->src_nents = src_nents;
9389 + qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
9390 + sg_table = &edesc->sgt[0];
9392 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
9396 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
9399 + scatterwalk_map_and_copy(next_buf, req->src,
9400 + to_hash - *buflen,
9403 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
9404 + qm_sg_bytes, DMA_TO_DEVICE);
9405 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9406 + dev_err(ctx->dev, "unable to map S/G table\n");
9410 + edesc->qm_sg_bytes = qm_sg_bytes;
9412 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
9413 + ctx->ctx_len, DMA_FROM_DEVICE);
9414 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
9415 + dev_err(ctx->dev, "unable to map ctx\n");
9416 + state->ctx_dma = 0;
9421 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9422 + dpaa2_fl_set_final(in_fle, true);
9423 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9424 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9425 + dpaa2_fl_set_len(in_fle, to_hash);
9426 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9427 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
9428 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
9430 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
9431 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
9432 + req_ctx->cbk = ahash_done_ctx_dst;
9433 + req_ctx->ctx = &req->base;
9434 + req_ctx->edesc = edesc;
9436 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9437 + if (ret != -EINPROGRESS &&
9438 + !(ret == -EBUSY &&
9439 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9442 + state->update = ahash_update_ctx;
9443 + state->finup = ahash_finup_ctx;
9444 + state->final = ahash_final_ctx;
9445 + } else if (*next_buflen) {
9446 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
9448 + *buflen = *next_buflen;
9452 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
9453 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
9454 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
9455 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
9461 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
9462 + qi_cache_free(edesc);
9466 +static int ahash_finup_no_ctx(struct ahash_request *req)
9468 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9469 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9470 + struct caam_hash_state *state = ahash_request_ctx(req);
9471 + struct caam_request *req_ctx = &state->caam_req;
9472 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9473 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9474 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9475 + GFP_KERNEL : GFP_ATOMIC;
9476 + int buflen = *current_buflen(state);
9477 + int qm_sg_bytes, src_nents, mapped_nents;
9478 + int digestsize = crypto_ahash_digestsize(ahash);
9479 + struct ahash_edesc *edesc;
9480 + struct dpaa2_sg_entry *sg_table;
9483 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9484 + if (src_nents < 0) {
9485 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9490 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9492 + if (!mapped_nents) {
9493 + dev_err(ctx->dev, "unable to DMA map source\n");
9500 + /* allocate space for base edesc and link tables */
9501 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9503 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
9507 + edesc->src_nents = src_nents;
9508 + qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
9509 + sg_table = &edesc->sgt[0];
9511 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
9515 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
9517 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
9519 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9520 + dev_err(ctx->dev, "unable to map S/G table\n");
9524 + edesc->qm_sg_bytes = qm_sg_bytes;
9526 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9528 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9529 + dev_err(ctx->dev, "unable to map dst\n");
9530 + edesc->dst_dma = 0;
9535 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9536 + dpaa2_fl_set_final(in_fle, true);
9537 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9538 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9539 + dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
9540 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9541 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9542 + dpaa2_fl_set_len(out_fle, digestsize);
9544 + req_ctx->flc = &ctx->flc[DIGEST];
9545 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
9546 + req_ctx->cbk = ahash_done;
9547 + req_ctx->ctx = &req->base;
9548 + req_ctx->edesc = edesc;
9549 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9550 + if (ret != -EINPROGRESS &&
9551 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9556 + ahash_unmap(ctx->dev, edesc, req, digestsize);
9557 + qi_cache_free(edesc);
9561 +static int ahash_update_first(struct ahash_request *req)
9563 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9564 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9565 + struct caam_hash_state *state = ahash_request_ctx(req);
9566 + struct caam_request *req_ctx = &state->caam_req;
9567 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9568 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9569 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9570 + GFP_KERNEL : GFP_ATOMIC;
9571 + u8 *next_buf = alt_buf(state);
9572 + int *next_buflen = alt_buflen(state);
9574 + int src_nents, mapped_nents;
9575 + struct ahash_edesc *edesc;
9578 + *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
9580 + to_hash = req->nbytes - *next_buflen;
9583 + struct dpaa2_sg_entry *sg_table;
9585 + src_nents = sg_nents_for_len(req->src,
9586 + req->nbytes - (*next_buflen));
9587 + if (src_nents < 0) {
9588 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9593 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9595 + if (!mapped_nents) {
9596 + dev_err(ctx->dev, "unable to map source for DMA\n");
9603 + /* allocate space for base edesc and link tables */
9604 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9606 + dma_unmap_sg(ctx->dev, req->src, src_nents,
9611 + edesc->src_nents = src_nents;
9612 + sg_table = &edesc->sgt[0];
9614 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9615 + dpaa2_fl_set_final(in_fle, true);
9616 + dpaa2_fl_set_len(in_fle, to_hash);
9618 + if (mapped_nents > 1) {
9621 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
9622 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
9623 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
9626 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9627 + dev_err(ctx->dev, "unable to map S/G table\n");
9631 + edesc->qm_sg_bytes = qm_sg_bytes;
9632 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9633 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9635 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
9636 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
9640 + scatterwalk_map_and_copy(next_buf, req->src, to_hash,
9643 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
9644 + ctx->ctx_len, DMA_FROM_DEVICE);
9645 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
9646 + dev_err(ctx->dev, "unable to map ctx\n");
9647 + state->ctx_dma = 0;
9652 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9653 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
9654 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
9656 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
9657 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
9658 + req_ctx->cbk = ahash_done_ctx_dst;
9659 + req_ctx->ctx = &req->base;
9660 + req_ctx->edesc = edesc;
9662 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9663 + if (ret != -EINPROGRESS &&
9664 + !(ret == -EBUSY && req->base.flags &
9665 + CRYPTO_TFM_REQ_MAY_BACKLOG))
9668 + state->update = ahash_update_ctx;
9669 + state->finup = ahash_finup_ctx;
9670 + state->final = ahash_final_ctx;
9671 + } else if (*next_buflen) {
9672 + state->update = ahash_update_no_ctx;
9673 + state->finup = ahash_finup_no_ctx;
9674 + state->final = ahash_final_no_ctx;
9675 + scatterwalk_map_and_copy(next_buf, req->src, 0,
9677 + switch_buf(state);
9680 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
9681 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
9686 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
9687 + qi_cache_free(edesc);
9691 +static int ahash_finup_first(struct ahash_request *req)
9693 + return ahash_digest(req);
9696 +static int ahash_init(struct ahash_request *req)
9698 + struct caam_hash_state *state = ahash_request_ctx(req);
9700 + state->update = ahash_update_first;
9701 + state->finup = ahash_finup_first;
9702 + state->final = ahash_final_no_ctx;
9704 + state->ctx_dma = 0;
9705 + state->current_buf = 0;
9706 + state->buf_dma = 0;
9707 + state->buflen_0 = 0;
9708 + state->buflen_1 = 0;
9713 +static int ahash_update(struct ahash_request *req)
9715 + struct caam_hash_state *state = ahash_request_ctx(req);
9717 + return state->update(req);
9720 +static int ahash_finup(struct ahash_request *req)
9722 + struct caam_hash_state *state = ahash_request_ctx(req);
9724 + return state->finup(req);
9727 +static int ahash_final(struct ahash_request *req)
9729 + struct caam_hash_state *state = ahash_request_ctx(req);
9731 + return state->final(req);
9734 +static int ahash_export(struct ahash_request *req, void *out)
9736 + struct caam_hash_state *state = ahash_request_ctx(req);
9737 + struct caam_export_state *export = out;
9741 + if (state->current_buf) {
9742 + buf = state->buf_1;
9743 + len = state->buflen_1;
9745 + buf = state->buf_0;
9746 + len = state->buflen_0;
9749 + memcpy(export->buf, buf, len);
9750 + memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
9751 + export->buflen = len;
9752 + export->update = state->update;
9753 + export->final = state->final;
9754 + export->finup = state->finup;
9759 +static int ahash_import(struct ahash_request *req, const void *in)
9761 + struct caam_hash_state *state = ahash_request_ctx(req);
9762 + const struct caam_export_state *export = in;
9764 + memset(state, 0, sizeof(*state));
9765 + memcpy(state->buf_0, export->buf, export->buflen);
9766 + memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
9767 + state->buflen_0 = export->buflen;
9768 + state->update = export->update;
9769 + state->final = export->final;
9770 + state->finup = export->finup;
9775 +struct caam_hash_template {
9776 + char name[CRYPTO_MAX_ALG_NAME];
9777 + char driver_name[CRYPTO_MAX_ALG_NAME];
9778 + char hmac_name[CRYPTO_MAX_ALG_NAME];
9779 + char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
9780 + unsigned int blocksize;
9781 + struct ahash_alg template_ahash;
9785 +/* ahash descriptors */
9786 +static struct caam_hash_template driver_hash[] = {
9789 + .driver_name = "sha1-caam-qi2",
9790 + .hmac_name = "hmac(sha1)",
9791 + .hmac_driver_name = "hmac-sha1-caam-qi2",
9792 + .blocksize = SHA1_BLOCK_SIZE,
9793 + .template_ahash = {
9794 + .init = ahash_init,
9795 + .update = ahash_update,
9796 + .final = ahash_final,
9797 + .finup = ahash_finup,
9798 + .digest = ahash_digest,
9799 + .export = ahash_export,
9800 + .import = ahash_import,
9801 + .setkey = ahash_setkey,
9803 + .digestsize = SHA1_DIGEST_SIZE,
9804 + .statesize = sizeof(struct caam_export_state),
9807 + .alg_type = OP_ALG_ALGSEL_SHA1,
9810 + .driver_name = "sha224-caam-qi2",
9811 + .hmac_name = "hmac(sha224)",
9812 + .hmac_driver_name = "hmac-sha224-caam-qi2",
9813 + .blocksize = SHA224_BLOCK_SIZE,
9814 + .template_ahash = {
9815 + .init = ahash_init,
9816 + .update = ahash_update,
9817 + .final = ahash_final,
9818 + .finup = ahash_finup,
9819 + .digest = ahash_digest,
9820 + .export = ahash_export,
9821 + .import = ahash_import,
9822 + .setkey = ahash_setkey,
9824 + .digestsize = SHA224_DIGEST_SIZE,
9825 + .statesize = sizeof(struct caam_export_state),
9828 + .alg_type = OP_ALG_ALGSEL_SHA224,
9831 + .driver_name = "sha256-caam-qi2",
9832 + .hmac_name = "hmac(sha256)",
9833 + .hmac_driver_name = "hmac-sha256-caam-qi2",
9834 + .blocksize = SHA256_BLOCK_SIZE,
9835 + .template_ahash = {
9836 + .init = ahash_init,
9837 + .update = ahash_update,
9838 + .final = ahash_final,
9839 + .finup = ahash_finup,
9840 + .digest = ahash_digest,
9841 + .export = ahash_export,
9842 + .import = ahash_import,
9843 + .setkey = ahash_setkey,
9845 + .digestsize = SHA256_DIGEST_SIZE,
9846 + .statesize = sizeof(struct caam_export_state),
9849 + .alg_type = OP_ALG_ALGSEL_SHA256,
9852 + .driver_name = "sha384-caam-qi2",
9853 + .hmac_name = "hmac(sha384)",
9854 + .hmac_driver_name = "hmac-sha384-caam-qi2",
9855 + .blocksize = SHA384_BLOCK_SIZE,
9856 + .template_ahash = {
9857 + .init = ahash_init,
9858 + .update = ahash_update,
9859 + .final = ahash_final,
9860 + .finup = ahash_finup,
9861 + .digest = ahash_digest,
9862 + .export = ahash_export,
9863 + .import = ahash_import,
9864 + .setkey = ahash_setkey,
9866 + .digestsize = SHA384_DIGEST_SIZE,
9867 + .statesize = sizeof(struct caam_export_state),
9870 + .alg_type = OP_ALG_ALGSEL_SHA384,
9873 + .driver_name = "sha512-caam-qi2",
9874 + .hmac_name = "hmac(sha512)",
9875 + .hmac_driver_name = "hmac-sha512-caam-qi2",
9876 + .blocksize = SHA512_BLOCK_SIZE,
9877 + .template_ahash = {
9878 + .init = ahash_init,
9879 + .update = ahash_update,
9880 + .final = ahash_final,
9881 + .finup = ahash_finup,
9882 + .digest = ahash_digest,
9883 + .export = ahash_export,
9884 + .import = ahash_import,
9885 + .setkey = ahash_setkey,
9887 + .digestsize = SHA512_DIGEST_SIZE,
9888 + .statesize = sizeof(struct caam_export_state),
9891 + .alg_type = OP_ALG_ALGSEL_SHA512,
9894 + .driver_name = "md5-caam-qi2",
9895 + .hmac_name = "hmac(md5)",
9896 + .hmac_driver_name = "hmac-md5-caam-qi2",
9897 + .blocksize = MD5_BLOCK_WORDS * 4,
9898 + .template_ahash = {
9899 + .init = ahash_init,
9900 + .update = ahash_update,
9901 + .final = ahash_final,
9902 + .finup = ahash_finup,
9903 + .digest = ahash_digest,
9904 + .export = ahash_export,
9905 + .import = ahash_import,
9906 + .setkey = ahash_setkey,
9908 + .digestsize = MD5_DIGEST_SIZE,
9909 + .statesize = sizeof(struct caam_export_state),
9912 + .alg_type = OP_ALG_ALGSEL_MD5,
9916 +struct caam_hash_alg {
9917 + struct list_head entry;
9918 + struct device *dev;
9920 + struct ahash_alg ahash_alg;
9923 +static int caam_hash_cra_init(struct crypto_tfm *tfm)
9925 + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
9926 + struct crypto_alg *base = tfm->__crt_alg;
9927 + struct hash_alg_common *halg =
9928 + container_of(base, struct hash_alg_common, base);
9929 + struct ahash_alg *alg =
9930 + container_of(halg, struct ahash_alg, halg);
9931 + struct caam_hash_alg *caam_hash =
9932 + container_of(alg, struct caam_hash_alg, ahash_alg);
9933 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
9934 + /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
9935 + static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
9936 + HASH_MSG_LEN + SHA1_DIGEST_SIZE,
9937 + HASH_MSG_LEN + 32,
9938 + HASH_MSG_LEN + SHA256_DIGEST_SIZE,
9939 + HASH_MSG_LEN + 64,
9940 + HASH_MSG_LEN + SHA512_DIGEST_SIZE };
9941 + dma_addr_t dma_addr;
9944 + ctx->dev = caam_hash->dev;
9946 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
9947 + DMA_BIDIRECTIONAL,
9948 + DMA_ATTR_SKIP_CPU_SYNC);
9949 + if (dma_mapping_error(ctx->dev, dma_addr)) {
9950 + dev_err(ctx->dev, "unable to map shared descriptors\n");
9954 + for (i = 0; i < HASH_NUM_OP; i++)
9955 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
9957 + /* copy descriptor header template value */
9958 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
9960 + ctx->ctx_len = runninglen[(ctx->adata.algtype &
9961 + OP_ALG_ALGSEL_SUBMASK) >>
9962 + OP_ALG_ALGSEL_SHIFT];
9964 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
9965 + sizeof(struct caam_hash_state));
9967 + return ahash_set_sh_desc(ahash);
9970 +static void caam_hash_cra_exit(struct crypto_tfm *tfm)
9972 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
9974 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
9975 + DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
9978 +static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
9979 + struct caam_hash_template *template, bool keyed)
9981 + struct caam_hash_alg *t_alg;
9982 + struct ahash_alg *halg;
9983 + struct crypto_alg *alg;
9985 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
9987 + return ERR_PTR(-ENOMEM);
9989 + t_alg->ahash_alg = template->template_ahash;
9990 + halg = &t_alg->ahash_alg;
9991 + alg = &halg->halg.base;
9994 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
9995 + template->hmac_name);
9996 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
9997 + template->hmac_driver_name);
9999 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
10001 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
10002 + template->driver_name);
10003 + t_alg->ahash_alg.setkey = NULL;
10005 + alg->cra_module = THIS_MODULE;
10006 + alg->cra_init = caam_hash_cra_init;
10007 + alg->cra_exit = caam_hash_cra_exit;
10008 + alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
10009 + alg->cra_priority = CAAM_CRA_PRIORITY;
10010 + alg->cra_blocksize = template->blocksize;
10011 + alg->cra_alignmask = 0;
10012 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
10013 + alg->cra_type = &crypto_ahash_type;
10015 + t_alg->alg_type = template->alg_type;
10016 + t_alg->dev = dev;
10021 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
10023 + struct dpaa2_caam_priv_per_cpu *ppriv;
10025 + ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
10026 + napi_schedule_irqoff(&ppriv->napi);
10029 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
10031 + struct device *dev = priv->dev;
10032 + struct dpaa2_io_notification_ctx *nctx;
10033 + struct dpaa2_caam_priv_per_cpu *ppriv;
10034 + int err, i = 0, cpu;
10036 + for_each_online_cpu(cpu) {
10037 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10038 + ppriv->priv = priv;
10039 + nctx = &ppriv->nctx;
10040 + nctx->is_cdan = 0;
10041 + nctx->id = ppriv->rsp_fqid;
10042 + nctx->desired_cpu = cpu;
10043 + nctx->cb = dpaa2_caam_fqdan_cb;
10045 + /* Register notification callbacks */
10046 + ppriv->dpio = dpaa2_io_service_select(cpu);
10047 + err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
10048 + if (unlikely(err)) {
10049 + dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
10052 + * If no affine DPIO for this core, there's probably
10053 + * none available for next cores either. Signal we want
10054 + * to retry later, in case the DPIO devices weren't
10057 + err = -EPROBE_DEFER;
10061 + ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
10063 + if (unlikely(!ppriv->store)) {
10064 + dev_err(dev, "dpaa2_io_store_create() failed\n");
10069 + if (++i == priv->num_pairs)
10076 + for_each_online_cpu(cpu) {
10077 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10078 + if (!ppriv->nctx.cb)
10080 + dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
10083 + for_each_online_cpu(cpu) {
10084 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10085 + if (!ppriv->store)
10087 + dpaa2_io_store_destroy(ppriv->store);
10093 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
10095 + struct dpaa2_caam_priv_per_cpu *ppriv;
10096 + struct device *dev = priv->dev;
10099 + for_each_online_cpu(cpu) {
10100 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10101 + dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
10102 + dpaa2_io_store_destroy(ppriv->store);
10104 + if (++i == priv->num_pairs)
10109 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
10111 + struct dpseci_rx_queue_cfg rx_queue_cfg;
10112 + struct device *dev = priv->dev;
10113 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10114 + struct dpaa2_caam_priv_per_cpu *ppriv;
10115 + int err = 0, i = 0, cpu;
10117 + /* Configure Rx queues */
10118 + for_each_online_cpu(cpu) {
10119 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10121 + rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
10122 + DPSECI_QUEUE_OPT_USER_CTX;
10123 + rx_queue_cfg.order_preservation_en = 0;
10124 + rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
10125 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
10127 + * Rx priority (WQ) doesn't really matter, since we use
10128 + * pull mode, i.e. volatile dequeues from specific FQs
10130 + rx_queue_cfg.dest_cfg.priority = 0;
10131 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
10133 + err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
10136 + dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
10141 + if (++i == priv->num_pairs)
10148 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
10150 + struct device *dev = priv->dev;
10152 + if (!priv->cscn_mem)
10155 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
10156 + kfree(priv->cscn_mem);
10159 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
10161 + struct device *dev = priv->dev;
10162 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10164 + dpaa2_dpseci_congestion_free(priv);
10165 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
10168 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
10169 + const struct dpaa2_fd *fd)
10171 + struct caam_request *req;
10174 + if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
10175 + dev_err(priv->dev, "Only Frame List FD format is supported!\n");
10179 + fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
10180 + if (unlikely(fd_err))
10181 + dev_err(priv->dev, "FD error: %08x\n", fd_err);
10184 + * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
10185 + * in FD[ERR] or FD[FRC].
10187 + req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
10188 + dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
10189 + DMA_BIDIRECTIONAL);
10190 + req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
10193 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
10197 + /* Retry while portal is busy */
10199 + err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
10201 + } while (err == -EBUSY);
10203 + if (unlikely(err))
10204 + dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
10209 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
10211 + struct dpaa2_dq *dq;
10212 + int cleaned = 0, is_last;
10215 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
10216 + if (unlikely(!dq)) {
10217 + if (unlikely(!is_last)) {
10218 + dev_dbg(ppriv->priv->dev,
10219 + "FQ %d returned no valid frames\n",
10220 + ppriv->rsp_fqid);
10222 + * MUST retry until we get some sort of
10223 + * valid response token (be it "empty dequeue"
10224 + * or a valid frame).
10232 + dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
10234 + } while (!is_last);
10239 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
10241 + struct dpaa2_caam_priv_per_cpu *ppriv;
10242 + struct dpaa2_caam_priv *priv;
10243 + int err, cleaned = 0, store_cleaned;
10245 + ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
10246 + priv = ppriv->priv;
10248 + if (unlikely(dpaa2_caam_pull_fq(ppriv)))
10252 + store_cleaned = dpaa2_caam_store_consume(ppriv);
10253 + cleaned += store_cleaned;
10255 + if (store_cleaned == 0 ||
10256 + cleaned > budget - DPAA2_CAAM_STORE_SIZE)
10259 + /* Try to dequeue some more */
10260 + err = dpaa2_caam_pull_fq(ppriv);
10261 + if (unlikely(err))
10265 + if (cleaned < budget) {
10266 + napi_complete_done(napi, cleaned);
10267 + err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
10268 + if (unlikely(err))
10269 + dev_err(priv->dev, "Notification rearm failed: %d\n",
10276 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
10279 + struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
10280 + struct device *dev = priv->dev;
10284 + * Congestion group feature supported starting with DPSECI API v5.1
10285 + * and only when object has been created with this capability.
10287 + if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
10288 + !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
10291 + priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
10292 + GFP_KERNEL | GFP_DMA);
10293 + if (!priv->cscn_mem)
10296 + priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
10297 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
10298 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
10299 + if (dma_mapping_error(dev, priv->cscn_dma)) {
10300 + dev_err(dev, "Error mapping CSCN memory area\n");
10302 + goto err_dma_map;
10305 + cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
10306 + cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
10307 + cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
10308 + cong_notif_cfg.message_ctx = (u64)priv;
10309 + cong_notif_cfg.message_iova = priv->cscn_dma;
10310 + cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
10311 + DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
10312 + DPSECI_CGN_MODE_COHERENT_WRITE;
10314 + err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
10315 + &cong_notif_cfg);
10317 + dev_err(dev, "dpseci_set_congestion_notification failed\n");
10318 + goto err_set_cong;
10324 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
10326 + kfree(priv->cscn_mem);
10331 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
10333 + struct device *dev = &ls_dev->dev;
10334 + struct dpaa2_caam_priv *priv;
10335 + struct dpaa2_caam_priv_per_cpu *ppriv;
10339 + priv = dev_get_drvdata(dev);
10342 + priv->dpsec_id = ls_dev->obj_desc.id;
10344 + /* Get a handle for the DPSECI this interface is associate with */
10345 + err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
10347 + dev_err(dev, "dpsec_open() failed: %d\n", err);
10351 + dev_info(dev, "Opened dpseci object successfully\n");
10353 + err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
10354 + &priv->minor_ver);
10356 + dev_err(dev, "dpseci_get_api_version() failed\n");
10357 + goto err_get_vers;
10360 + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
10361 + &priv->dpseci_attr);
10363 + dev_err(dev, "dpseci_get_attributes() failed\n");
10364 + goto err_get_vers;
10367 + err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
10368 + &priv->sec_attr);
10370 + dev_err(dev, "dpseci_get_sec_attr() failed\n");
10371 + goto err_get_vers;
10374 + err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
10376 + dev_err(dev, "setup_congestion() failed\n");
10377 + goto err_get_vers;
10380 + priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
10381 + priv->dpseci_attr.num_tx_queues);
10382 + if (priv->num_pairs > num_online_cpus()) {
10383 + dev_warn(dev, "%d queues won't be used\n",
10384 + priv->num_pairs - num_online_cpus());
10385 + priv->num_pairs = num_online_cpus();
10388 + for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
10389 + err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
10390 + &priv->rx_queue_attr[i]);
10392 + dev_err(dev, "dpseci_get_rx_queue() failed\n");
10393 + goto err_get_rx_queue;
10397 + for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
10398 + err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
10399 + &priv->tx_queue_attr[i]);
10401 + dev_err(dev, "dpseci_get_tx_queue() failed\n");
10402 + goto err_get_rx_queue;
10407 + for_each_online_cpu(cpu) {
10410 + j = i % priv->num_pairs;
10412 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10413 + ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
10416 + * Allow all cores to enqueue, while only some of them
10417 + * will take part in dequeuing.
10419 + if (++i > priv->num_pairs)
10422 + ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
10425 + dev_info(dev, "pair %d: rx queue %d, tx queue %d\n", j,
10426 + priv->rx_queue_attr[j].fqid,
10427 + priv->tx_queue_attr[j].fqid);
10429 + ppriv->net_dev.dev = *dev;
10430 + INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
10431 + netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
10432 + DPAA2_CAAM_NAPI_WEIGHT);
10438 + dpaa2_dpseci_congestion_free(priv);
10440 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
10445 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
10447 + struct device *dev = priv->dev;
10448 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10449 + struct dpaa2_caam_priv_per_cpu *ppriv;
10452 + for (i = 0; i < priv->num_pairs; i++) {
10453 + ppriv = per_cpu_ptr(priv->ppriv, i);
10454 + napi_enable(&ppriv->napi);
10457 + err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
10459 + dev_err(dev, "dpseci_enable() failed\n");
10463 + dev_info(dev, "DPSECI version %d.%d\n",
10465 + priv->minor_ver);
10470 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
10472 + struct device *dev = priv->dev;
10473 + struct dpaa2_caam_priv_per_cpu *ppriv;
10474 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10475 + int i, err = 0, enabled;
10477 + err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
10479 + dev_err(dev, "dpseci_disable() failed\n");
10483 + err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
10485 + dev_err(dev, "dpseci_is_enabled() failed\n");
10489 + dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
10491 + for (i = 0; i < priv->num_pairs; i++) {
10492 + ppriv = per_cpu_ptr(priv->ppriv, i);
10493 + napi_disable(&ppriv->napi);
10494 + netif_napi_del(&ppriv->napi);
10500 +static struct list_head hash_list;
10502 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
10504 + struct device *dev;
10505 + struct dpaa2_caam_priv *priv;
10507 + bool registered = false;
10510 + * There is no way to get CAAM endianness - there is no direct register
10511 + * space access and MC f/w does not provide this attribute.
10512 + * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
10515 + caam_little_end = true;
10517 + caam_imx = false;
10519 + dev = &dpseci_dev->dev;
10521 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
10525 + dev_set_drvdata(dev, priv);
10527 + priv->domain = iommu_get_domain_for_dev(dev);
10529 + qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
10530 + 0, SLAB_CACHE_DMA, NULL);
10532 + dev_err(dev, "Can't allocate SEC cache\n");
10534 + goto err_qicache;
10537 + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
10539 + dev_err(dev, "dma_set_mask_and_coherent() failed\n");
10540 + goto err_dma_mask;
10543 + /* Obtain a MC portal */
10544 + err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
10546 + if (err == -ENXIO)
10547 + err = -EPROBE_DEFER;
10549 + dev_err(dev, "MC portal allocation failed\n");
10551 + goto err_dma_mask;
10554 + priv->ppriv = alloc_percpu(*priv->ppriv);
10555 + if (!priv->ppriv) {
10556 + dev_err(dev, "alloc_percpu() failed\n");
10558 + goto err_alloc_ppriv;
10561 + /* DPSECI initialization */
10562 + err = dpaa2_dpseci_setup(dpseci_dev);
10564 + dev_err(dev, "dpaa2_dpseci_setup() failed\n");
10565 + goto err_dpseci_setup;
10569 + err = dpaa2_dpseci_dpio_setup(priv);
10571 + if (err != -EPROBE_DEFER)
10572 + dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
10573 + goto err_dpio_setup;
10576 + /* DPSECI binding to DPIO */
10577 + err = dpaa2_dpseci_bind(priv);
10579 + dev_err(dev, "dpaa2_dpseci_bind() failed\n");
10583 + /* DPSECI enable */
10584 + err = dpaa2_dpseci_enable(priv);
10586 + dev_err(dev, "dpaa2_dpseci_enable() failed");
10590 + /* register crypto algorithms the device supports */
10591 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
10592 + struct caam_skcipher_alg *t_alg = driver_algs + i;
10593 + u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
10595 + /* Skip DES algorithms if not supported by device */
10596 + if (!priv->sec_attr.des_acc_num &&
10597 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
10598 + (alg_sel == OP_ALG_ALGSEL_DES)))
10601 + /* Skip AES algorithms if not supported by device */
10602 + if (!priv->sec_attr.aes_acc_num &&
10603 + (alg_sel == OP_ALG_ALGSEL_AES))
10606 + /* Skip CHACHA20 algorithms if not supported by device */
10607 + if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
10608 + !priv->sec_attr.ccha_acc_num)
10611 + t_alg->caam.dev = dev;
10612 + caam_skcipher_alg_init(t_alg);
10614 + err = crypto_register_skcipher(&t_alg->skcipher);
10616 + dev_warn(dev, "%s alg registration failed: %d\n",
10617 + t_alg->skcipher.base.cra_driver_name, err);
10621 + t_alg->registered = true;
10622 + registered = true;
10625 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
10626 + struct caam_aead_alg *t_alg = driver_aeads + i;
10627 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
10628 + OP_ALG_ALGSEL_MASK;
10629 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
10630 + OP_ALG_ALGSEL_MASK;
10632 + /* Skip DES algorithms if not supported by device */
10633 + if (!priv->sec_attr.des_acc_num &&
10634 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
10635 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
10638 + /* Skip AES algorithms if not supported by device */
10639 + if (!priv->sec_attr.aes_acc_num &&
10640 + (c1_alg_sel == OP_ALG_ALGSEL_AES))
10643 + /* Skip CHACHA20 algorithms if not supported by device */
10644 + if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
10645 + !priv->sec_attr.ccha_acc_num)
10648 + /* Skip POLY1305 algorithms if not supported by device */
10649 + if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
10650 + !priv->sec_attr.ptha_acc_num)
10654 + * Skip algorithms requiring message digests
10655 + * if MD not supported by device.
10657 + if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
10658 + !priv->sec_attr.md_acc_num)
10661 + t_alg->caam.dev = dev;
10662 + caam_aead_alg_init(t_alg);
10664 + err = crypto_register_aead(&t_alg->aead);
10666 + dev_warn(dev, "%s alg registration failed: %d\n",
10667 + t_alg->aead.base.cra_driver_name, err);
10671 + t_alg->registered = true;
10672 + registered = true;
10675 + dev_info(dev, "algorithms registered in /proc/crypto\n");
10677 + /* register hash algorithms the device supports */
10678 + INIT_LIST_HEAD(&hash_list);
10681 + * Skip registration of any hashing algorithms if MD block
10682 + * is not present.
10684 + if (!priv->sec_attr.md_acc_num)
10687 + for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
10688 + struct caam_hash_alg *t_alg;
10689 + struct caam_hash_template *alg = driver_hash + i;
10691 + /* register hmac version */
10692 + t_alg = caam_hash_alloc(dev, alg, true);
10693 + if (IS_ERR(t_alg)) {
10694 + err = PTR_ERR(t_alg);
10695 + dev_warn(dev, "%s hash alg allocation failed: %d\n",
10696 + alg->driver_name, err);
10700 + err = crypto_register_ahash(&t_alg->ahash_alg);
10702 + dev_warn(dev, "%s alg registration failed: %d\n",
10703 + t_alg->ahash_alg.halg.base.cra_driver_name,
10707 + list_add_tail(&t_alg->entry, &hash_list);
10710 + /* register unkeyed version */
10711 + t_alg = caam_hash_alloc(dev, alg, false);
10712 + if (IS_ERR(t_alg)) {
10713 + err = PTR_ERR(t_alg);
10714 + dev_warn(dev, "%s alg allocation failed: %d\n",
10715 + alg->driver_name, err);
10719 + err = crypto_register_ahash(&t_alg->ahash_alg);
10721 + dev_warn(dev, "%s alg registration failed: %d\n",
10722 + t_alg->ahash_alg.halg.base.cra_driver_name,
10726 + list_add_tail(&t_alg->entry, &hash_list);
10729 + if (!list_empty(&hash_list))
10730 + dev_info(dev, "hash algorithms registered in /proc/crypto\n");
10735 + dpaa2_dpseci_dpio_free(priv);
10737 + dpaa2_dpseci_free(priv);
10739 + free_percpu(priv->ppriv);
10741 + fsl_mc_portal_free(priv->mc_io);
10743 + kmem_cache_destroy(qi_cache);
10745 + dev_set_drvdata(dev, NULL);
10750 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
10752 + struct device *dev;
10753 + struct dpaa2_caam_priv *priv;
10756 + dev = &ls_dev->dev;
10757 + priv = dev_get_drvdata(dev);
10759 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
10760 + struct caam_aead_alg *t_alg = driver_aeads + i;
10762 + if (t_alg->registered)
10763 + crypto_unregister_aead(&t_alg->aead);
10766 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
10767 + struct caam_skcipher_alg *t_alg = driver_algs + i;
10769 + if (t_alg->registered)
10770 + crypto_unregister_skcipher(&t_alg->skcipher);
10773 + if (hash_list.next) {
10774 + struct caam_hash_alg *t_hash_alg, *p;
10776 + list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
10777 + crypto_unregister_ahash(&t_hash_alg->ahash_alg);
10778 + list_del(&t_hash_alg->entry);
10779 + kfree(t_hash_alg);
10783 + dpaa2_dpseci_disable(priv);
10784 + dpaa2_dpseci_dpio_free(priv);
10785 + dpaa2_dpseci_free(priv);
10786 + free_percpu(priv->ppriv);
10787 + fsl_mc_portal_free(priv->mc_io);
10788 + dev_set_drvdata(dev, NULL);
10789 + kmem_cache_destroy(qi_cache);
10794 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
10796 + struct dpaa2_fd fd;
10797 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
10798 + struct dpaa2_caam_priv_per_cpu *ppriv;
10802 + return PTR_ERR(req);
10804 + if (priv->cscn_mem) {
10805 + dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
10807 + DMA_FROM_DEVICE);
10808 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
10809 + dev_dbg_ratelimited(dev, "Dropping request\n");
10814 + dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
10816 + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
10817 + DMA_BIDIRECTIONAL);
10818 + if (dma_mapping_error(dev, req->fd_flt_dma)) {
10819 + dev_err(dev, "DMA mapping error for QI enqueue request\n");
10823 + memset(&fd, 0, sizeof(fd));
10824 + dpaa2_fd_set_format(&fd, dpaa2_fd_list);
10825 + dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
10826 + dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
10827 + dpaa2_fd_set_flc(&fd, req->flc_dma);
10829 + ppriv = this_cpu_ptr(priv->ppriv);
10830 + for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
10831 + err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
10833 + if (err != -EBUSY)
10839 + if (unlikely(err)) {
10840 + dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
10844 + return -EINPROGRESS;
10847 + dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
10848 + DMA_BIDIRECTIONAL);
10851 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
10853 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
10855 + .vendor = FSL_MC_VENDOR_FREESCALE,
10856 + .obj_type = "dpseci",
10858 + { .vendor = 0x0 }
10861 +static struct fsl_mc_driver dpaa2_caam_driver = {
10863 + .name = KBUILD_MODNAME,
10864 + .owner = THIS_MODULE,
10866 + .probe = dpaa2_caam_probe,
10867 + .remove = dpaa2_caam_remove,
10868 + .match_id_table = dpaa2_caam_match_id_table
10871 +MODULE_LICENSE("Dual BSD/GPL");
10872 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
10873 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
10875 +module_fsl_mc_driver(dpaa2_caam_driver);
10877 +++ b/drivers/crypto/caam/caamalg_qi2.h
10880 + * Copyright 2015-2016 Freescale Semiconductor Inc.
10881 + * Copyright 2017 NXP
10883 + * Redistribution and use in source and binary forms, with or without
10884 + * modification, are permitted provided that the following conditions are met:
10885 + * * Redistributions of source code must retain the above copyright
10886 + * notice, this list of conditions and the following disclaimer.
10887 + * * Redistributions in binary form must reproduce the above copyright
10888 + * notice, this list of conditions and the following disclaimer in the
10889 + * documentation and/or other materials provided with the distribution.
10890 + * * Neither the names of the above-listed copyright holders nor the
10891 + * names of any contributors may be used to endorse or promote products
10892 + * derived from this software without specific prior written permission.
10895 + * ALTERNATIVELY, this software may be distributed under the terms of the
10896 + * GNU General Public License ("GPL") as published by the Free Software
10897 + * Foundation, either version 2 of that License or (at your option) any
10900 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
10901 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
10902 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
10903 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
10904 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
10905 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
10906 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
10907 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
10908 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
10909 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
10910 + * POSSIBILITY OF SUCH DAMAGE.
10913 +#ifndef _CAAMALG_QI2_H_
10914 +#define _CAAMALG_QI2_H_
10916 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
10917 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
10918 +#include <linux/threads.h>
10919 +#include "dpseci.h"
10920 +#include "desc_constr.h"
10922 +#define DPAA2_CAAM_STORE_SIZE 16
10923 +/* NAPI weight *must* be a multiple of the store size. */
10924 +#define DPAA2_CAAM_NAPI_WEIGHT 64
10926 +/* The congestion entrance threshold was chosen so that on LS2088
10927 + * we support the maximum throughput for the available memory
10929 +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
10930 +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
10933 + * dpaa2_caam_priv - driver private data
10934 + * @dpseci_id: DPSECI object unique ID
10935 + * @major_ver: DPSECI major version
10936 + * @minor_ver: DPSECI minor version
10937 + * @dpseci_attr: DPSECI attributes
10938 + * @sec_attr: SEC engine attributes
10939 + * @rx_queue_attr: array of Rx queue attributes
10940 + * @tx_queue_attr: array of Tx queue attributes
10941 + * @cscn_mem: pointer to memory region containing the
10942 + * dpaa2_cscn struct; it's size is larger than
10943 + * sizeof(struct dpaa2_cscn) to accommodate alignment
10944 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
10945 + * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
10946 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
10947 + * @dev: device associated with the DPSECI object
10948 + * @mc_io: pointer to MC portal's I/O object
10949 + * @domain: IOMMU domain
10950 + * @ppriv: per CPU pointers to privata data
10952 +struct dpaa2_caam_priv {
10958 + struct dpseci_attr dpseci_attr;
10959 + struct dpseci_sec_attr sec_attr;
10960 + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
10961 + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
10966 + void *cscn_mem_aligned;
10967 + dma_addr_t cscn_dma;
10969 + struct device *dev;
10970 + struct fsl_mc_io *mc_io;
10971 + struct iommu_domain *domain;
10973 + struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
10977 + * dpaa2_caam_priv_per_cpu - per CPU private data
10978 + * @napi: napi structure
10979 + * @net_dev: netdev used by napi
10980 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
10981 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
10982 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
10983 + * @nctx: notification context of response FQ
10984 + * @store: where dequeued frames are stored
10985 + * @priv: backpointer to dpaa2_caam_priv
10986 + * @dpio: portal used for data path operations
10988 +struct dpaa2_caam_priv_per_cpu {
10989 + struct napi_struct napi;
10990 + struct net_device net_dev;
10994 + struct dpaa2_io_notification_ctx nctx;
10995 + struct dpaa2_io_store *store;
10996 + struct dpaa2_caam_priv *priv;
10997 + struct dpaa2_io *dpio;
11001 + * The CAAM QI hardware constructs a job descriptor which points
11002 + * to shared descriptor (as pointed by context_a of FQ to CAAM).
11003 + * When the job descriptor is executed by deco, the whole job
11004 + * descriptor together with shared descriptor gets loaded in
11005 + * deco buffer which is 64 words long (each 32-bit).
11007 + * The job descriptor constructed by QI hardware has layout:
11009 + * HEADER (1 word)
11010 + * Shdesc ptr (1 or 2 words)
11011 + * SEQ_OUT_PTR (1 word)
11012 + * Out ptr (1 or 2 words)
11013 + * Out length (1 word)
11014 + * SEQ_IN_PTR (1 word)
11015 + * In ptr (1 or 2 words)
11016 + * In length (1 word)
11018 + * The shdesc ptr is used to fetch shared descriptor contents
11019 + * into deco buffer.
11021 + * Apart from shdesc contents, the total number of words that
11022 + * get loaded in deco buffer are '8' or '11'. The remaining words
11023 + * in deco buffer can be used for storing shared descriptor.
11025 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
11027 +/* Length of a single buffer in the QI driver memory cache */
11028 +#define CAAM_QI_MEMCACHE_SIZE 512
11031 + * aead_edesc - s/w-extended aead descriptor
11032 + * @src_nents: number of segments in input scatterlist
11033 + * @dst_nents: number of segments in output scatterlist
11034 + * @iv_dma: dma address of iv for checking continuity and link table
11035 + * @qm_sg_bytes: length of dma mapped h/w link table
11036 + * @qm_sg_dma: bus physical mapped address of h/w link table
11037 + * @assoclen: associated data length, in CAAM endianness
11038 + * @assoclen_dma: bus physical mapped address of req->assoclen
11039 + * @sgt: the h/w link table, followed by IV
11041 +struct aead_edesc {
11044 + dma_addr_t iv_dma;
11046 + dma_addr_t qm_sg_dma;
11047 + unsigned int assoclen;
11048 + dma_addr_t assoclen_dma;
11049 + struct dpaa2_sg_entry sgt[0];
11053 + * tls_edesc - s/w-extended tls descriptor
11054 + * @src_nents: number of segments in input scatterlist
11055 + * @dst_nents: number of segments in output scatterlist
11056 + * @iv_dma: dma address of iv for checking continuity and link table
11057 + * @qm_sg_bytes: length of dma mapped h/w link table
11058 + * @qm_sg_dma: bus physical mapped address of h/w link table
11059 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
11060 + * @dst: pointer to output scatterlist, useful for unmapping
11061 + * @sgt: the h/w link table, followed by IV
11063 +struct tls_edesc {
11066 + dma_addr_t iv_dma;
11068 + dma_addr_t qm_sg_dma;
11069 + struct scatterlist tmp[2];
11070 + struct scatterlist *dst;
11071 + struct dpaa2_sg_entry sgt[0];
11075 + * skcipher_edesc - s/w-extended skcipher descriptor
11076 + * @src_nents: number of segments in input scatterlist
11077 + * @dst_nents: number of segments in output scatterlist
11078 + * @iv_dma: dma address of iv for checking continuity and link table
11079 + * @qm_sg_bytes: length of dma mapped qm_sg space
11080 + * @qm_sg_dma: I/O virtual address of h/w link table
11081 + * @sgt: the h/w link table, followed by IV
11083 +struct skcipher_edesc {
11086 + dma_addr_t iv_dma;
11088 + dma_addr_t qm_sg_dma;
11089 + struct dpaa2_sg_entry sgt[0];
11093 + * ahash_edesc - s/w-extended ahash descriptor
11094 + * @dst_dma: I/O virtual address of req->result
11095 + * @qm_sg_dma: I/O virtual address of h/w link table
11096 + * @src_nents: number of segments in input scatterlist
11097 + * @qm_sg_bytes: length of dma mapped qm_sg space
11098 + * @sgt: pointer to h/w link table
11100 +struct ahash_edesc {
11101 + dma_addr_t dst_dma;
11102 + dma_addr_t qm_sg_dma;
11105 + struct dpaa2_sg_entry sgt[0];
11109 + * caam_flc - Flow Context (FLC)
11110 + * @flc: Flow Context options
11111 + * @sh_desc: Shared Descriptor
11115 + u32 sh_desc[MAX_SDLEN];
11116 +} ____cacheline_aligned;
11125 + * caam_request - the request structure the driver application should fill while
11126 + * submitting a job to driver.
11127 + * @fd_flt: Frame list table defining input and output
11128 + * fd_flt[0] - FLE pointing to output buffer
11129 + * fd_flt[1] - FLE pointing to input buffer
11130 + * @fd_flt_dma: DMA address for the frame list table
11131 + * @flc: Flow Context
11132 + * @flc_dma: I/O virtual address of Flow Context
11133 + * @cbk: Callback function to invoke when job is completed
11134 + * @ctx: arbitrary context attached with request by the application
11135 + * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
11137 +struct caam_request {
11138 + struct dpaa2_fl_entry fd_flt[2];
11139 + dma_addr_t fd_flt_dma;
11140 + struct caam_flc *flc;
11141 + dma_addr_t flc_dma;
11142 + void (*cbk)(void *ctx, u32 err);
11148 + * dpaa2_caam_enqueue() - enqueue a crypto request
11149 + * @dev: device associated with the DPSECI object
11150 + * @req: pointer to caam_request
11152 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
11154 +#endif /* _CAAMALG_QI2_H_ */
11155 --- a/drivers/crypto/caam/caamhash.c
11156 +++ b/drivers/crypto/caam/caamhash.c
11158 * caam - Freescale FSL CAAM support for ahash functions of crypto API
11160 * Copyright 2011 Freescale Semiconductor, Inc.
11161 + * Copyright 2018 NXP
11163 * Based on caamalg.c crypto API driver.
11167 #include "sg_sw_sec4.h"
11168 #include "key_gen.h"
11169 +#include "caamhash_desc.h"
11171 #define CAAM_CRA_PRIORITY 3000
11174 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
11175 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
11177 -/* length of descriptors text */
11178 -#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
11179 -#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
11180 -#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11181 -#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
11182 -#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
11183 -#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11185 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
11186 CAAM_MAX_HASH_KEY_SIZE)
11187 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
11188 @@ -107,6 +101,7 @@ struct caam_hash_ctx {
11189 dma_addr_t sh_desc_update_first_dma;
11190 dma_addr_t sh_desc_fin_dma;
11191 dma_addr_t sh_desc_digest_dma;
11192 + enum dma_data_direction dir;
11193 struct device *jrdev;
11194 u8 key[CAAM_MAX_HASH_KEY_SIZE];
11196 @@ -218,7 +213,7 @@ static inline int buf_map_to_sec4_sg(str
11199 /* Map state->caam_ctx, and add it to link table */
11200 -static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
11201 +static inline int ctx_map_to_sec4_sg(struct device *jrdev,
11202 struct caam_hash_state *state, int ctx_len,
11203 struct sec4_sg_entry *sec4_sg, u32 flag)
11205 @@ -234,68 +229,22 @@ static inline int ctx_map_to_sec4_sg(u32
11210 - * For ahash update, final and finup (import_ctx = true)
11211 - * import context, read and write to seqout
11212 - * For ahash firsts and digest (import_ctx = false)
11213 - * read and write to seqout
11215 -static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
11216 - struct caam_hash_ctx *ctx, bool import_ctx)
11218 - u32 op = ctx->adata.algtype;
11219 - u32 *skip_key_load;
11221 - init_sh_desc(desc, HDR_SHARE_SERIAL);
11223 - /* Append key if it has been set; ahash update excluded */
11224 - if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
11225 - /* Skip key loading if already shared */
11226 - skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11229 - append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
11230 - ctx->adata.keylen, CLASS_2 |
11231 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
11233 - set_jump_tgt_here(desc, skip_key_load);
11235 - op |= OP_ALG_AAI_HMAC_PRECOMP;
11238 - /* If needed, import context from software */
11240 - append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
11241 - LDST_SRCDST_BYTE_CONTEXT);
11243 - /* Class 2 operation */
11244 - append_operation(desc, op | state | OP_ALG_ENCRYPT);
11247 - * Load from buf and/or src and write to req->result or state->context
11248 - * Calculate remaining bytes to read
11250 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11251 - /* Read remaining bytes */
11252 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
11253 - FIFOLD_TYPE_MSG | KEY_VLF);
11254 - /* Store class2 context bytes */
11255 - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
11256 - LDST_SRCDST_BYTE_CONTEXT);
11259 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
11261 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
11262 int digestsize = crypto_ahash_digestsize(ahash);
11263 struct device *jrdev = ctx->jrdev;
11264 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
11267 + ctx->adata.key_virt = ctx->key;
11269 /* ahash_update shared descriptor */
11270 desc = ctx->sh_desc_update;
11271 - ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
11272 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
11273 + ctx->ctx_len, true, ctrlpriv->era);
11274 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
11275 - desc_bytes(desc), DMA_TO_DEVICE);
11276 + desc_bytes(desc), ctx->dir);
11278 print_hex_dump(KERN_ERR,
11279 "ahash update shdesc@"__stringify(__LINE__)": ",
11280 @@ -304,9 +253,10 @@ static int ahash_set_sh_desc(struct cryp
11282 /* ahash_update_first shared descriptor */
11283 desc = ctx->sh_desc_update_first;
11284 - ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
11285 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
11286 + ctx->ctx_len, false, ctrlpriv->era);
11287 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
11288 - desc_bytes(desc), DMA_TO_DEVICE);
11289 + desc_bytes(desc), ctx->dir);
11291 print_hex_dump(KERN_ERR,
11292 "ahash update first shdesc@"__stringify(__LINE__)": ",
11293 @@ -315,9 +265,10 @@ static int ahash_set_sh_desc(struct cryp
11295 /* ahash_final shared descriptor */
11296 desc = ctx->sh_desc_fin;
11297 - ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
11298 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
11299 + ctx->ctx_len, true, ctrlpriv->era);
11300 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
11301 - desc_bytes(desc), DMA_TO_DEVICE);
11302 + desc_bytes(desc), ctx->dir);
11304 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
11305 DUMP_PREFIX_ADDRESS, 16, 4, desc,
11306 @@ -326,9 +277,10 @@ static int ahash_set_sh_desc(struct cryp
11308 /* ahash_digest shared descriptor */
11309 desc = ctx->sh_desc_digest;
11310 - ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
11311 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
11312 + ctx->ctx_len, false, ctrlpriv->era);
11313 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
11314 - desc_bytes(desc), DMA_TO_DEVICE);
11315 + desc_bytes(desc), ctx->dir);
11317 print_hex_dump(KERN_ERR,
11318 "ahash digest shdesc@"__stringify(__LINE__)": ",
11319 @@ -421,6 +373,7 @@ static int ahash_setkey(struct crypto_ah
11320 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
11321 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
11322 int digestsize = crypto_ahash_digestsize(ahash);
11323 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
11325 u8 *hashed_key = NULL;
11327 @@ -441,16 +394,26 @@ static int ahash_setkey(struct crypto_ah
11331 - ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
11332 - CAAM_MAX_HASH_KEY_SIZE);
11334 - goto bad_free_key;
11336 + * If DKP is supported, use it in the shared descriptor to generate
11339 + if (ctrlpriv->era >= 6) {
11340 + ctx->adata.key_inline = true;
11341 + ctx->adata.keylen = keylen;
11342 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
11343 + OP_ALG_ALGSEL_MASK);
11346 - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
11347 - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
11348 - ctx->adata.keylen_pad, 1);
11350 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
11351 + goto bad_free_key;
11353 + memcpy(ctx->key, key, keylen);
11355 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
11356 + keylen, CAAM_MAX_HASH_KEY_SIZE);
11358 + goto bad_free_key;
11362 return ahash_set_sh_desc(ahash);
11363 @@ -773,7 +736,7 @@ static int ahash_update_ctx(struct ahash
11364 edesc->src_nents = src_nents;
11365 edesc->sec4_sg_bytes = sec4_sg_bytes;
11367 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
11368 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
11369 edesc->sec4_sg, DMA_BIDIRECTIONAL);
11372 @@ -871,9 +834,8 @@ static int ahash_final_ctx(struct ahash_
11373 desc = edesc->hw_desc;
11375 edesc->sec4_sg_bytes = sec4_sg_bytes;
11376 - edesc->src_nents = 0;
11378 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
11379 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
11380 edesc->sec4_sg, DMA_TO_DEVICE);
11383 @@ -967,7 +929,7 @@ static int ahash_finup_ctx(struct ahash_
11385 edesc->src_nents = src_nents;
11387 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
11388 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
11389 edesc->sec4_sg, DMA_TO_DEVICE);
11392 @@ -1126,7 +1088,6 @@ static int ahash_final_no_ctx(struct aha
11393 dev_err(jrdev, "unable to map dst\n");
11396 - edesc->src_nents = 0;
11399 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
11400 @@ -1208,7 +1169,6 @@ static int ahash_update_no_ctx(struct ah
11402 edesc->src_nents = src_nents;
11403 edesc->sec4_sg_bytes = sec4_sg_bytes;
11404 - edesc->dst_dma = 0;
11406 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
11408 @@ -1420,7 +1380,6 @@ static int ahash_update_first(struct aha
11411 edesc->src_nents = src_nents;
11412 - edesc->dst_dma = 0;
11414 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
11416 @@ -1722,6 +1681,7 @@ static int caam_hash_cra_init(struct cry
11418 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
11419 dma_addr_t dma_addr;
11420 + struct caam_drv_private *priv;
11423 * Get a Job ring from Job Ring driver to ensure in-order
11424 @@ -1733,10 +1693,13 @@ static int caam_hash_cra_init(struct cry
11425 return PTR_ERR(ctx->jrdev);
11428 + priv = dev_get_drvdata(ctx->jrdev->parent);
11429 + ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
11431 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
11432 offsetof(struct caam_hash_ctx,
11433 sh_desc_update_dma),
11434 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
11435 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
11436 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
11437 dev_err(ctx->jrdev, "unable to map shared descriptors\n");
11438 caam_jr_free(ctx->jrdev);
11439 @@ -1771,11 +1734,11 @@ static void caam_hash_cra_exit(struct cr
11440 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
11441 offsetof(struct caam_hash_ctx,
11442 sh_desc_update_dma),
11443 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
11444 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
11445 caam_jr_free(ctx->jrdev);
11448 -static void __exit caam_algapi_hash_exit(void)
11449 +void caam_algapi_hash_exit(void)
11451 struct caam_hash_alg *t_alg, *n;
11453 @@ -1834,56 +1797,38 @@ caam_hash_alloc(struct caam_hash_templat
11457 -static int __init caam_algapi_hash_init(void)
11458 +int caam_algapi_hash_init(struct device *ctrldev)
11460 - struct device_node *dev_node;
11461 - struct platform_device *pdev;
11462 - struct device *ctrldev;
11463 int i = 0, err = 0;
11464 - struct caam_drv_private *priv;
11465 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
11466 unsigned int md_limit = SHA512_DIGEST_SIZE;
11467 - u32 cha_inst, cha_vid;
11469 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
11471 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
11476 - pdev = of_find_device_by_node(dev_node);
11478 - of_node_put(dev_node);
11482 - ctrldev = &pdev->dev;
11483 - priv = dev_get_drvdata(ctrldev);
11484 - of_node_put(dev_node);
11487 - * If priv is NULL, it's probably because the caam driver wasn't
11488 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
11492 + u32 md_inst, md_vid;
11495 * Register crypto algorithms the device supports. First, identify
11496 * presence and attributes of MD block.
11498 - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
11499 - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
11500 + if (priv->era < 10) {
11501 + md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
11502 + CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
11503 + md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
11504 + CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
11506 + u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
11508 + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
11509 + md_inst = mdha & CHA_VER_NUM_MASK;
11513 * Skip registration of any hashing algorithms if MD block
11516 - if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
11520 /* Limit digest size based on LP256 */
11521 - if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
11522 + if (md_vid == CHA_VER_VID_MD_LP256)
11523 md_limit = SHA256_DIGEST_SIZE;
11525 INIT_LIST_HEAD(&hash_list);
11526 @@ -1934,10 +1879,3 @@ static int __init caam_algapi_hash_init(
11531 -module_init(caam_algapi_hash_init);
11532 -module_exit(caam_algapi_hash_exit);
11534 -MODULE_LICENSE("GPL");
11535 -MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
11536 -MODULE_AUTHOR("Freescale Semiconductor - NMG");
11538 +++ b/drivers/crypto/caam/caamhash_desc.c
11541 + * Shared descriptors for ahash algorithms
11543 + * Copyright 2017 NXP
11545 + * Redistribution and use in source and binary forms, with or without
11546 + * modification, are permitted provided that the following conditions are met:
11547 + * * Redistributions of source code must retain the above copyright
11548 + * notice, this list of conditions and the following disclaimer.
11549 + * * Redistributions in binary form must reproduce the above copyright
11550 + * notice, this list of conditions and the following disclaimer in the
11551 + * documentation and/or other materials provided with the distribution.
11552 + * * Neither the names of the above-listed copyright holders nor the
11553 + * names of any contributors may be used to endorse or promote products
11554 + * derived from this software without specific prior written permission.
11557 + * ALTERNATIVELY, this software may be distributed under the terms of the
11558 + * GNU General Public License ("GPL") as published by the Free Software
11559 + * Foundation, either version 2 of that License or (at your option) any
11562 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
11563 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
11564 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
11565 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
11566 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
11567 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
11568 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
11569 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
11570 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
11571 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
11572 + * POSSIBILITY OF SUCH DAMAGE.
11575 +#include "compat.h"
11576 +#include "desc_constr.h"
11577 +#include "caamhash_desc.h"
11580 + * cnstr_shdsc_ahash - ahash shared descriptor
11581 + * @desc: pointer to buffer used for descriptor construction
11582 + * @adata: pointer to authentication transform definitions.
11583 + * A split key is required for SEC Era < 6; the size of the split key
11584 + * is specified in this case.
11585 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
11586 + * SHA256, SHA384, SHA512}.
11587 + * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
11588 + * @digestsize: algorithm's digest size
11589 + * @ctx_len: size of Context Register
11590 + * @import_ctx: true if previous Context Register needs to be restored
11591 + * must be true for ahash update and final
11592 + * must be false for ahash first and digest
11595 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
11596 + int digestsize, int ctx_len, bool import_ctx, int era)
11598 + u32 op = adata->algtype;
11600 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11602 + /* Append key if it has been set; ahash update excluded */
11603 + if (state != OP_ALG_AS_UPDATE && adata->keylen) {
11604 + u32 *skip_key_load;
11606 + /* Skip key loading if already shared */
11607 + skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11611 + append_key_as_imm(desc, adata->key_virt,
11612 + adata->keylen_pad,
11613 + adata->keylen, CLASS_2 |
11614 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
11616 + append_proto_dkp(desc, adata);
11618 + set_jump_tgt_here(desc, skip_key_load);
11620 + op |= OP_ALG_AAI_HMAC_PRECOMP;
11623 + /* If needed, import context from software */
11625 + append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
11626 + LDST_SRCDST_BYTE_CONTEXT);
11628 + /* Class 2 operation */
11629 + append_operation(desc, op | state | OP_ALG_ENCRYPT);
11632 + * Load from buf and/or src and write to req->result or state->context
11633 + * Calculate remaining bytes to read
11635 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11636 + /* Read remaining bytes */
11637 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
11638 + FIFOLD_TYPE_MSG | KEY_VLF);
11639 + /* Store class2 context bytes */
11640 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
11641 + LDST_SRCDST_BYTE_CONTEXT);
11643 +EXPORT_SYMBOL(cnstr_shdsc_ahash);
11645 +MODULE_LICENSE("Dual BSD/GPL");
11646 +MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
11647 +MODULE_AUTHOR("NXP Semiconductors");
11649 +++ b/drivers/crypto/caam/caamhash_desc.h
11652 + * Shared descriptors for ahash algorithms
11654 + * Copyright 2017 NXP
11656 + * Redistribution and use in source and binary forms, with or without
11657 + * modification, are permitted provided that the following conditions are met:
11658 + * * Redistributions of source code must retain the above copyright
11659 + * notice, this list of conditions and the following disclaimer.
11660 + * * Redistributions in binary form must reproduce the above copyright
11661 + * notice, this list of conditions and the following disclaimer in the
11662 + * documentation and/or other materials provided with the distribution.
11663 + * * Neither the names of the above-listed copyright holders nor the
11664 + * names of any contributors may be used to endorse or promote products
11665 + * derived from this software without specific prior written permission.
11668 + * ALTERNATIVELY, this software may be distributed under the terms of the
11669 + * GNU General Public License ("GPL") as published by the Free Software
11670 + * Foundation, either version 2 of that License or (at your option) any
11673 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
11674 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
11675 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
11676 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
11677 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
11678 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
11679 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
11680 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
11681 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
11682 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
11683 + * POSSIBILITY OF SUCH DAMAGE.
11686 +#ifndef _CAAMHASH_DESC_H_
11687 +#define _CAAMHASH_DESC_H_
11689 +/* length of descriptors text */
11690 +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
11691 +#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
11692 +#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11693 +#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
11694 +#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11696 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
11697 + int digestsize, int ctx_len, bool import_ctx, int era);
11699 +#endif /* _CAAMHASH_DESC_H_ */
11700 --- a/drivers/crypto/caam/caampkc.c
11701 +++ b/drivers/crypto/caam/caampkc.c
11703 * caam - Freescale FSL CAAM support for Public Key Cryptography
11705 * Copyright 2016 Freescale Semiconductor, Inc.
11706 + * Copyright 2018 NXP
11708 * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
11709 * all the desired key parameters, input and output pointers.
11710 @@ -1017,46 +1018,22 @@ static struct akcipher_alg caam_rsa = {
11713 /* Public Key Cryptography module initialization handler */
11714 -static int __init caam_pkc_init(void)
11715 +int caam_pkc_init(struct device *ctrldev)
11717 - struct device_node *dev_node;
11718 - struct platform_device *pdev;
11719 - struct device *ctrldev;
11720 - struct caam_drv_private *priv;
11721 - u32 cha_inst, pk_inst;
11722 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
11726 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
11728 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
11733 - pdev = of_find_device_by_node(dev_node);
11735 - of_node_put(dev_node);
11739 - ctrldev = &pdev->dev;
11740 - priv = dev_get_drvdata(ctrldev);
11741 - of_node_put(dev_node);
11744 - * If priv is NULL, it's probably because the caam driver wasn't
11745 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
11750 /* Determine public key hardware accelerator presence. */
11751 - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
11752 - pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
11753 + if (priv->era < 10)
11754 + pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
11755 + CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
11757 + pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
11759 /* Do not register algorithms if PKHA is not present. */
11764 err = crypto_register_akcipher(&caam_rsa);
11766 @@ -1068,14 +1045,7 @@ static int __init caam_pkc_init(void)
11770 -static void __exit caam_pkc_exit(void)
11771 +void caam_pkc_exit(void)
11773 crypto_unregister_akcipher(&caam_rsa);
11776 -module_init(caam_pkc_init);
11777 -module_exit(caam_pkc_exit);
11779 -MODULE_LICENSE("Dual BSD/GPL");
11780 -MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
11781 -MODULE_AUTHOR("Freescale Semiconductor");
11782 --- a/drivers/crypto/caam/caamrng.c
11783 +++ b/drivers/crypto/caam/caamrng.c
11785 * caam - Freescale FSL CAAM support for hw_random
11787 * Copyright 2011 Freescale Semiconductor, Inc.
11788 + * Copyright 2018 NXP
11790 * Based on caamalg.c crypto API driver.
11792 @@ -294,49 +295,29 @@ static struct hwrng caam_rng = {
11796 -static void __exit caam_rng_exit(void)
11797 +void caam_rng_exit(void)
11799 caam_jr_free(rng_ctx->jrdev);
11800 hwrng_unregister(&caam_rng);
11804 -static int __init caam_rng_init(void)
11805 +int caam_rng_init(struct device *ctrldev)
11807 struct device *dev;
11808 - struct device_node *dev_node;
11809 - struct platform_device *pdev;
11810 - struct device *ctrldev;
11811 - struct caam_drv_private *priv;
11813 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
11816 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
11818 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
11823 - pdev = of_find_device_by_node(dev_node);
11825 - of_node_put(dev_node);
11829 - ctrldev = &pdev->dev;
11830 - priv = dev_get_drvdata(ctrldev);
11831 - of_node_put(dev_node);
11834 - * If priv is NULL, it's probably because the caam driver wasn't
11835 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
11840 /* Check for an instantiated RNG before registration */
11841 - if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
11843 + if (priv->era < 10)
11844 + rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
11845 + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
11847 + rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
11852 dev = caam_jr_alloc();
11854 @@ -361,10 +342,3 @@ free_caam_alloc:
11859 -module_init(caam_rng_init);
11860 -module_exit(caam_rng_exit);
11862 -MODULE_LICENSE("GPL");
11863 -MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
11864 -MODULE_AUTHOR("Freescale Semiconductor - NMG");
11865 --- a/drivers/crypto/caam/compat.h
11866 +++ b/drivers/crypto/caam/compat.h
11868 #include <linux/of_platform.h>
11869 #include <linux/dma-mapping.h>
11870 #include <linux/io.h>
11871 +#include <linux/iommu.h>
11872 #include <linux/spinlock.h>
11873 #include <linux/rtnetlink.h>
11874 #include <linux/in.h>
11875 @@ -34,10 +35,13 @@
11876 #include <crypto/des.h>
11877 #include <crypto/sha.h>
11878 #include <crypto/md5.h>
11879 +#include <crypto/chacha20.h>
11880 +#include <crypto/poly1305.h>
11881 #include <crypto/internal/aead.h>
11882 #include <crypto/authenc.h>
11883 #include <crypto/akcipher.h>
11884 #include <crypto/scatterwalk.h>
11885 +#include <crypto/skcipher.h>
11886 #include <crypto/internal/skcipher.h>
11887 #include <crypto/internal/hash.h>
11888 #include <crypto/internal/rsa.h>
11889 --- a/drivers/crypto/caam/ctrl.c
11890 +++ b/drivers/crypto/caam/ctrl.c
11892 * Controller-level driver, kernel property detection, initialization
11894 * Copyright 2008-2012 Freescale Semiconductor, Inc.
11895 + * Copyright 2018 NXP
11898 #include <linux/device.h>
11899 @@ -16,17 +17,15 @@
11900 #include "desc_constr.h"
11903 -bool caam_little_end;
11904 -EXPORT_SYMBOL(caam_little_end);
11906 EXPORT_SYMBOL(caam_dpaa2);
11908 -EXPORT_SYMBOL(caam_imx);
11910 #ifdef CONFIG_CAAM_QI
11914 +static struct platform_device *caam_dma_dev;
11917 * i.MX targets tend to have clock control subsystems that can
11918 * enable/disable clocking to our device.
11919 @@ -105,7 +104,7 @@ static inline int run_descriptor_deco0(s
11920 struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
11921 struct caam_deco __iomem *deco = ctrlpriv->deco;
11922 unsigned int timeout = 100000;
11923 - u32 deco_dbg_reg, flags;
11924 + u32 deco_dbg_reg, deco_state, flags;
11928 @@ -148,13 +147,22 @@ static inline int run_descriptor_deco0(s
11929 timeout = 10000000;
11931 deco_dbg_reg = rd_reg32(&deco->desc_dbg);
11933 + if (ctrlpriv->era < 10)
11934 + deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
11935 + DESC_DBG_DECO_STAT_SHIFT;
11937 + deco_state = (rd_reg32(&deco->dbg_exec) &
11938 + DESC_DER_DECO_STAT_MASK) >>
11939 + DESC_DER_DECO_STAT_SHIFT;
11942 * If an error occured in the descriptor, then
11943 * the DECO status field will be set to 0x0D
11945 - if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
11946 - DESC_DBG_DECO_STAT_HOST_ERR)
11947 + if (deco_state == DECO_STAT_HOST_ERR)
11951 } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
11953 @@ -316,15 +324,15 @@ static int caam_remove(struct platform_d
11954 of_platform_depopulate(ctrldev);
11956 #ifdef CONFIG_CAAM_QI
11957 - if (ctrlpriv->qidev)
11958 - caam_qi_shutdown(ctrlpriv->qidev);
11959 + if (ctrlpriv->qi_init)
11960 + caam_qi_shutdown(ctrldev);
11964 * De-initialize RNG state handles initialized by this driver.
11965 - * In case of DPAA 2.x, RNG is managed by MC firmware.
11966 + * In case of SoCs with Management Complex, RNG is managed by MC f/w.
11968 - if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
11969 + if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
11970 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
11972 /* Shut down debug views */
11973 @@ -332,6 +340,9 @@ static int caam_remove(struct platform_d
11974 debugfs_remove_recursive(ctrlpriv->dfs_root);
11977 + if (caam_dma_dev)
11978 + platform_device_unregister(caam_dma_dev);
11980 /* Unmap controller region */
11983 @@ -433,6 +444,10 @@ static int caam_probe(struct platform_de
11984 {.family = "Freescale i.MX"},
11987 + static struct platform_device_info caam_dma_pdev_info = {
11988 + .name = "caam-dma",
11989 + .id = PLATFORM_DEVID_NONE
11991 struct device *dev;
11992 struct device_node *nprop, *np;
11993 struct caam_ctrl __iomem *ctrl;
11994 @@ -442,7 +457,7 @@ static int caam_probe(struct platform_de
11995 struct caam_perfmon *perfmon;
11997 u32 scfgr, comp_params;
12001 int BLOCK_OFFSET = 0;
12003 @@ -454,15 +469,54 @@ static int caam_probe(struct platform_de
12004 dev_set_drvdata(dev, ctrlpriv);
12005 nprop = pdev->dev.of_node;
12007 + /* Get configuration properties from device tree */
12008 + /* First, get register page */
12009 + ctrl = of_iomap(nprop, 0);
12011 + dev_err(dev, "caam: of_iomap() failed\n");
12015 + caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
12016 + (CSTA_PLEND | CSTA_ALT_PLEND));
12017 caam_imx = (bool)soc_device_match(imx_soc);
12019 + comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
12020 + caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
12021 + ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
12023 +#ifdef CONFIG_CAAM_QI
12024 + /* If (DPAA 1.x) QI present, check whether dependencies are available */
12025 + if (ctrlpriv->qi_present && !caam_dpaa2) {
12026 + ret = qman_is_probed();
12028 + ret = -EPROBE_DEFER;
12029 + goto iounmap_ctrl;
12030 + } else if (ret < 0) {
12031 + dev_err(dev, "failing probe due to qman probe error\n");
12033 + goto iounmap_ctrl;
12036 + ret = qman_portals_probed();
12038 + ret = -EPROBE_DEFER;
12039 + goto iounmap_ctrl;
12040 + } else if (ret < 0) {
12041 + dev_err(dev, "failing probe due to qman portals probe error\n");
12043 + goto iounmap_ctrl;
12048 /* Enable clocking */
12049 clk = caam_drv_identify_clk(&pdev->dev, "ipg");
12051 ret = PTR_ERR(clk);
12052 dev_err(&pdev->dev,
12053 "can't identify CAAM ipg clk: %d\n", ret);
12055 + goto iounmap_ctrl;
12057 ctrlpriv->caam_ipg = clk;
12059 @@ -471,7 +525,7 @@ static int caam_probe(struct platform_de
12060 ret = PTR_ERR(clk);
12061 dev_err(&pdev->dev,
12062 "can't identify CAAM mem clk: %d\n", ret);
12064 + goto iounmap_ctrl;
12066 ctrlpriv->caam_mem = clk;
12068 @@ -480,7 +534,7 @@ static int caam_probe(struct platform_de
12069 ret = PTR_ERR(clk);
12070 dev_err(&pdev->dev,
12071 "can't identify CAAM aclk clk: %d\n", ret);
12073 + goto iounmap_ctrl;
12075 ctrlpriv->caam_aclk = clk;
12077 @@ -490,7 +544,7 @@ static int caam_probe(struct platform_de
12078 ret = PTR_ERR(clk);
12079 dev_err(&pdev->dev,
12080 "can't identify CAAM emi_slow clk: %d\n", ret);
12082 + goto iounmap_ctrl;
12084 ctrlpriv->caam_emi_slow = clk;
12086 @@ -498,7 +552,7 @@ static int caam_probe(struct platform_de
12087 ret = clk_prepare_enable(ctrlpriv->caam_ipg);
12089 dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
12091 + goto iounmap_ctrl;
12094 ret = clk_prepare_enable(ctrlpriv->caam_mem);
12095 @@ -523,25 +577,10 @@ static int caam_probe(struct platform_de
12099 - /* Get configuration properties from device tree */
12100 - /* First, get register page */
12101 - ctrl = of_iomap(nprop, 0);
12102 - if (ctrl == NULL) {
12103 - dev_err(dev, "caam: of_iomap() failed\n");
12105 - goto disable_caam_emi_slow;
12108 - caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
12109 - (CSTA_PLEND | CSTA_ALT_PLEND));
12111 - /* Finding the page size for using the CTPR_MS register */
12112 - comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
12113 - pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
12115 /* Allocating the BLOCK_OFFSET based on the supported page size on
12118 + pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
12120 BLOCK_OFFSET = PG_SIZE_4K;
12122 @@ -563,11 +602,14 @@ static int caam_probe(struct platform_de
12124 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
12125 * long pointers in master configuration register.
12126 - * In case of DPAA 2.x, Management Complex firmware performs
12127 + * In case of SoCs with Management Complex, MC f/w performs
12128 * the configuration.
12130 - caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
12132 + np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
12133 + ctrlpriv->mc_en = !!np;
12136 + if (!ctrlpriv->mc_en)
12137 clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
12138 MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
12139 MCFGR_WDENABLE | MCFGR_LARGE_BURST |
12140 @@ -612,14 +654,11 @@ static int caam_probe(struct platform_de
12143 dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
12144 - goto iounmap_ctrl;
12145 + goto disable_caam_emi_slow;
12148 - ret = of_platform_populate(nprop, caam_match, NULL, dev);
12150 - dev_err(dev, "JR platform devices creation error\n");
12151 - goto iounmap_ctrl;
12153 + ctrlpriv->era = caam_get_era();
12154 + ctrlpriv->domain = iommu_get_domain_for_dev(dev);
12156 #ifdef CONFIG_DEBUG_FS
12158 @@ -633,21 +672,7 @@ static int caam_probe(struct platform_de
12159 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
12163 - for_each_available_child_of_node(nprop, np)
12164 - if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
12165 - of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
12166 - ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
12167 - ((__force uint8_t *)ctrl +
12168 - (ring + JR_BLOCK_NUMBER) *
12171 - ctrlpriv->total_jobrs++;
12175 /* Check to see if (DPAA 1.x) QI present. If so, enable */
12176 - ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
12177 if (ctrlpriv->qi_present && !caam_dpaa2) {
12178 ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
12179 ((__force uint8_t *)ctrl +
12180 @@ -664,6 +689,25 @@ static int caam_probe(struct platform_de
12184 + ret = of_platform_populate(nprop, caam_match, NULL, dev);
12186 + dev_err(dev, "JR platform devices creation error\n");
12187 + goto shutdown_qi;
12191 + for_each_available_child_of_node(nprop, np)
12192 + if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
12193 + of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
12194 + ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
12195 + ((__force uint8_t *)ctrl +
12196 + (ring + JR_BLOCK_NUMBER) *
12199 + ctrlpriv->total_jobrs++;
12203 /* If no QI and no rings specified, quit and go home */
12204 if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
12205 dev_err(dev, "no queues configured, terminating\n");
12206 @@ -671,15 +715,29 @@ static int caam_probe(struct platform_de
12210 - cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
12211 + caam_dma_pdev_info.parent = dev;
12212 + caam_dma_pdev_info.dma_mask = dma_get_mask(dev);
12213 + caam_dma_dev = platform_device_register_full(&caam_dma_pdev_info);
12214 + if (IS_ERR(caam_dma_dev)) {
12215 + dev_err(dev, "Unable to create and register caam-dma dev\n");
12216 + caam_dma_dev = 0;
12218 + set_dma_ops(&caam_dma_dev->dev, get_dma_ops(dev));
12221 + if (ctrlpriv->era < 10)
12222 + rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
12223 + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
12225 + rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
12226 + CHA_VER_VID_SHIFT;
12229 * If SEC has RNG version >= 4 and RNG state handle has not been
12230 * already instantiated, do RNG instantiation
12231 - * In case of DPAA 2.x, RNG is managed by MC firmware.
12232 + * In case of SoCs with Management Complex, RNG is managed by MC f/w.
12234 - if (!caam_dpaa2 &&
12235 - (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
12236 + if (!ctrlpriv->mc_en && rng_vid >= 4) {
12237 ctrlpriv->rng4_sh_init =
12238 rd_reg32(&ctrl->r4tst[0].rdsta);
12240 @@ -746,10 +804,9 @@ static int caam_probe(struct platform_de
12242 /* Report "alive" for developer to see */
12243 dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
12245 - dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
12246 - ctrlpriv->total_jobrs, ctrlpriv->qi_present,
12247 - caam_dpaa2 ? "yes" : "no");
12249 + dev_info(dev, "job rings = %d, qi = %d\n",
12250 + ctrlpriv->total_jobrs, ctrlpriv->qi_present);
12252 #ifdef CONFIG_DEBUG_FS
12253 debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
12254 @@ -816,8 +873,11 @@ caam_remove:
12261 +#ifdef CONFIG_CAAM_QI
12262 + if (ctrlpriv->qi_init)
12263 + caam_qi_shutdown(dev);
12265 disable_caam_emi_slow:
12266 if (ctrlpriv->caam_emi_slow)
12267 clk_disable_unprepare(ctrlpriv->caam_emi_slow);
12268 @@ -827,6 +887,8 @@ disable_caam_mem:
12269 clk_disable_unprepare(ctrlpriv->caam_mem);
12271 clk_disable_unprepare(ctrlpriv->caam_ipg);
12277 --- a/drivers/crypto/caam/desc.h
12278 +++ b/drivers/crypto/caam/desc.h
12280 * Definitions to support CAAM descriptor instruction generation
12282 * Copyright 2008-2011 Freescale Semiconductor, Inc.
12283 + * Copyright 2018 NXP
12288 #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
12289 #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
12290 #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
12291 +#define CMD_MOVEB (0x07 << CMD_SHIFT)
12292 #define CMD_STORE (0x0a << CMD_SHIFT)
12293 #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
12294 #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
12295 @@ -242,6 +244,7 @@
12296 #define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
12297 #define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
12298 #define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
12299 +#define LDST_SRCDST_WORD_INFO_FIFO_SM (0x71 << LDST_SRCDST_SHIFT)
12300 #define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
12302 /* Offset in source/destination */
12303 @@ -284,6 +287,12 @@
12304 #define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
12305 #define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
12307 +/* Special Length definitions when dst=sm, nfifo-{sm,m} */
12308 +#define LDLEN_MATH0 0
12309 +#define LDLEN_MATH1 1
12310 +#define LDLEN_MATH2 2
12311 +#define LDLEN_MATH3 3
12314 * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
12315 * Command Constructs
12316 @@ -355,6 +364,7 @@
12317 #define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
12318 #define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
12319 #define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
12320 +#define FIFOLD_TYPE_IFIFO (0x0f << FIFOLD_TYPE_SHIFT)
12322 /* Other types. Need to OR in last/flush bits as desired */
12323 #define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
12324 @@ -408,6 +418,7 @@
12325 #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
12326 #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
12327 #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
12328 +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
12329 #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
12332 @@ -444,6 +455,18 @@
12333 #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
12334 #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
12335 #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
12336 +#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
12337 +#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
12338 +#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
12339 +#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
12340 +#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
12341 +#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
12342 +#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
12343 +#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
12344 +#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
12345 +#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
12346 +#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
12347 +#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
12349 /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
12350 #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
12351 @@ -1093,6 +1116,22 @@
12352 /* MacSec protinfos */
12353 #define OP_PCL_MACSEC 0x0001
12355 +/* Derived Key Protocol (DKP) Protinfo */
12356 +#define OP_PCL_DKP_SRC_SHIFT 14
12357 +#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
12358 +#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
12359 +#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
12360 +#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
12361 +#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
12362 +#define OP_PCL_DKP_DST_SHIFT 12
12363 +#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
12364 +#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
12365 +#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
12366 +#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
12367 +#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
12368 +#define OP_PCL_DKP_KEY_SHIFT 0
12369 +#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
12371 /* PKI unidirectional protocol protinfo bits */
12372 #define OP_PCL_PKPROT_TEST 0x0008
12373 #define OP_PCL_PKPROT_DECRYPT 0x0004
12374 @@ -1105,6 +1144,12 @@
12375 #define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
12376 #define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
12378 +/* version register fields */
12379 +#define OP_VER_CCHA_NUM 0x000000ff /* Number CCHAs instantiated */
12380 +#define OP_VER_CCHA_MISC 0x0000ff00 /* CCHA Miscellaneous Information */
12381 +#define OP_VER_CCHA_REV 0x00ff0000 /* CCHA Revision Number */
12382 +#define OP_VER_CCHA_VID 0xff000000 /* CCHA Version ID */
12384 #define OP_ALG_ALGSEL_SHIFT 16
12385 #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
12386 #define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
12387 @@ -1124,6 +1169,8 @@
12388 #define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
12389 #define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
12390 #define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
12391 +#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT)
12392 +#define OP_ALG_ALGSEL_POLY1305 (0xE0 << OP_ALG_ALGSEL_SHIFT)
12394 #define OP_ALG_AAI_SHIFT 4
12395 #define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
12396 @@ -1171,6 +1218,11 @@
12397 #define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
12398 #define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
12400 +/* Chacha20 AAI set */
12401 +#define OP_ALG_AAI_AEAD (0x002 << OP_ALG_AAI_SHIFT)
12402 +#define OP_ALG_AAI_KEYSTREAM (0x001 << OP_ALG_AAI_SHIFT)
12403 +#define OP_ALG_AAI_BC8 (0x008 << OP_ALG_AAI_SHIFT)
12405 /* hmac/smac AAI set */
12406 #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
12407 #define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
12408 @@ -1359,6 +1411,7 @@
12409 #define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
12410 #define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
12411 #define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
12412 +#define MOVE_SRC_AUX_ABLK (0x0a << MOVE_SRC_SHIFT)
12414 #define MOVE_DEST_SHIFT 16
12415 #define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
12416 @@ -1385,6 +1438,10 @@
12418 #define MOVELEN_MRSEL_SHIFT 0
12419 #define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
12420 +#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
12421 +#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
12422 +#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
12423 +#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
12426 * MATH Command Constructs
12427 @@ -1440,10 +1497,11 @@
12428 #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
12429 #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
12430 #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
12431 -#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
12432 +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
12433 #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
12434 #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
12435 #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
12436 +#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
12438 /* Destination selectors */
12439 #define MATH_DEST_SHIFT 8
12440 @@ -1452,6 +1510,7 @@
12441 #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
12442 #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
12443 #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
12444 +#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
12445 #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
12446 #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
12447 #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
12448 @@ -1560,6 +1619,7 @@
12449 #define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
12450 #define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
12451 #define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
12452 +#define NFIFOENTRY_DTYPE_POLY (0xB << NFIFOENTRY_DTYPE_SHIFT)
12453 #define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
12454 #define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
12456 @@ -1624,4 +1684,31 @@
12457 /* Frame Descriptor Command for Replacement Job Descriptor */
12458 #define FD_CMD_REPLACE_JOB_DESC 0x20000000
12460 +/* CHA Control Register bits */
12461 +#define CCTRL_RESET_CHA_ALL 0x1
12462 +#define CCTRL_RESET_CHA_AESA 0x2
12463 +#define CCTRL_RESET_CHA_DESA 0x4
12464 +#define CCTRL_RESET_CHA_AFHA 0x8
12465 +#define CCTRL_RESET_CHA_KFHA 0x10
12466 +#define CCTRL_RESET_CHA_SF8A 0x20
12467 +#define CCTRL_RESET_CHA_PKHA 0x40
12468 +#define CCTRL_RESET_CHA_MDHA 0x80
12469 +#define CCTRL_RESET_CHA_CRCA 0x100
12470 +#define CCTRL_RESET_CHA_RNG 0x200
12471 +#define CCTRL_RESET_CHA_SF9A 0x400
12472 +#define CCTRL_RESET_CHA_ZUCE 0x800
12473 +#define CCTRL_RESET_CHA_ZUCA 0x1000
12474 +#define CCTRL_UNLOAD_PK_A0 0x10000
12475 +#define CCTRL_UNLOAD_PK_A1 0x20000
12476 +#define CCTRL_UNLOAD_PK_A2 0x40000
12477 +#define CCTRL_UNLOAD_PK_A3 0x80000
12478 +#define CCTRL_UNLOAD_PK_B0 0x100000
12479 +#define CCTRL_UNLOAD_PK_B1 0x200000
12480 +#define CCTRL_UNLOAD_PK_B2 0x400000
12481 +#define CCTRL_UNLOAD_PK_B3 0x800000
12482 +#define CCTRL_UNLOAD_PK_N 0x1000000
12483 +#define CCTRL_UNLOAD_PK_A 0x4000000
12484 +#define CCTRL_UNLOAD_PK_B 0x8000000
12485 +#define CCTRL_UNLOAD_SBOX 0x10000000
12487 #endif /* DESC_H */
12488 --- a/drivers/crypto/caam/desc_constr.h
12489 +++ b/drivers/crypto/caam/desc_constr.h
12490 @@ -109,7 +109,7 @@ static inline void init_job_desc_shared(
12491 append_ptr(desc, ptr);
12494 -static inline void append_data(u32 * const desc, void *data, int len)
12495 +static inline void append_data(u32 * const desc, const void *data, int len)
12497 u32 *offset = desc_end(desc);
12499 @@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen
12500 append_cmd(desc, len);
12503 -static inline void append_cmd_data(u32 * const desc, void *data, int len,
12504 +static inline void append_cmd_data(u32 * const desc, const void *data, int len,
12507 append_cmd(desc, command | IMMEDIATE | len);
12508 @@ -189,6 +189,8 @@ static inline u32 *append_##cmd(u32 * co
12510 APPEND_CMD_RET(jump, JUMP)
12511 APPEND_CMD_RET(move, MOVE)
12512 +APPEND_CMD_RET(moveb, MOVEB)
12513 +APPEND_CMD_RET(move_len, MOVE_LEN)
12515 static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
12517 @@ -271,7 +273,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
12518 APPEND_SEQ_PTR_INTLEN(out, OUT)
12520 #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
12521 -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
12522 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
12523 unsigned int len, u32 options) \
12526 @@ -312,7 +314,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
12527 * from length of immediate data provided, e.g., split keys
12529 #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
12530 -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
12531 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
12532 unsigned int data_len, \
12533 unsigned int len, u32 options) \
12535 @@ -327,7 +329,11 @@ static inline void append_##cmd##_imm_##
12539 - append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
12540 + if (options & LDST_LEN_MASK) \
12541 + append_cmd(desc, CMD_##op | IMMEDIATE | options); \
12543 + append_cmd(desc, CMD_##op | IMMEDIATE | options | \
12545 append_cmd(desc, immediate); \
12547 APPEND_CMD_RAW_IMM(load, LOAD, u32);
12548 @@ -452,7 +458,7 @@ struct alginfo {
12549 unsigned int keylen_pad;
12551 dma_addr_t key_dma;
12553 + const void *key_virt;
12557 @@ -496,4 +502,45 @@ static inline int desc_inline_query(unsi
12558 return (rem_bytes >= 0) ? 0 : -1;
12562 + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
12563 + * @desc: pointer to buffer used for descriptor construction
12564 + * @adata: pointer to authentication transform definitions.
12565 + * keylen should be the length of initial key, while keylen_pad
12566 + * the length of the derived (split) key.
12567 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
12568 + * SHA256, SHA384, SHA512}.
12570 +static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
12575 + * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
12576 + * to OP_PCLID_DKP_{MD5, SHA*}
12578 + protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
12579 + (0x20 << OP_ALG_ALGSEL_SHIFT);
12581 + if (adata->key_inline) {
12584 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
12585 + OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
12587 + append_data(desc, adata->key_virt, adata->keylen);
12589 + /* Reserve space in descriptor buffer for the derived key */
12590 + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
12591 + ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
12593 + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
12595 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
12596 + OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
12598 + append_ptr(desc, adata->key_dma);
12602 #endif /* DESC_CONSTR_H */
12604 +++ b/drivers/crypto/caam/dpseci.c
12607 + * Copyright 2013-2016 Freescale Semiconductor Inc.
12608 + * Copyright 2017 NXP
12610 + * Redistribution and use in source and binary forms, with or without
12611 + * modification, are permitted provided that the following conditions are met:
12612 + * * Redistributions of source code must retain the above copyright
12613 + * notice, this list of conditions and the following disclaimer.
12614 + * * Redistributions in binary form must reproduce the above copyright
12615 + * notice, this list of conditions and the following disclaimer in the
12616 + * documentation and/or other materials provided with the distribution.
12617 + * * Neither the names of the above-listed copyright holders nor the
12618 + * names of any contributors may be used to endorse or promote products
12619 + * derived from this software without specific prior written permission.
12622 + * ALTERNATIVELY, this software may be distributed under the terms of the
12623 + * GNU General Public License ("GPL") as published by the Free Software
12624 + * Foundation, either version 2 of that License or (at your option) any
12627 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
12628 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
12629 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
12630 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
12631 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
12632 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
12633 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
12634 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
12635 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
12636 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
12637 + * POSSIBILITY OF SUCH DAMAGE.
12640 +#include <linux/fsl/mc.h>
12641 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
12642 +#include "dpseci.h"
12643 +#include "dpseci_cmd.h"
12646 + * dpseci_open() - Open a control session for the specified object
12647 + * @mc_io: Pointer to MC portal's I/O object
12648 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12649 + * @dpseci_id: DPSECI unique ID
12650 + * @token: Returned token; use in subsequent API calls
12652 + * This function can be used to open a control session for an already created
12653 + * object; an object may have been declared in the DPL or by calling the
12654 + * dpseci_create() function.
12655 + * This function returns a unique authentication token, associated with the
12656 + * specific object ID and the specific MC portal; this token must be used in all
12657 + * subsequent commands for this specific object.
12659 + * Return: '0' on success, error code otherwise
12661 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
12664 + struct fsl_mc_command cmd = { 0 };
12665 + struct dpseci_cmd_open *cmd_params;
12668 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
12671 + cmd_params = (struct dpseci_cmd_open *)cmd.params;
12672 + cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
12673 + err = mc_send_command(mc_io, &cmd);
12677 + *token = mc_cmd_hdr_read_token(&cmd);
12683 + * dpseci_close() - Close the control session of the object
12684 + * @mc_io: Pointer to MC portal's I/O object
12685 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12686 + * @token: Token of DPSECI object
12688 + * After this function is called, no further operations are allowed on the
12689 + * object without opening a new control session.
12691 + * Return: '0' on success, error code otherwise
12693 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12695 + struct fsl_mc_command cmd = { 0 };
12697 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
12700 + return mc_send_command(mc_io, &cmd);
12704 + * dpseci_create() - Create the DPSECI object
12705 + * @mc_io: Pointer to MC portal's I/O object
12706 + * @dprc_token: Parent container token; '0' for default container
12707 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12708 + * @cfg: Configuration structure
12709 + * @obj_id: returned object id
12711 + * Create the DPSECI object, allocate required resources and perform required
12712 + * initialization.
12714 + * The object can be created either by declaring it in the DPL file, or by
12715 + * calling this function.
12717 + * The function accepts an authentication token of a parent container that this
12718 + * object should be assigned to. The token can be '0' so the object will be
12719 + * assigned to the default container.
12720 + * The newly created object can be opened with the returned object id and using
12721 + * the container's associated tokens and MC portals.
12723 + * Return: '0' on success, error code otherwise
12725 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
12726 + const struct dpseci_cfg *cfg, u32 *obj_id)
12728 + struct fsl_mc_command cmd = { 0 };
12729 + struct dpseci_cmd_create *cmd_params;
12732 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
12735 + cmd_params = (struct dpseci_cmd_create *)cmd.params;
12736 + for (i = 0; i < 8; i++)
12737 + cmd_params->priorities[i] = cfg->priorities[i];
12738 + for (i = 0; i < 8; i++)
12739 + cmd_params->priorities2[i] = cfg->priorities[8 + i];
12740 + cmd_params->num_tx_queues = cfg->num_tx_queues;
12741 + cmd_params->num_rx_queues = cfg->num_rx_queues;
12742 + cmd_params->options = cpu_to_le32(cfg->options);
12743 + err = mc_send_command(mc_io, &cmd);
12747 + *obj_id = mc_cmd_read_object_id(&cmd);
12753 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
12754 + * @mc_io: Pointer to MC portal's I/O object
12755 + * @dprc_token: Parent container token; '0' for default container
12756 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12757 + * @object_id: The object id; it must be a valid id within the container that
12758 + * created this object
12760 + * The function accepts the authentication token of the parent container that
12761 + * created the object (not the one that currently owns the object). The object
12762 + * is searched within parent using the provided 'object_id'.
12763 + * All tokens to the object must be closed before calling destroy.
12765 + * Return: '0' on success, error code otherwise
12767 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
12770 + struct fsl_mc_command cmd = { 0 };
12771 + struct dpseci_cmd_destroy *cmd_params;
12773 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
12776 + cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
12777 + cmd_params->object_id = cpu_to_le32(object_id);
12779 + return mc_send_command(mc_io, &cmd);
12783 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
12784 + * @mc_io: Pointer to MC portal's I/O object
12785 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12786 + * @token: Token of DPSECI object
12788 + * Return: '0' on success, error code otherwise
12790 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12792 + struct fsl_mc_command cmd = { 0 };
12794 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
12797 + return mc_send_command(mc_io, &cmd);
12801 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
12802 + * @mc_io: Pointer to MC portal's I/O object
12803 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12804 + * @token: Token of DPSECI object
12806 + * Return: '0' on success, error code otherwise
12808 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12810 + struct fsl_mc_command cmd = { 0 };
12812 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
12816 + return mc_send_command(mc_io, &cmd);
12820 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
12821 + * @mc_io: Pointer to MC portal's I/O object
12822 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12823 + * @token: Token of DPSECI object
12824 + * @en: Returns '1' if object is enabled; '0' otherwise
12826 + * Return: '0' on success, error code otherwise
12828 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12831 + struct fsl_mc_command cmd = { 0 };
12832 + struct dpseci_rsp_is_enabled *rsp_params;
12835 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
12838 + err = mc_send_command(mc_io, &cmd);
12842 + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
12843 + *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
12849 + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
12850 + * @mc_io: Pointer to MC portal's I/O object
12851 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12852 + * @token: Token of DPSECI object
12854 + * Return: '0' on success, error code otherwise
12856 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12858 + struct fsl_mc_command cmd = { 0 };
12860 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
12864 + return mc_send_command(mc_io, &cmd);
12868 + * dpseci_get_irq_enable() - Get overall interrupt state
12869 + * @mc_io: Pointer to MC portal's I/O object
12870 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12871 + * @token: Token of DPSECI object
12872 + * @irq_index: The interrupt index to configure
12873 + * @en: Returned Interrupt state - enable = 1, disable = 0
12875 + * Return: '0' on success, error code otherwise
12877 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12878 + u8 irq_index, u8 *en)
12880 + struct fsl_mc_command cmd = { 0 };
12881 + struct dpseci_cmd_irq_enable *cmd_params;
12882 + struct dpseci_rsp_get_irq_enable *rsp_params;
12885 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
12888 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
12889 + cmd_params->irq_index = irq_index;
12890 + err = mc_send_command(mc_io, &cmd);
12894 + rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
12895 + *en = rsp_params->enable_state;
12901 + * dpseci_set_irq_enable() - Set overall interrupt state.
12902 + * @mc_io: Pointer to MC portal's I/O object
12903 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12904 + * @token: Token of DPSECI object
12905 + * @irq_index: The interrupt index to configure
12906 + * @en: Interrupt state - enable = 1, disable = 0
12908 + * Allows GPP software to control when interrupts are generated.
12909 + * Each interrupt can have up to 32 causes. The enable/disable controls the
12910 + * overall interrupt state. If the interrupt is disabled, no causes will cause
12913 + * Return: '0' on success, error code otherwise
12915 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12916 + u8 irq_index, u8 en)
12918 + struct fsl_mc_command cmd = { 0 };
12919 + struct dpseci_cmd_irq_enable *cmd_params;
12921 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
12924 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
12925 + cmd_params->irq_index = irq_index;
12926 + cmd_params->enable_state = en;
12928 + return mc_send_command(mc_io, &cmd);
12932 + * dpseci_get_irq_mask() - Get interrupt mask.
12933 + * @mc_io: Pointer to MC portal's I/O object
12934 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12935 + * @token: Token of DPSECI object
12936 + * @irq_index: The interrupt index to configure
12937 + * @mask: Returned event mask to trigger interrupt
12939 + * Every interrupt can have up to 32 causes and the interrupt model supports
12940 + * masking/unmasking each cause independently.
12942 + * Return: '0' on success, error code otherwise
12944 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12945 + u8 irq_index, u32 *mask)
12947 + struct fsl_mc_command cmd = { 0 };
12948 + struct dpseci_cmd_irq_mask *cmd_params;
12951 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
12954 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
12955 + cmd_params->irq_index = irq_index;
12956 + err = mc_send_command(mc_io, &cmd);
12960 + *mask = le32_to_cpu(cmd_params->mask);
12966 + * dpseci_set_irq_mask() - Set interrupt mask.
12967 + * @mc_io: Pointer to MC portal's I/O object
12968 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12969 + * @token: Token of DPSECI object
12970 + * @irq_index: The interrupt index to configure
12971 + * @mask: event mask to trigger interrupt;
12973 + * 0 = ignore event
12974 + * 1 = consider event for asserting IRQ
12976 + * Every interrupt can have up to 32 causes and the interrupt model supports
12977 + * masking/unmasking each cause independently
12979 + * Return: '0' on success, error code otherwise
12981 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12982 + u8 irq_index, u32 mask)
12984 + struct fsl_mc_command cmd = { 0 };
12985 + struct dpseci_cmd_irq_mask *cmd_params;
12987 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
12990 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
12991 + cmd_params->mask = cpu_to_le32(mask);
12992 + cmd_params->irq_index = irq_index;
12994 + return mc_send_command(mc_io, &cmd);
12998 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
12999 + * @mc_io: Pointer to MC portal's I/O object
13000 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13001 + * @token: Token of DPSECI object
13002 + * @irq_index: The interrupt index to configure
13003 + * @status: Returned interrupts status - one bit per cause:
13004 + * 0 = no interrupt pending
13005 + * 1 = interrupt pending
13007 + * Return: '0' on success, error code otherwise
13009 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13010 + u8 irq_index, u32 *status)
13012 + struct fsl_mc_command cmd = { 0 };
13013 + struct dpseci_cmd_irq_status *cmd_params;
13016 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
13019 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
13020 + cmd_params->status = cpu_to_le32(*status);
13021 + cmd_params->irq_index = irq_index;
13022 + err = mc_send_command(mc_io, &cmd);
13026 + *status = le32_to_cpu(cmd_params->status);
13032 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
13033 + * @mc_io: Pointer to MC portal's I/O object
13034 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13035 + * @token: Token of DPSECI object
13036 + * @irq_index: The interrupt index to configure
13037 + * @status: bits to clear (W1C) - one bit per cause:
13038 + * 0 = don't change
13039 + * 1 = clear status bit
13041 + * Return: '0' on success, error code otherwise
13043 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13044 + u8 irq_index, u32 status)
13046 + struct fsl_mc_command cmd = { 0 };
13047 + struct dpseci_cmd_irq_status *cmd_params;
13049 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
13052 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
13053 + cmd_params->status = cpu_to_le32(status);
13054 + cmd_params->irq_index = irq_index;
13056 + return mc_send_command(mc_io, &cmd);
13060 + * dpseci_get_attributes() - Retrieve DPSECI attributes
13061 + * @mc_io: Pointer to MC portal's I/O object
13062 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13063 + * @token: Token of DPSECI object
13064 + * @attr: Returned object's attributes
13066 + * Return: '0' on success, error code otherwise
13068 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13069 + struct dpseci_attr *attr)
13071 + struct fsl_mc_command cmd = { 0 };
13072 + struct dpseci_rsp_get_attributes *rsp_params;
13075 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
13078 + err = mc_send_command(mc_io, &cmd);
13082 + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
13083 + attr->id = le32_to_cpu(rsp_params->id);
13084 + attr->num_tx_queues = rsp_params->num_tx_queues;
13085 + attr->num_rx_queues = rsp_params->num_rx_queues;
13086 + attr->options = le32_to_cpu(rsp_params->options);
13092 + * dpseci_set_rx_queue() - Set Rx queue configuration
13093 + * @mc_io: Pointer to MC portal's I/O object
13094 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13095 + * @token: Token of DPSECI object
13096 + * @queue: Select the queue relative to number of priorities configured at
13097 + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
13098 + * Rx queues identically.
13099 + * @cfg: Rx queue configuration
13101 + * Return: '0' on success, error code otherwise
13103 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13104 + u8 queue, const struct dpseci_rx_queue_cfg *cfg)
13106 + struct fsl_mc_command cmd = { 0 };
13107 + struct dpseci_cmd_queue *cmd_params;
13109 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
13112 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
13113 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
13114 + cmd_params->priority = cfg->dest_cfg.priority;
13115 + cmd_params->queue = queue;
13116 + dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
13117 + cfg->dest_cfg.dest_type);
13118 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
13119 + cmd_params->options = cpu_to_le32(cfg->options);
13120 + dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
13121 + cfg->order_preservation_en);
13123 + return mc_send_command(mc_io, &cmd);
13127 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
13128 + * @mc_io: Pointer to MC portal's I/O object
13129 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13130 + * @token: Token of DPSECI object
13131 + * @queue: Select the queue relative to number of priorities configured at
13132 + * DPSECI creation
13133 + * @attr: Returned Rx queue attributes
13135 + * Return: '0' on success, error code otherwise
13137 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13138 + u8 queue, struct dpseci_rx_queue_attr *attr)
13140 + struct fsl_mc_command cmd = { 0 };
13141 + struct dpseci_cmd_queue *cmd_params;
13144 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
13147 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
13148 + cmd_params->queue = queue;
13149 + err = mc_send_command(mc_io, &cmd);
13153 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
13154 + attr->dest_cfg.priority = cmd_params->priority;
13155 + attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
13157 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
13158 + attr->fqid = le32_to_cpu(cmd_params->fqid);
13159 + attr->order_preservation_en =
13160 + dpseci_get_field(cmd_params->order_preservation_en,
13161 + ORDER_PRESERVATION);
13167 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
13168 + * @mc_io: Pointer to MC portal's I/O object
13169 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13170 + * @token: Token of DPSECI object
13171 + * @queue: Select the queue relative to number of priorities configured at
13172 + * DPSECI creation
13173 + * @attr: Returned Tx queue attributes
13175 + * Return: '0' on success, error code otherwise
13177 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13178 + u8 queue, struct dpseci_tx_queue_attr *attr)
13180 + struct fsl_mc_command cmd = { 0 };
13181 + struct dpseci_cmd_queue *cmd_params;
13182 + struct dpseci_rsp_get_tx_queue *rsp_params;
13185 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
13188 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
13189 + cmd_params->queue = queue;
13190 + err = mc_send_command(mc_io, &cmd);
13194 + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
13195 + attr->fqid = le32_to_cpu(rsp_params->fqid);
13196 + attr->priority = rsp_params->priority;
13202 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
13203 + * @mc_io: Pointer to MC portal's I/O object
13204 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13205 + * @token: Token of DPSECI object
13206 + * @attr: Returned SEC attributes
13208 + * Return: '0' on success, error code otherwise
13210 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13211 + struct dpseci_sec_attr *attr)
13213 + struct fsl_mc_command cmd = { 0 };
13214 + struct dpseci_rsp_get_sec_attr *rsp_params;
13217 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
13220 + err = mc_send_command(mc_io, &cmd);
13224 + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
13225 + attr->ip_id = le16_to_cpu(rsp_params->ip_id);
13226 + attr->major_rev = rsp_params->major_rev;
13227 + attr->minor_rev = rsp_params->minor_rev;
13228 + attr->era = rsp_params->era;
13229 + attr->deco_num = rsp_params->deco_num;
13230 + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
13231 + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
13232 + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
13233 + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
13234 + attr->crc_acc_num = rsp_params->crc_acc_num;
13235 + attr->pk_acc_num = rsp_params->pk_acc_num;
13236 + attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
13237 + attr->rng_acc_num = rsp_params->rng_acc_num;
13238 + attr->md_acc_num = rsp_params->md_acc_num;
13239 + attr->arc4_acc_num = rsp_params->arc4_acc_num;
13240 + attr->des_acc_num = rsp_params->des_acc_num;
13241 + attr->aes_acc_num = rsp_params->aes_acc_num;
13242 + attr->ccha_acc_num = rsp_params->ccha_acc_num;
13243 + attr->ptha_acc_num = rsp_params->ptha_acc_num;
13249 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
13250 + * @mc_io: Pointer to MC portal's I/O object
13251 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13252 + * @token: Token of DPSECI object
13253 + * @counters: Returned SEC counters
13255 + * Return: '0' on success, error code otherwise
13257 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13258 + struct dpseci_sec_counters *counters)
13260 + struct fsl_mc_command cmd = { 0 };
13261 + struct dpseci_rsp_get_sec_counters *rsp_params;
13264 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
13267 + err = mc_send_command(mc_io, &cmd);
13271 + rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
13272 + counters->dequeued_requests =
13273 + le64_to_cpu(rsp_params->dequeued_requests);
13274 + counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
13275 + counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
13276 + counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
13277 + counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
13278 + counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
13279 + counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
13285 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
13286 + * @mc_io: Pointer to MC portal's I/O object
13287 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13288 + * @major_ver: Major version of data path sec API
13289 + * @minor_ver: Minor version of data path sec API
13291 + * Return: '0' on success, error code otherwise
13293 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
13294 + u16 *major_ver, u16 *minor_ver)
13296 + struct fsl_mc_command cmd = { 0 };
13297 + struct dpseci_rsp_get_api_version *rsp_params;
13300 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
13302 + err = mc_send_command(mc_io, &cmd);
13306 + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
13307 + *major_ver = le16_to_cpu(rsp_params->major);
13308 + *minor_ver = le16_to_cpu(rsp_params->minor);
13314 + * dpseci_set_opr() - Set Order Restoration configuration
13315 + * @mc_io: Pointer to MC portal's I/O object
13316 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13317 + * @token: Token of DPSECI object
13318 + * @index: The queue index
13319 + * @options: Configuration mode options; can be OPR_OPT_CREATE or
13321 + * @cfg: Configuration options for the OPR
13323 + * Return: '0' on success, error code otherwise
13325 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13326 + u8 options, struct opr_cfg *cfg)
13328 + struct fsl_mc_command cmd = { 0 };
13329 + struct dpseci_cmd_opr *cmd_params;
13331 + cmd.header = mc_encode_cmd_header(
13332 + DPSECI_CMDID_SET_OPR,
13335 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
13336 + cmd_params->index = index;
13337 + cmd_params->options = options;
13338 + cmd_params->oloe = cfg->oloe;
13339 + cmd_params->oeane = cfg->oeane;
13340 + cmd_params->olws = cfg->olws;
13341 + cmd_params->oa = cfg->oa;
13342 + cmd_params->oprrws = cfg->oprrws;
13344 + return mc_send_command(mc_io, &cmd);
13348 + * dpseci_get_opr() - Retrieve Order Restoration config and query
13349 + * @mc_io: Pointer to MC portal's I/O object
13350 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13351 + * @token: Token of DPSECI object
13352 + * @index: The queue index
13353 + * @cfg: Returned OPR configuration
13354 + * @qry: Returned OPR query
13356 + * Return: '0' on success, error code otherwise
13358 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13359 + struct opr_cfg *cfg, struct opr_qry *qry)
13361 + struct fsl_mc_command cmd = { 0 };
13362 + struct dpseci_cmd_opr *cmd_params;
13363 + struct dpseci_rsp_get_opr *rsp_params;
13366 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
13369 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
13370 + cmd_params->index = index;
13371 + err = mc_send_command(mc_io, &cmd);
13375 + rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
13376 + qry->rip = dpseci_get_field(rsp_params->flags, OPR_RIP);
13377 + qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
13378 + cfg->oloe = rsp_params->oloe;
13379 + cfg->oeane = rsp_params->oeane;
13380 + cfg->olws = rsp_params->olws;
13381 + cfg->oa = rsp_params->oa;
13382 + cfg->oprrws = rsp_params->oprrws;
13383 + qry->nesn = le16_to_cpu(rsp_params->nesn);
13384 + qry->ndsn = le16_to_cpu(rsp_params->ndsn);
13385 + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
13386 + qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
13387 + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
13388 + qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, OPR_HSEQ_NLIS);
13389 + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
13390 + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
13391 + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
13392 + qry->opr_id = le16_to_cpu(rsp_params->opr_id);
13398 + * dpseci_set_congestion_notification() - Set congestion group
13399 + * notification configuration
13400 + * @mc_io: Pointer to MC portal's I/O object
13401 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13402 + * @token: Token of DPSECI object
13403 + * @cfg: congestion notification configuration
13405 + * Return: '0' on success, error code otherwise
13407 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13408 + u16 token, const struct dpseci_congestion_notification_cfg *cfg)
13410 + struct fsl_mc_command cmd = { 0 };
13411 + struct dpseci_cmd_congestion_notification *cmd_params;
13413 + cmd.header = mc_encode_cmd_header(
13414 + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
13417 + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
13418 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
13419 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
13420 + cmd_params->priority = cfg->dest_cfg.priority;
13421 + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
13422 + cfg->dest_cfg.dest_type);
13423 + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
13424 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
13425 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
13426 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
13427 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
13429 + return mc_send_command(mc_io, &cmd);
13433 + * dpseci_get_congestion_notification() - Get congestion group notification
13435 + * @mc_io: Pointer to MC portal's I/O object
13436 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13437 + * @token: Token of DPSECI object
13438 + * @cfg: congestion notification configuration
13440 + * Return: '0' on success, error code otherwise
13442 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13443 + u16 token, struct dpseci_congestion_notification_cfg *cfg)
13445 + struct fsl_mc_command cmd = { 0 };
13446 + struct dpseci_cmd_congestion_notification *rsp_params;
13449 + cmd.header = mc_encode_cmd_header(
13450 + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
13453 + err = mc_send_command(mc_io, &cmd);
13457 + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
13458 + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
13459 + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
13460 + cfg->dest_cfg.priority = rsp_params->priority;
13461 + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
13463 + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
13464 + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
13465 + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
13466 + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
13467 + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
13472 +++ b/drivers/crypto/caam/dpseci.h
13475 + * Copyright 2013-2016 Freescale Semiconductor Inc.
13476 + * Copyright 2017 NXP
13478 + * Redistribution and use in source and binary forms, with or without
13479 + * modification, are permitted provided that the following conditions are met:
13480 + * * Redistributions of source code must retain the above copyright
13481 + * notice, this list of conditions and the following disclaimer.
13482 + * * Redistributions in binary form must reproduce the above copyright
13483 + * notice, this list of conditions and the following disclaimer in the
13484 + * documentation and/or other materials provided with the distribution.
13485 + * * Neither the names of the above-listed copyright holders nor the
13486 + * names of any contributors may be used to endorse or promote products
13487 + * derived from this software without specific prior written permission.
13490 + * ALTERNATIVELY, this software may be distributed under the terms of the
13491 + * GNU General Public License ("GPL") as published by the Free Software
13492 + * Foundation, either version 2 of that License or (at your option) any
13495 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13496 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
13497 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
13498 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
13499 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
13500 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
13501 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
13502 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
13503 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
13504 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
13505 + * POSSIBILITY OF SUCH DAMAGE.
13507 +#ifndef _DPSECI_H_
13508 +#define _DPSECI_H_
13511 + * Data Path SEC Interface API
13512 + * Contains initialization APIs and runtime control APIs for DPSECI
13520 + * General DPSECI macros
13524 + * Maximum number of Tx/Rx queues per DPSECI object
13526 +#define DPSECI_MAX_QUEUE_NUM 16
13529 + * All queues considered; see dpseci_set_rx_queue()
13531 +#define DPSECI_ALL_QUEUES (u8)(-1)
13533 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
13536 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13539 + * Enable the Congestion Group support
13541 +#define DPSECI_OPT_HAS_CG 0x000020
13544 + * Enable the Order Restoration support
13546 +#define DPSECI_OPT_HAS_OPR 0x000040
13549 + * Order Point Records are shared for the entire DPSECI
13551 +#define DPSECI_OPT_OPR_SHARED 0x000080
13554 + * struct dpseci_cfg - Structure representing DPSECI configuration
13555 + * @options: Any combination of the following options:
13556 + * DPSECI_OPT_HAS_CG
13557 + * DPSECI_OPT_HAS_OPR
13558 + * DPSECI_OPT_OPR_SHARED
13559 + * @num_tx_queues: num of queues towards the SEC
13560 + * @num_rx_queues: num of queues back from the SEC
13561 + * @priorities: Priorities for the SEC hardware processing;
13562 + * each place in the array is the priority of the tx queue
13563 + * towards the SEC;
13564 + * valid priorities are configured with values 1-8;
13566 +struct dpseci_cfg {
13568 + u8 num_tx_queues;
13569 + u8 num_rx_queues;
13570 + u8 priorities[DPSECI_MAX_QUEUE_NUM];
13573 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
13574 + const struct dpseci_cfg *cfg, u32 *obj_id);
13576 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
13579 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13581 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13583 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13586 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13588 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13589 + u8 irq_index, u8 *en);
13591 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13592 + u8 irq_index, u8 en);
13594 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13595 + u8 irq_index, u32 *mask);
13597 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13598 + u8 irq_index, u32 mask);
13600 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13601 + u8 irq_index, u32 *status);
13603 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13604 + u8 irq_index, u32 status);
13607 + * struct dpseci_attr - Structure representing DPSECI attributes
13608 + * @id: DPSECI object ID
13609 + * @num_tx_queues: number of queues towards the SEC
13610 + * @num_rx_queues: number of queues back from the SEC
13611 + * @options: any combination of the following options:
13612 + * DPSECI_OPT_HAS_CG
13613 + * DPSECI_OPT_HAS_OPR
13614 + * DPSECI_OPT_OPR_SHARED
13616 +struct dpseci_attr {
13618 + u8 num_tx_queues;
13619 + u8 num_rx_queues;
13623 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13624 + struct dpseci_attr *attr);
13627 + * enum dpseci_dest - DPSECI destination types
13628 + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
13629 + * and does not generate FQDAN notifications; user is expected to dequeue
13630 + * from the queue based on polling or other user-defined method
13631 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
13632 + * notifications to the specified DPIO; user is expected to dequeue from
13633 + * the queue only after notification is received
13634 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
13635 + * FQDAN notifications, but is connected to the specified DPCON object;
13636 + * user is expected to dequeue from the DPCON channel
13638 +enum dpseci_dest {
13639 + DPSECI_DEST_NONE = 0,
13640 + DPSECI_DEST_DPIO,
13641 + DPSECI_DEST_DPCON
13645 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
13646 + * @dest_type: Destination type
13647 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
13648 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
13649 + * are 0-1 or 0-7, depending on the number of priorities in that channel;
13650 + * not relevant for 'DPSECI_DEST_NONE' option
13652 +struct dpseci_dest_cfg {
13653 + enum dpseci_dest dest_type;
13659 + * DPSECI queue modification options
13663 + * Select to modify the user's context associated with the queue
13665 +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
13668 + * Select to modify the queue's destination
13670 +#define DPSECI_QUEUE_OPT_DEST 0x00000002
13673 + * Select to modify the queue's order preservation
13675 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
13678 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
13679 + * @options: Flags representing the suggested modifications to the queue;
13680 + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
13681 + * @order_preservation_en: order preservation configuration for the rx queue
13682 + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
13683 + * @user_ctx: User context value provided in the frame descriptor of each
13684 + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
13686 + * @dest_cfg: Queue destination parameters; valid only if
13687 + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
13689 +struct dpseci_rx_queue_cfg {
13691 + int order_preservation_en;
13693 + struct dpseci_dest_cfg dest_cfg;
13696 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13697 + u8 queue, const struct dpseci_rx_queue_cfg *cfg);
13700 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
13701 + * @user_ctx: User context value provided in the frame descriptor of each
13703 + * @order_preservation_en: Status of the order preservation configuration on the
13705 + * @dest_cfg: Queue destination configuration
13706 + * @fqid: Virtual FQID value to be used for dequeue operations
13708 +struct dpseci_rx_queue_attr {
13710 + int order_preservation_en;
13711 + struct dpseci_dest_cfg dest_cfg;
13715 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13716 + u8 queue, struct dpseci_rx_queue_attr *attr);
13719 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
13720 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
13721 + * @priority: SEC hardware processing priority for the queue
13723 +struct dpseci_tx_queue_attr {
13728 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13729 + u8 queue, struct dpseci_tx_queue_attr *attr);
13732 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
13733 + * hardware accelerator
13734 + * @ip_id: ID for SEC
13735 + * @major_rev: Major revision number for SEC
13736 + * @minor_rev: Minor revision number for SEC
13738 + * @deco_num: The number of copies of the DECO that are implemented in this
13740 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
13742 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
13744 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
13745 + * implemented in this version of SEC
13746 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
13747 + * implemented in this version of SEC
13748 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
13749 + * this version of SEC
13750 + * @pk_acc_num: The number of copies of the Public Key module that are
13751 + * implemented in this version of SEC
13752 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
13753 + * implemented in this version of SEC
13754 + * @rng_acc_num: The number of copies of the Random Number Generator that are
13755 + * implemented in this version of SEC
13756 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
13757 + * implemented in this version of SEC
13758 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
13759 + * in this version of SEC
13760 + * @des_acc_num: The number of copies of the DES module that are implemented in
13761 + * this version of SEC
13762 + * @aes_acc_num: The number of copies of the AES module that are implemented in
13763 + * this version of SEC
13764 + * @ccha_acc_num: The number of copies of the ChaCha20 module that are
13765 + * implemented in this version of SEC.
13766 + * @ptha_acc_num: The number of copies of the Poly1305 module that are
13767 + * implemented in this version of SEC.
13769 +struct dpseci_sec_attr {
13775 + u8 zuc_auth_acc_num;
13776 + u8 zuc_enc_acc_num;
13777 + u8 snow_f8_acc_num;
13778 + u8 snow_f9_acc_num;
13781 + u8 kasumi_acc_num;
13791 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13792 + struct dpseci_sec_attr *attr);
13795 + * struct dpseci_sec_counters - Structure representing global SEC counters and
13796 + * not per dpseci counters
13797 + * @dequeued_requests: Number of Requests Dequeued
13798 + * @ob_enc_requests: Number of Outbound Encrypt Requests
13799 + * @ib_dec_requests: Number of Inbound Decrypt Requests
13800 + * @ob_enc_bytes: Number of Outbound Bytes Encrypted
13801 + * @ob_prot_bytes: Number of Outbound Bytes Protected
13802 + * @ib_dec_bytes: Number of Inbound Bytes Decrypted
13803 + * @ib_valid_bytes: Number of Inbound Bytes Validated
13805 +struct dpseci_sec_counters {
13806 + u64 dequeued_requests;
13807 + u64 ob_enc_requests;
13808 + u64 ib_dec_requests;
13809 + u64 ob_enc_bytes;
13810 + u64 ob_prot_bytes;
13811 + u64 ib_dec_bytes;
13812 + u64 ib_valid_bytes;
13815 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13816 + struct dpseci_sec_counters *counters);
13818 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
13819 + u16 *major_ver, u16 *minor_ver);
13821 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13822 + u8 options, struct opr_cfg *cfg);
13824 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13825 + struct opr_cfg *cfg, struct opr_qry *qry);
13828 + * enum dpseci_congestion_unit - DPSECI congestion units
13829 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
13830 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
13832 +enum dpseci_congestion_unit {
13833 + DPSECI_CONGESTION_UNIT_BYTES = 0,
13834 + DPSECI_CONGESTION_UNIT_FRAMES
13838 + * CSCN message is written to message_iova once entering a
13839 + * congestion state (see 'threshold_entry')
13841 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
13844 + * CSCN message is written to message_iova once exiting a
13845 + * congestion state (see 'threshold_exit')
13847 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
13850 + * CSCN write will attempt to allocate into a cache (coherent write);
13851 + * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
13853 +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
13856 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
13857 + * DPIO/DPCON's WQ channel once entering a congestion state
13858 + * (see 'threshold_entry')
13860 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
13863 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
13864 + * DPIO/DPCON's WQ channel once exiting a congestion state
13865 + * (see 'threshold_exit')
13867 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
13870 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
13871 + * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
13874 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
13877 + * struct dpseci_congestion_notification_cfg - congestion notification
13879 + * @units: units type
13880 + * @threshold_entry: above this threshold we enter a congestion state.
13881 + * set it to '0' to disable it
13882 + * @threshold_exit: below this threshold we exit the congestion state.
13883 + * @message_ctx: The context that will be part of the CSCN message
13884 + * @message_iova: I/O virtual address (must be in DMA-able memory),
13885 + * must be 16B aligned;
13886 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
13887 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
13890 +struct dpseci_congestion_notification_cfg {
13891 + enum dpseci_congestion_unit units;
13892 + u32 threshold_entry;
13893 + u32 threshold_exit;
13895 + u64 message_iova;
13896 + struct dpseci_dest_cfg dest_cfg;
13897 + u16 notification_mode;
13900 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13901 + u16 token, const struct dpseci_congestion_notification_cfg *cfg);
13903 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13904 + u16 token, struct dpseci_congestion_notification_cfg *cfg);
13906 +#endif /* _DPSECI_H_ */
13908 +++ b/drivers/crypto/caam/dpseci_cmd.h
13911 + * Copyright 2013-2016 Freescale Semiconductor Inc.
13912 + * Copyright 2017 NXP
13914 + * Redistribution and use in source and binary forms, with or without
13915 + * modification, are permitted provided that the following conditions are met:
13916 + * * Redistributions of source code must retain the above copyright
13917 + * notice, this list of conditions and the following disclaimer.
13918 + * * Redistributions in binary form must reproduce the above copyright
13919 + * notice, this list of conditions and the following disclaimer in the
13920 + * documentation and/or other materials provided with the distribution.
13921 + * * Neither the names of the above-listed copyright holders nor the
13922 + * names of any contributors may be used to endorse or promote products
13923 + * derived from this software without specific prior written permission.
13926 + * ALTERNATIVELY, this software may be distributed under the terms of the
13927 + * GNU General Public License ("GPL") as published by the Free Software
13928 + * Foundation, either version 2 of that License or (at your option) any
13931 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13932 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
13933 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
13934 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
13935 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
13936 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
13937 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
13938 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
13939 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
13940 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
13941 + * POSSIBILITY OF SUCH DAMAGE.
13944 +#ifndef _DPSECI_CMD_H_
13945 +#define _DPSECI_CMD_H_
13947 +/* DPSECI Version */
13948 +#define DPSECI_VER_MAJOR 5
13949 +#define DPSECI_VER_MINOR 3
13951 +#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
13952 +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
13954 +/* Command versioning */
13955 +#define DPSECI_CMD_BASE_VERSION 1
13956 +#define DPSECI_CMD_BASE_VERSION_V2 2
13957 +#define DPSECI_CMD_BASE_VERSION_V3 3
13958 +#define DPSECI_CMD_ID_OFFSET 4
13960 +#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
13961 + DPSECI_CMD_BASE_VERSION)
13963 +#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
13964 + DPSECI_CMD_BASE_VERSION_V2)
13966 +#define DPSECI_CMD_V3(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
13967 + DPSECI_CMD_BASE_VERSION_V3)
13970 +#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
13971 +#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
13972 +#define DPSECI_CMDID_CREATE DPSECI_CMD_V3(0x909)
13973 +#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
13974 +#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
13976 +#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
13977 +#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
13978 +#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
13979 +#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005)
13980 +#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
13982 +#define DPSECI_CMDID_SET_IRQ_ENABLE DPSECI_CMD_V1(0x012)
13983 +#define DPSECI_CMDID_GET_IRQ_ENABLE DPSECI_CMD_V1(0x013)
13984 +#define DPSECI_CMDID_SET_IRQ_MASK DPSECI_CMD_V1(0x014)
13985 +#define DPSECI_CMDID_GET_IRQ_MASK DPSECI_CMD_V1(0x015)
13986 +#define DPSECI_CMDID_GET_IRQ_STATUS DPSECI_CMD_V1(0x016)
13987 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS DPSECI_CMD_V1(0x017)
13989 +#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
13990 +#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
13991 +#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
13992 +#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
13993 +#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
13994 +#define DPSECI_CMDID_SET_OPR DPSECI_CMD_V1(0x19A)
13995 +#define DPSECI_CMDID_GET_OPR DPSECI_CMD_V1(0x19B)
13996 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
13997 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
13999 +/* Macros for accessing command fields smaller than 1 byte */
14000 +#define DPSECI_MASK(field) \
14001 + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
14002 + DPSECI_##field##_SHIFT)
14004 +#define dpseci_set_field(var, field, val) \
14005 + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
14007 +#define dpseci_get_field(var, field) \
14008 + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
14010 +struct dpseci_cmd_open {
14011 + __le32 dpseci_id;
14014 +struct dpseci_cmd_create {
14015 + u8 priorities[8];
14016 + u8 num_tx_queues;
14017 + u8 num_rx_queues;
14021 + u8 priorities2[8];
14024 +struct dpseci_cmd_destroy {
14025 + __le32 object_id;
14028 +#define DPSECI_ENABLE_SHIFT 0
14029 +#define DPSECI_ENABLE_SIZE 1
14031 +struct dpseci_rsp_is_enabled {
14035 +struct dpseci_cmd_irq_enable {
14041 +struct dpseci_rsp_get_irq_enable {
14045 +struct dpseci_cmd_irq_mask {
14050 +struct dpseci_cmd_irq_status {
14055 +struct dpseci_rsp_get_attributes {
14058 + u8 num_tx_queues;
14059 + u8 num_rx_queues;
14064 +#define DPSECI_DEST_TYPE_SHIFT 0
14065 +#define DPSECI_DEST_TYPE_SIZE 4
14067 +#define DPSECI_ORDER_PRESERVATION_SHIFT 0
14068 +#define DPSECI_ORDER_PRESERVATION_SIZE 1
14070 +struct dpseci_cmd_queue {
14081 + u8 order_preservation_en;
14084 +struct dpseci_rsp_get_tx_queue {
14090 +struct dpseci_rsp_get_sec_attr {
14097 + u8 zuc_auth_acc_num;
14098 + u8 zuc_enc_acc_num;
14100 + u8 snow_f8_acc_num;
14101 + u8 snow_f9_acc_num;
14105 + u8 kasumi_acc_num;
14116 +struct dpseci_rsp_get_sec_counters {
14117 + __le64 dequeued_requests;
14118 + __le64 ob_enc_requests;
14119 + __le64 ib_dec_requests;
14120 + __le64 ob_enc_bytes;
14121 + __le64 ob_prot_bytes;
14122 + __le64 ib_dec_bytes;
14123 + __le64 ib_valid_bytes;
14126 +struct dpseci_rsp_get_api_version {
14131 +struct dpseci_cmd_opr {
14143 +#define DPSECI_OPR_RIP_SHIFT 0
14144 +#define DPSECI_OPR_RIP_SIZE 1
14145 +#define DPSECI_OPR_ENABLE_SHIFT 1
14146 +#define DPSECI_OPR_ENABLE_SIZE 1
14147 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT 0
14148 +#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
14149 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT 0
14150 +#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
14152 +struct dpseci_rsp_get_opr {
14180 +#define DPSECI_CGN_DEST_TYPE_SHIFT 0
14181 +#define DPSECI_CGN_DEST_TYPE_SIZE 4
14182 +#define DPSECI_CGN_UNITS_SHIFT 4
14183 +#define DPSECI_CGN_UNITS_SIZE 2
14185 +struct dpseci_cmd_congestion_notification {
14187 + __le16 notification_mode;
14190 + __le64 message_iova;
14191 + __le64 message_ctx;
14192 + __le32 threshold_entry;
14193 + __le32 threshold_exit;
14196 +#endif /* _DPSECI_CMD_H_ */
14197 --- a/drivers/crypto/caam/error.c
14198 +++ b/drivers/crypto/caam/error.c
14199 @@ -50,6 +50,12 @@ void caam_dump_sg(const char *level, con
14201 EXPORT_SYMBOL(caam_dump_sg);
14203 +bool caam_little_end;
14204 +EXPORT_SYMBOL(caam_little_end);
14207 +EXPORT_SYMBOL(caam_imx);
14209 static const struct {
14211 const char *error_text;
14212 @@ -108,6 +114,54 @@ static const struct {
14213 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
14216 +static const struct {
14218 + const char *error_text;
14219 +} qi_error_list[] = {
14220 + { 0x1F, "Job terminated by FQ or ICID flush" },
14221 + { 0x20, "FD format error"},
14222 + { 0x21, "FD command format error"},
14223 + { 0x23, "FL format error"},
14224 + { 0x25, "CRJD specified in FD, but not enabled in FLC"},
14225 + { 0x30, "Max. buffer size too small"},
14226 + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
14227 + { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format"},
14228 + { 0x33, "Size over/underflow (allocate mode)"},
14229 + { 0x34, "Size over/underflow (reuse mode)"},
14230 + { 0x35, "Length exceeds max. short length (allocate mode, S/G/ format)"},
14231 + { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G/ format)"},
14232 + { 0x41, "SBC frame format not supported (allocate mode)"},
14233 + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
14234 + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
14235 + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
14236 + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
14237 + { 0x46, "Annotation length exceeds offset (reuse mode)"},
14238 + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
14239 + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
14240 + { 0x4B, "Annotation output enabled but ASA cannote be expanded (frame list)"},
14241 + { 0x51, "Unsupported IF reuse mode"},
14242 + { 0x52, "Unsupported FL use mode"},
14243 + { 0x53, "Unsupported RJD use mode"},
14244 + { 0x54, "Unsupported inline descriptor use mode"},
14245 + { 0xC0, "Table buffer pool 0 depletion"},
14246 + { 0xC1, "Table buffer pool 1 depletion"},
14247 + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
14248 + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
14249 + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
14250 + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
14251 + { 0xD0, "FLC read error"},
14252 + { 0xD1, "FL read error"},
14253 + { 0xD2, "FL write error"},
14254 + { 0xD3, "OF SGT write error"},
14255 + { 0xD4, "PTA read error"},
14256 + { 0xD5, "PTA write error"},
14257 + { 0xD6, "OF SGT F-bit write error"},
14258 + { 0xD7, "ASA write error"},
14259 + { 0xE1, "FLC[ICR]=0 ICID error"},
14260 + { 0xE2, "FLC[ICR]=1 ICID error"},
14261 + { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
14264 static const char * const cha_id_list[] = {
14267 @@ -236,6 +290,27 @@ static void report_deco_status(struct de
14268 status, error, idx_str, idx, err_str, err_err_code);
14271 +static void report_qi_status(struct device *qidev, const u32 status,
14272 + const char *error)
14274 + u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
14275 + const char *err_str = "unidentified error value 0x";
14276 + char err_err_code[3] = { 0 };
14279 + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
14280 + if (qi_error_list[i].value == err_id)
14283 + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
14284 + err_str = qi_error_list[i].error_text;
14286 + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
14288 + dev_err(qidev, "%08x: %s: %s%s\n",
14289 + status, error, err_str, err_err_code);
14292 static void report_jr_status(struct device *jrdev, const u32 status,
14295 @@ -250,7 +325,7 @@ static void report_cond_code_status(stru
14296 status, error, __func__);
14299 -void caam_jr_strstatus(struct device *jrdev, u32 status)
14300 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
14302 static const struct stat_src {
14303 void (*report_ssed)(struct device *jrdev, const u32 status,
14304 @@ -262,7 +337,7 @@ void caam_jr_strstatus(struct device *jr
14305 { report_ccb_status, "CCB" },
14306 { report_jump_status, "Jump" },
14307 { report_deco_status, "DECO" },
14308 - { NULL, "Queue Manager Interface" },
14309 + { report_qi_status, "Queue Manager Interface" },
14310 { report_jr_status, "Job Ring" },
14311 { report_cond_code_status, "Condition Code" },
14313 @@ -288,4 +363,4 @@ void caam_jr_strstatus(struct device *jr
14315 dev_err(jrdev, "%d: unknown error source\n", ssrc);
14317 -EXPORT_SYMBOL(caam_jr_strstatus);
14318 +EXPORT_SYMBOL(caam_strstatus);
14319 --- a/drivers/crypto/caam/error.h
14320 +++ b/drivers/crypto/caam/error.h
14322 #ifndef CAAM_ERROR_H
14323 #define CAAM_ERROR_H
14324 #define CAAM_ERROR_STR_MAX 302
14325 -void caam_jr_strstatus(struct device *jrdev, u32 status);
14327 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
14329 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
14330 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
14332 void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
14333 int rowsize, int groupsize, struct scatterlist *sg,
14334 --- a/drivers/crypto/caam/intern.h
14335 +++ b/drivers/crypto/caam/intern.h
14336 @@ -65,10 +65,6 @@ struct caam_drv_private_jr {
14337 * Driver-private storage for a single CAAM block instance
14339 struct caam_drv_private {
14340 -#ifdef CONFIG_CAAM_QI
14341 - struct device *qidev;
14344 /* Physical-presence section */
14345 struct caam_ctrl __iomem *ctrl; /* controller region */
14346 struct caam_deco __iomem *deco; /* DECO/CCB views */
14347 @@ -76,14 +72,21 @@ struct caam_drv_private {
14348 struct caam_queue_if __iomem *qi; /* QI control region */
14349 struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
14351 + struct iommu_domain *domain;
14354 * Detected geometry block. Filled in from device tree if powerpc,
14355 * or from register-based version detection code
14357 u8 total_jobrs; /* Total Job Rings in device */
14358 u8 qi_present; /* Nonzero if QI present in device */
14359 +#ifdef CONFIG_CAAM_QI
14360 + u8 qi_init; /* Nonzero if QI has been initialized */
14362 + u8 mc_en; /* Nonzero if MC f/w is active */
14363 int secvio_irq; /* Security violation interrupt number */
14364 int virt_en; /* Virtualization enabled in CAAM */
14365 + int era; /* CAAM Era (internal HW revision) */
14367 #define RNG4_MAX_HANDLES 2
14369 @@ -108,8 +111,95 @@ struct caam_drv_private {
14373 -void caam_jr_algapi_init(struct device *dev);
14374 -void caam_jr_algapi_remove(struct device *dev);
14375 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
14377 +int caam_algapi_init(struct device *dev);
14378 +void caam_algapi_exit(void);
14382 +static inline int caam_algapi_init(struct device *dev)
14387 +static inline void caam_algapi_exit(void)
14391 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */
14393 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
14395 +int caam_algapi_hash_init(struct device *dev);
14396 +void caam_algapi_hash_exit(void);
14400 +static inline int caam_algapi_hash_init(struct device *dev)
14405 +static inline void caam_algapi_hash_exit(void)
14409 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */
14411 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API
14413 +int caam_pkc_init(struct device *dev);
14414 +void caam_pkc_exit(void);
14418 +static inline int caam_pkc_init(struct device *dev)
14423 +static inline void caam_pkc_exit(void)
14427 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */
14429 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
14431 +int caam_rng_init(struct device *dev);
14432 +void caam_rng_exit(void);
14436 +static inline int caam_rng_init(struct device *dev)
14441 +static inline void caam_rng_exit(void)
14445 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
14447 +#ifdef CONFIG_CAAM_QI
14449 +int caam_qi_algapi_init(struct device *dev);
14450 +void caam_qi_algapi_exit(void);
14454 +static inline int caam_qi_algapi_init(struct device *dev)
14459 +static inline void caam_qi_algapi_exit(void)
14463 +#endif /* CONFIG_CAAM_QI */
14465 #ifdef CONFIG_DEBUG_FS
14466 static int caam_debugfs_u64_get(void *data, u64 *val)
14467 --- a/drivers/crypto/caam/jr.c
14468 +++ b/drivers/crypto/caam/jr.c
14469 @@ -23,6 +23,52 @@ struct jr_driver_data {
14471 static struct jr_driver_data driver_data;
14473 +static int jr_driver_probed;
14475 +int caam_jr_driver_probed(void)
14477 + return jr_driver_probed;
14479 +EXPORT_SYMBOL(caam_jr_driver_probed);
14481 +static DEFINE_MUTEX(algs_lock);
14482 +static unsigned int active_devs;
14484 +static void register_algs(struct device *dev)
14486 + mutex_lock(&algs_lock);
14488 + if (++active_devs != 1)
14489 + goto algs_unlock;
14491 + caam_algapi_init(dev);
14492 + caam_algapi_hash_init(dev);
14493 + caam_pkc_init(dev);
14494 + caam_rng_init(dev);
14495 + caam_qi_algapi_init(dev);
14498 + mutex_unlock(&algs_lock);
14501 +static void unregister_algs(void)
14503 + mutex_lock(&algs_lock);
14505 + if (--active_devs != 0)
14506 + goto algs_unlock;
14508 + caam_qi_algapi_exit();
14512 + caam_algapi_hash_exit();
14513 + caam_algapi_exit();
14516 + mutex_unlock(&algs_lock);
14519 static int caam_reset_hw_jr(struct device *dev)
14521 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
14522 @@ -108,6 +154,9 @@ static int caam_jr_remove(struct platfor
14526 + /* Unregister JR-based RNG & crypto algorithms */
14527 + unregister_algs();
14529 /* Remove the node from Physical JobR list maintained by driver */
14530 spin_lock(&driver_data.jr_alloc_lock);
14531 list_del(&jrpriv->list_node);
14532 @@ -119,6 +168,8 @@ static int caam_jr_remove(struct platfor
14533 dev_err(jrdev, "Failed to shut down job ring\n");
14534 irq_dispose_mapping(jrpriv->irq);
14536 + jr_driver_probed--;
14541 @@ -282,6 +333,36 @@ struct device *caam_jr_alloc(void)
14542 EXPORT_SYMBOL(caam_jr_alloc);
14545 + * caam_jridx_alloc() - Alloc a specific job ring based on its index.
14547 + * returns : pointer to the newly allocated physical
14548 + * JobR dev can be written to if successful.
14550 +struct device *caam_jridx_alloc(int idx)
14552 + struct caam_drv_private_jr *jrpriv;
14553 + struct device *dev = ERR_PTR(-ENODEV);
14555 + spin_lock(&driver_data.jr_alloc_lock);
14557 + if (list_empty(&driver_data.jr_list))
14560 + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
14561 + if (jrpriv->ridx == idx) {
14562 + atomic_inc(&jrpriv->tfm_count);
14563 + dev = jrpriv->dev;
14569 + spin_unlock(&driver_data.jr_alloc_lock);
14572 +EXPORT_SYMBOL(caam_jridx_alloc);
14575 * caam_jr_free() - Free the Job Ring
14576 * @rdev - points to the dev that identifies the Job ring to
14578 @@ -539,6 +620,9 @@ static int caam_jr_probe(struct platform
14580 atomic_set(&jrpriv->tfm_count, 0);
14582 + register_algs(jrdev->parent);
14583 + jr_driver_probed++;
14588 --- a/drivers/crypto/caam/jr.h
14589 +++ b/drivers/crypto/caam/jr.h
14593 /* Prototypes for backend-level services exposed to APIs */
14594 +int caam_jr_driver_probed(void);
14595 struct device *caam_jr_alloc(void);
14596 +struct device *caam_jridx_alloc(int idx);
14597 void caam_jr_free(struct device *rdev);
14598 int caam_jr_enqueue(struct device *dev, u32 *desc,
14599 void (*cbk)(struct device *dev, u32 *desc, u32 status,
14600 --- a/drivers/crypto/caam/key_gen.c
14601 +++ b/drivers/crypto/caam/key_gen.c
14603 #include "desc_constr.h"
14604 #include "key_gen.h"
14607 - * split_key_len - Compute MDHA split key length for a given algorithm
14608 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14609 - * SHA224, SHA384, SHA512.
14611 - * Return: MDHA split key length
14613 -static inline u32 split_key_len(u32 hash)
14615 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
14616 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
14619 - idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
14621 - return (u32)(mdpadlen[idx] * 2);
14625 - * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
14626 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14627 - * SHA224, SHA384, SHA512.
14629 - * Return: MDHA split key pad length
14631 -static inline u32 split_key_pad_len(u32 hash)
14633 - return ALIGN(split_key_len(hash), 16);
14636 void split_key_done(struct device *dev, u32 *desc, u32 err,
14639 --- a/drivers/crypto/caam/key_gen.h
14640 +++ b/drivers/crypto/caam/key_gen.h
14646 + * split_key_len - Compute MDHA split key length for a given algorithm
14647 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14648 + * SHA224, SHA384, SHA512.
14650 + * Return: MDHA split key length
14652 +static inline u32 split_key_len(u32 hash)
14654 + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
14655 + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
14658 + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
14660 + return (u32)(mdpadlen[idx] * 2);
14664 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
14665 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14666 + * SHA224, SHA384, SHA512.
14668 + * Return: MDHA split key pad length
14670 +static inline u32 split_key_pad_len(u32 hash)
14672 + return ALIGN(split_key_len(hash), 16);
14675 struct split_key_result {
14676 struct completion completion;
14678 --- a/drivers/crypto/caam/qi.c
14679 +++ b/drivers/crypto/caam/qi.c
14682 #include <linux/cpumask.h>
14683 #include <linux/kthread.h>
14684 -#include <soc/fsl/qman.h>
14685 +#include <linux/fsl_qman.h>
14689 @@ -58,11 +58,9 @@ static DEFINE_PER_CPU(int, last_cpu);
14691 * caam_qi_priv - CAAM QI backend private params
14692 * @cgr: QMan congestion group
14693 - * @qi_pdev: platform device for QI backend
14695 struct caam_qi_priv {
14696 struct qman_cgr cgr;
14697 - struct platform_device *qi_pdev;
14700 static struct caam_qi_priv qipriv ____cacheline_aligned;
14701 @@ -102,26 +100,34 @@ static int mod_init_cpu;
14703 static struct kmem_cache *qi_cache;
14705 +static void *caam_iova_to_virt(struct iommu_domain *domain,
14706 + dma_addr_t iova_addr)
14708 + phys_addr_t phys_addr;
14710 + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
14712 + return phys_to_virt(phys_addr);
14715 int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
14720 int num_retries = 0;
14722 - qm_fd_clear_fd(&fd);
14723 - qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
14725 - addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
14727 + fd.format = qm_fd_compound;
14728 + fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
14729 + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
14730 DMA_BIDIRECTIONAL);
14731 - if (dma_mapping_error(qidev, addr)) {
14732 + if (dma_mapping_error(qidev, fd.addr)) {
14733 dev_err(qidev, "DMA mapping error for QI enqueue request\n");
14736 - qm_fd_addr_set64(&fd, addr);
14739 - ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
14740 + ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
14744 @@ -137,20 +143,21 @@ int caam_qi_enqueue(struct device *qidev
14745 EXPORT_SYMBOL(caam_qi_enqueue);
14747 static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
14748 - const union qm_mr_entry *msg)
14749 + const struct qm_mr_entry *msg)
14751 const struct qm_fd *fd;
14752 struct caam_drv_req *drv_req;
14753 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
14754 + struct caam_drv_private *priv = dev_get_drvdata(qidev);
14758 - if (qm_fd_get_format(fd) != qm_fd_compound) {
14759 + if (fd->format != qm_fd_compound) {
14760 dev_err(qidev, "Non-compound FD from CAAM\n");
14764 - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
14765 + drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
14768 "Can't find original request for CAAM response\n");
14769 @@ -180,20 +187,22 @@ static struct qman_fq *create_caam_req_f
14770 req_fq->cb.fqs = NULL;
14772 ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
14773 - QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
14774 + QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
14777 dev_err(qidev, "Failed to create session req FQ\n");
14778 goto create_req_fq_fail;
14781 - memset(&opts, 0, sizeof(opts));
14782 - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14783 - QM_INITFQ_WE_CONTEXTB |
14784 - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
14785 - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
14786 - qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
14787 - opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
14788 - qm_fqd_context_a_set64(&opts.fqd, hwdesc);
14789 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14790 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
14791 + QM_INITFQ_WE_CGID;
14792 + opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
14793 + opts.fqd.dest.channel = qm_channel_caam;
14794 + opts.fqd.dest.wq = 2;
14795 + opts.fqd.context_b = qman_fq_fqid(rsp_fq);
14796 + opts.fqd.context_a.hi = upper_32_bits(hwdesc);
14797 + opts.fqd.context_a.lo = lower_32_bits(hwdesc);
14798 opts.fqd.cgid = qipriv.cgr.cgrid;
14800 ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
14801 @@ -207,7 +216,7 @@ static struct qman_fq *create_caam_req_f
14805 - qman_destroy_fq(req_fq);
14806 + qman_destroy_fq(req_fq, 0);
14807 create_req_fq_fail:
14809 return ERR_PTR(ret);
14810 @@ -275,7 +284,7 @@ empty_fq:
14812 dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
14814 - qman_destroy_fq(fq);
14815 + qman_destroy_fq(fq, 0);
14819 @@ -292,7 +301,7 @@ static int empty_caam_fq(struct qman_fq
14823 - if (!qm_mcr_np_get(&np, frm_cnt))
14828 @@ -495,7 +504,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel);
14829 int caam_qi_shutdown(struct device *qidev)
14832 - struct caam_qi_priv *priv = dev_get_drvdata(qidev);
14833 + struct caam_qi_priv *priv = &qipriv;
14834 const cpumask_t *cpus = qman_affine_cpus();
14835 struct cpumask old_cpumask = current->cpus_allowed;
14837 @@ -528,7 +537,6 @@ int caam_qi_shutdown(struct device *qide
14838 /* Now that we're done with the CGRs, restore the cpus allowed mask */
14839 set_cpus_allowed_ptr(current, &old_cpumask);
14841 - platform_device_unregister(priv->qi_pdev);
14845 @@ -572,22 +580,28 @@ static enum qman_cb_dqrr_result caam_rsp
14846 struct caam_drv_req *drv_req;
14847 const struct qm_fd *fd;
14848 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
14850 + struct caam_drv_private *priv = dev_get_drvdata(qidev);
14852 if (caam_qi_napi_schedule(p, caam_napi))
14853 return qman_cb_dqrr_stop;
14856 - status = be32_to_cpu(fd->status);
14857 - if (unlikely(status))
14858 - dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
14859 + if (unlikely(fd->status)) {
14860 + u32 ssrc = fd->status & JRSTA_SSRC_MASK;
14861 + u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
14863 - if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
14864 + if (ssrc != JRSTA_SSRC_CCB_ERROR ||
14865 + err_id != JRSTA_CCBERR_ERRID_ICVCHK)
14866 + dev_err(qidev, "Error: %#x in CAAM response FD\n",
14870 + if (unlikely(fd->format != qm_fd_compound)) {
14871 dev_err(qidev, "Non-compound FD from CAAM\n");
14872 return qman_cb_dqrr_consume;
14875 - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
14876 + drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
14877 if (unlikely(!drv_req)) {
14879 "Can't find original request for caam response\n");
14880 @@ -597,7 +611,7 @@ static enum qman_cb_dqrr_result caam_rsp
14881 dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
14882 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
14884 - drv_req->cbk(drv_req, status);
14885 + drv_req->cbk(drv_req, fd->status);
14886 return qman_cb_dqrr_consume;
14889 @@ -621,17 +635,18 @@ static int alloc_rsp_fq_cpu(struct devic
14893 - memset(&opts, 0, sizeof(opts));
14894 - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14895 - QM_INITFQ_WE_CONTEXTB |
14896 - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
14897 - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
14898 - QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
14899 - qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
14900 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14901 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
14902 + QM_INITFQ_WE_CGID;
14903 + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
14905 + opts.fqd.dest.channel = qman_affine_channel(cpu);
14906 + opts.fqd.dest.wq = 3;
14907 opts.fqd.cgid = qipriv.cgr.cgrid;
14908 opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
14909 QM_STASHING_EXCL_DATA;
14910 - qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
14911 + opts.fqd.context_a.stashing.data_cl = 1;
14912 + opts.fqd.context_a.stashing.context_cl = 1;
14914 ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
14916 @@ -650,9 +665,8 @@ static int init_cgr(struct device *qidev
14919 struct qm_mcc_initcgr opts;
14920 - const u64 cpus = *(u64 *)qman_affine_cpus();
14921 - const int num_cpus = hweight64(cpus);
14922 - const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
14923 + const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
14924 + MAX_RSP_FQ_BACKLOG_PER_CPU;
14926 ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
14928 @@ -662,8 +676,7 @@ static int init_cgr(struct device *qidev
14930 qipriv.cgr.cb = cgr_cb;
14931 memset(&opts, 0, sizeof(opts));
14932 - opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
14934 + opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
14935 opts.cgr.cscn_en = QM_CGR_EN;
14936 opts.cgr.mode = QMAN_CGR_MODE_FRAME;
14937 qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
14938 @@ -708,15 +721,10 @@ static void free_rsp_fqs(void)
14939 int caam_qi_init(struct platform_device *caam_pdev)
14942 - struct platform_device *qi_pdev;
14943 struct device *ctrldev = &caam_pdev->dev, *qidev;
14944 struct caam_drv_private *ctrlpriv;
14945 const cpumask_t *cpus = qman_affine_cpus();
14946 struct cpumask old_cpumask = current->cpus_allowed;
14947 - static struct platform_device_info qi_pdev_info = {
14948 - .name = "caam_qi",
14949 - .id = PLATFORM_DEVID_NONE
14953 * QMAN requires CGRs to be removed from same CPU+portal from where it
14954 @@ -728,24 +736,13 @@ int caam_qi_init(struct platform_device
14955 mod_init_cpu = cpumask_first(cpus);
14956 set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
14958 - qi_pdev_info.parent = ctrldev;
14959 - qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
14960 - qi_pdev = platform_device_register_full(&qi_pdev_info);
14961 - if (IS_ERR(qi_pdev))
14962 - return PTR_ERR(qi_pdev);
14963 - set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
14965 ctrlpriv = dev_get_drvdata(ctrldev);
14966 - qidev = &qi_pdev->dev;
14968 - qipriv.qi_pdev = qi_pdev;
14969 - dev_set_drvdata(qidev, &qipriv);
14972 /* Initialize the congestion detection */
14973 err = init_cgr(qidev);
14975 dev_err(qidev, "CGR initialization failed: %d\n", err);
14976 - platform_device_unregister(qi_pdev);
14980 @@ -754,7 +751,6 @@ int caam_qi_init(struct platform_device
14982 dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
14984 - platform_device_unregister(qi_pdev);
14988 @@ -777,15 +773,11 @@ int caam_qi_init(struct platform_device
14989 napi_enable(irqtask);
14992 - /* Hook up QI device to parent controlling caam device */
14993 - ctrlpriv->qidev = qidev;
14995 qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
14996 SLAB_CACHE_DMA, NULL);
14998 dev_err(qidev, "Can't allocate CAAM cache\n");
15000 - platform_device_unregister(qi_pdev);
15004 @@ -795,6 +787,8 @@ int caam_qi_init(struct platform_device
15005 debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
15006 ×_congested, &caam_fops_u64_ro);
15009 + ctrlpriv->qi_init = 1;
15010 dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
15013 --- a/drivers/crypto/caam/qi.h
15014 +++ b/drivers/crypto/caam/qi.h
15019 -#include <soc/fsl/qman.h>
15020 +#include <linux/fsl_qman.h>
15021 #include "compat.h"
15023 #include "desc_constr.h"
15024 --- a/drivers/crypto/caam/regs.h
15025 +++ b/drivers/crypto/caam/regs.h
15027 * CAAM hardware register-level view
15029 * Copyright 2008-2011 Freescale Semiconductor, Inc.
15030 + * Copyright 2018 NXP
15034 @@ -211,6 +212,47 @@ struct jr_outentry {
15035 u32 jrstatus; /* Status for completed descriptor */
15038 +/* Version registers (Era 10+) e80-eff */
15039 +struct version_regs {
15040 + u32 crca; /* CRCA_VERSION */
15041 + u32 afha; /* AFHA_VERSION */
15042 + u32 kfha; /* KFHA_VERSION */
15043 + u32 pkha; /* PKHA_VERSION */
15044 + u32 aesa; /* AESA_VERSION */
15045 + u32 mdha; /* MDHA_VERSION */
15046 + u32 desa; /* DESA_VERSION */
15047 + u32 snw8a; /* SNW8A_VERSION */
15048 + u32 snw9a; /* SNW9A_VERSION */
15049 + u32 zuce; /* ZUCE_VERSION */
15050 + u32 zuca; /* ZUCA_VERSION */
15051 + u32 ccha; /* CCHA_VERSION */
15052 + u32 ptha; /* PTHA_VERSION */
15053 + u32 rng; /* RNG_VERSION */
15054 + u32 trng; /* TRNG_VERSION */
15055 + u32 aaha; /* AAHA_VERSION */
15057 + u32 sr; /* SR_VERSION */
15058 + u32 dma; /* DMA_VERSION */
15059 + u32 ai; /* AI_VERSION */
15060 + u32 qi; /* QI_VERSION */
15061 + u32 jr; /* JR_VERSION */
15062 + u32 deco; /* DECO_VERSION */
15065 +/* Version registers bitfields */
15067 +/* Number of CHAs instantiated */
15068 +#define CHA_VER_NUM_MASK 0xffull
15069 +/* CHA Miscellaneous Information */
15070 +#define CHA_VER_MISC_SHIFT 8
15071 +#define CHA_VER_MISC_MASK (0xffull << CHA_VER_MISC_SHIFT)
15072 +/* CHA Revision Number */
15073 +#define CHA_VER_REV_SHIFT 16
15074 +#define CHA_VER_REV_MASK (0xffull << CHA_VER_REV_SHIFT)
15075 +/* CHA Version ID */
15076 +#define CHA_VER_VID_SHIFT 24
15077 +#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
15080 * caam_perfmon - Performance Monitor/Secure Memory Status/
15081 * CAAM Global Status/Component Version IDs
15082 @@ -223,15 +265,13 @@ struct jr_outentry {
15083 #define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
15086 - * CHA version IDs / instantiation bitfields
15087 + * CHA version IDs / instantiation bitfields (< Era 10)
15088 * Defined for use with the cha_id fields in perfmon, but the same shift/mask
15089 * selectors can be used to pull out the number of instantiated blocks within
15090 * cha_num fields in perfmon because the locations are the same.
15092 #define CHA_ID_LS_AES_SHIFT 0
15093 #define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
15094 -#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT)
15095 -#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT)
15097 #define CHA_ID_LS_DES_SHIFT 4
15098 #define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
15099 @@ -241,9 +281,6 @@ struct jr_outentry {
15101 #define CHA_ID_LS_MD_SHIFT 12
15102 #define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
15103 -#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT)
15104 -#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT)
15105 -#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT)
15107 #define CHA_ID_LS_RNG_SHIFT 16
15108 #define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
15109 @@ -269,6 +306,13 @@ struct jr_outentry {
15110 #define CHA_ID_MS_JR_SHIFT 28
15111 #define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
15113 +/* Specific CHA version IDs */
15114 +#define CHA_VER_VID_AES_LP 0x3ull
15115 +#define CHA_VER_VID_AES_HP 0x4ull
15116 +#define CHA_VER_VID_MD_LP256 0x0ull
15117 +#define CHA_VER_VID_MD_LP512 0x1ull
15118 +#define CHA_VER_VID_MD_HP 0x2ull
15123 @@ -473,8 +517,10 @@ struct caam_ctrl {
15124 struct rng4tst r4tst[2];
15130 + /* Version registers - introduced with era 10 e80-eff */
15131 + struct version_regs vreg;
15132 /* Performance Monitor f00-fff */
15133 struct caam_perfmon perfmon;
15135 @@ -564,8 +610,10 @@ struct caam_job_ring {
15137 u32 jrcommand; /* JRCRx - JobR command */
15142 + /* Version registers - introduced with era 10 e80-eff */
15143 + struct version_regs vreg;
15144 /* Performance Monitor f00-fff */
15145 struct caam_perfmon perfmon;
15147 @@ -627,6 +675,8 @@ struct caam_job_ring {
15148 #define JRSTA_DECOERR_INVSIGN 0x86
15149 #define JRSTA_DECOERR_DSASIGN 0x87
15151 +#define JRSTA_QIERR_ERROR_MASK 0x00ff
15153 #define JRSTA_CCBERR_JUMP 0x08000000
15154 #define JRSTA_CCBERR_INDEX_MASK 0xff00
15155 #define JRSTA_CCBERR_INDEX_SHIFT 8
15156 @@ -870,13 +920,19 @@ struct caam_deco {
15158 u32 descbuf[64]; /* DxDESB - Descriptor buffer */
15160 -#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
15161 #define DESC_DBG_DECO_STAT_VALID 0x80000000
15162 #define DESC_DBG_DECO_STAT_MASK 0x00F00000
15163 +#define DESC_DBG_DECO_STAT_SHIFT 20
15164 u32 desc_dbg; /* DxDDR - DECO Debug Register */
15167 +#define DESC_DER_DECO_STAT_MASK 0x000F0000
15168 +#define DESC_DER_DECO_STAT_SHIFT 16
15169 + u32 dbg_exec; /* DxDER - DECO Debug Exec Register */
15173 +#define DECO_STAT_HOST_ERR 0xD
15175 #define DECO_JQCR_WHL 0x20000000
15176 #define DECO_JQCR_FOUR 0x10000000
15178 --- a/drivers/crypto/caam/sg_sw_qm.h
15179 +++ b/drivers/crypto/caam/sg_sw_qm.h
15180 @@ -34,46 +34,61 @@
15181 #ifndef __SG_SW_QM_H
15182 #define __SG_SW_QM_H
15184 -#include <soc/fsl/qman.h>
15185 +#include <linux/fsl_qman.h>
15188 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
15190 + dma_addr_t addr = qm_sg_ptr->opaque;
15192 + qm_sg_ptr->opaque = cpu_to_caam64(addr);
15193 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
15196 static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
15198 + u32 len, u16 offset)
15200 - qm_sg_entry_set64(qm_sg_ptr, dma);
15201 + qm_sg_ptr->addr = dma;
15202 + qm_sg_ptr->length = len;
15203 qm_sg_ptr->__reserved2 = 0;
15204 qm_sg_ptr->bpid = 0;
15205 - qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
15206 + qm_sg_ptr->__reserved3 = 0;
15207 + qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
15209 + cpu_to_hw_sg(qm_sg_ptr);
15212 static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
15213 dma_addr_t dma, u32 len, u16 offset)
15215 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15216 - qm_sg_entry_set_len(qm_sg_ptr, len);
15217 + qm_sg_ptr->extension = 0;
15218 + qm_sg_ptr->final = 0;
15219 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15222 static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
15223 dma_addr_t dma, u32 len, u16 offset)
15225 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15226 - qm_sg_entry_set_f(qm_sg_ptr, len);
15227 + qm_sg_ptr->extension = 0;
15228 + qm_sg_ptr->final = 1;
15229 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15232 static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
15233 dma_addr_t dma, u32 len, u16 offset)
15235 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15236 - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
15237 + qm_sg_ptr->extension = 1;
15238 + qm_sg_ptr->final = 0;
15239 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15242 static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
15243 dma_addr_t dma, u32 len,
15246 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15247 - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
15248 - (len & QM_SG_LEN_MASK));
15249 + qm_sg_ptr->extension = 1;
15250 + qm_sg_ptr->final = 1;
15251 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15255 @@ -102,7 +117,10 @@ static inline void sg_to_qm_sg_last(stru
15256 struct qm_sg_entry *qm_sg_ptr, u16 offset)
15258 qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
15259 - qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
15261 + qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
15262 + qm_sg_ptr->final = 1;
15263 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
15266 #endif /* __SG_SW_QM_H */
15267 --- a/drivers/crypto/talitos.c
15268 +++ b/drivers/crypto/talitos.c
15269 @@ -1247,6 +1247,14 @@ static int ipsec_esp(struct talitos_edes
15270 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
15271 sg_count, areq->assoclen, tbl_off, elen);
15274 + * In case of SEC 2.x+, cipher in len must include only the ciphertext,
15275 + * while extent is used for ICV len.
15277 + if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
15278 + (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
15279 + desc->ptr[4].len = cpu_to_be16(cryptlen);
15283 sync_needed = true;
15284 --- a/include/crypto/chacha20.h
15285 +++ b/include/crypto/chacha20.h
15287 #define CHACHA20_IV_SIZE 16
15288 #define CHACHA20_KEY_SIZE 32
15289 #define CHACHA20_BLOCK_SIZE 64
15290 +#define CHACHAPOLY_IV_SIZE 12
15292 struct chacha20_ctx {