1 From ba8e92b322a3763880fdc4d19e9c7085f5504be7 Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Tue, 23 Apr 2019 17:41:43 +0800
4 Subject: [PATCH] sec: support layerscape
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
9 This is an integrated patch of sec for layerscape
11 Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
12 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
13 Signed-off-by: Biwen Li <biwen.li@nxp.com>
14 Signed-off-by: Carmen Iorga <carmen.iorga@nxp.com>
15 Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
16 Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
17 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
18 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
19 Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
20 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
23 Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
27 crypto/chacha20poly1305.c | 2 -
28 crypto/tcrypt.c | 27 +-
29 crypto/testmgr.c | 244 ++
30 crypto/testmgr.h | 219 ++
31 crypto/tls.c | 607 ++++
32 drivers/crypto/Makefile | 2 +-
33 drivers/crypto/caam/Kconfig | 85 +-
34 drivers/crypto/caam/Makefile | 26 +-
35 drivers/crypto/caam/caamalg.c | 468 +++-
36 drivers/crypto/caam/caamalg_desc.c | 903 +++++-
37 drivers/crypto/caam/caamalg_desc.h | 52 +-
38 drivers/crypto/caam/caamalg_qi.c | 1060 ++++++-
39 drivers/crypto/caam/caamalg_qi2.c | 5843 +++++++++++++++++++++++++++++++++++
40 drivers/crypto/caam/caamalg_qi2.h | 276 ++
41 drivers/crypto/caam/caamhash.c | 192 +-
42 drivers/crypto/caam/caamhash_desc.c | 108 +
43 drivers/crypto/caam/caamhash_desc.h | 49 +
44 drivers/crypto/caam/caampkc.c | 52 +-
45 drivers/crypto/caam/caamrng.c | 52 +-
46 drivers/crypto/caam/compat.h | 4 +
47 drivers/crypto/caam/ctrl.c | 194 +-
48 drivers/crypto/caam/desc.h | 89 +-
49 drivers/crypto/caam/desc_constr.h | 59 +-
50 drivers/crypto/caam/dpseci.c | 865 ++++++
51 drivers/crypto/caam/dpseci.h | 433 +++
52 drivers/crypto/caam/dpseci_cmd.h | 287 ++
53 drivers/crypto/caam/error.c | 81 +-
54 drivers/crypto/caam/error.h | 6 +-
55 drivers/crypto/caam/intern.h | 102 +-
56 drivers/crypto/caam/jr.c | 84 +
57 drivers/crypto/caam/jr.h | 2 +
58 drivers/crypto/caam/key_gen.c | 30 -
59 drivers/crypto/caam/key_gen.h | 30 +
60 drivers/crypto/caam/qi.c | 134 +-
61 drivers/crypto/caam/qi.h | 2 +-
62 drivers/crypto/caam/regs.h | 76 +-
63 drivers/crypto/caam/sg_sw_qm.h | 46 +-
64 drivers/crypto/talitos.c | 8 +
65 include/crypto/chacha20.h | 1 +
66 41 files changed, 12088 insertions(+), 733 deletions(-)
67 create mode 100644 crypto/tls.c
68 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
69 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
70 create mode 100644 drivers/crypto/caam/caamhash_desc.c
71 create mode 100644 drivers/crypto/caam/caamhash_desc.h
72 create mode 100644 drivers/crypto/caam/dpseci.c
73 create mode 100644 drivers/crypto/caam/dpseci.h
74 create mode 100644 drivers/crypto/caam/dpseci_cmd.h
78 @@ -312,6 +312,26 @@ config CRYPTO_ECHAINIV
79 a sequence number xored with a salt. This is the default
83 + tristate "TLS support"
85 + select CRYPTO_BLKCIPHER
86 + select CRYPTO_MANAGER
89 + select CRYPTO_AUTHENC
91 + Support for TLS 1.0 record encryption and decryption
93 + This module adds support for encryption/decryption of TLS 1.0 frames
94 + using blockcipher algorithms. The name of the resulting algorithm is
95 + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
96 + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
97 + accelerated versions will be used automatically if available.
99 + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
100 + operations through AF_ALG or cryptodev interfaces.
102 comment "Block modes"
105 --- a/crypto/Makefile
106 +++ b/crypto/Makefile
107 @@ -118,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
108 obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
109 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
110 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
111 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
112 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
113 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
114 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
115 --- a/crypto/chacha20poly1305.c
116 +++ b/crypto/chacha20poly1305.c
119 #include "internal.h"
121 -#define CHACHAPOLY_IV_SIZE 12
123 struct chachapoly_instance_ctx {
124 struct crypto_skcipher_spawn chacha;
125 struct crypto_ahash_spawn poly;
126 --- a/crypto/tcrypt.c
127 +++ b/crypto/tcrypt.c
128 @@ -76,7 +76,7 @@ static char *check[] = {
129 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
130 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
131 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
136 struct tcrypt_result {
137 @@ -355,11 +355,13 @@ static void test_aead_speed(const char *
139 aead_request_set_ad(req, aad_size);
143 ret = test_aead_jiffies(req, enc, *b_size,
148 ret = test_aead_cycles(req, enc, *b_size);
152 pr_err("%s() failed return code=%d\n", e, ret);
153 @@ -736,12 +738,14 @@ static void test_ahash_speed_common(cons
155 ahash_request_set_crypt(req, sg, output, speed[i].plen);
159 ret = test_ahash_jiffies(req, speed[i].blen,
160 speed[i].plen, output, secs);
164 ret = test_ahash_cycles(req, speed[i].blen,
165 speed[i].plen, output);
169 pr_err("hashing failed ret=%d\n", ret);
170 @@ -959,12 +963,14 @@ static void test_skcipher_speed(const ch
172 skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
176 ret = test_acipher_jiffies(req, enc,
181 ret = test_acipher_cycles(req, enc,
186 pr_err("%s() failed flags=%x\n", e,
187 @@ -1336,6 +1342,10 @@ static int do_test(const char *alg, u32
188 ret += tcrypt_test("hmac(sha3-512)");
192 + ret += tcrypt_test("rsa");
196 ret += tcrypt_test("ansi_cprng");
198 @@ -1397,6 +1407,9 @@ static int do_test(const char *alg, u32
200 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
203 + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
206 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
207 speed_template_16_24_32);
208 --- a/crypto/testmgr.c
209 +++ b/crypto/testmgr.c
210 @@ -117,6 +117,13 @@ struct drbg_test_suite {
214 +struct tls_test_suite {
216 + struct tls_testvec *vecs;
217 + unsigned int count;
221 struct akcipher_test_suite {
222 const struct akcipher_testvec *vecs;
224 @@ -140,6 +147,7 @@ struct alg_test_desc {
225 struct hash_test_suite hash;
226 struct cprng_test_suite cprng;
227 struct drbg_test_suite drbg;
228 + struct tls_test_suite tls;
229 struct akcipher_test_suite akcipher;
230 struct kpp_test_suite kpp;
232 @@ -991,6 +999,233 @@ static int test_aead(struct crypto_aead
236 +static int __test_tls(struct crypto_aead *tfm, int enc,
237 + struct tls_testvec *template, unsigned int tcount,
238 + const bool diff_dst)
240 + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
241 + unsigned int i, k, authsize;
243 + struct aead_request *req;
244 + struct scatterlist *sg;
245 + struct scatterlist *sgout;
247 + struct tcrypt_result result;
253 + char *xbuf[XBUFSIZE];
254 + char *xoutbuf[XBUFSIZE];
255 + char *axbuf[XBUFSIZE];
258 + if (testmgr_alloc_buf(xbuf))
261 + if (diff_dst && testmgr_alloc_buf(xoutbuf))
264 + if (testmgr_alloc_buf(axbuf))
267 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
271 + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
275 + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
281 + d = diff_dst ? "-ddst" : "";
282 + e = enc ? "encryption" : "decryption";
284 + init_completion(&result.completion);
286 + req = aead_request_alloc(tfm, GFP_KERNEL);
288 + pr_err("alg: tls%s: Failed to allocate request for %s\n",
293 + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
294 + tcrypt_complete, &result);
296 + for (i = 0; i < tcount; i++) {
301 + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
302 + template[i].alen > PAGE_SIZE))
305 + memcpy(assoc, template[i].assoc, template[i].alen);
306 + memcpy(input, template[i].input, template[i].ilen);
308 + if (template[i].iv)
309 + memcpy(iv, template[i].iv, MAX_IVLEN);
311 + memset(iv, 0, MAX_IVLEN);
313 + crypto_aead_clear_flags(tfm, ~0);
315 + if (template[i].klen > MAX_KEYLEN) {
316 + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
317 + d, i, algo, template[i].klen, MAX_KEYLEN);
321 + memcpy(key, template[i].key, template[i].klen);
323 + ret = crypto_aead_setkey(tfm, key, template[i].klen);
324 + if (!ret == template[i].fail) {
325 + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
326 + d, i, algo, crypto_aead_get_flags(tfm));
332 + ret = crypto_aead_setauthsize(tfm, authsize);
334 + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
335 + d, authsize, i, algo);
339 + k = !!template[i].alen;
340 + sg_init_table(sg, k + 1);
341 + sg_set_buf(&sg[0], assoc, template[i].alen);
342 + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
343 + template[i].ilen));
347 + sg_init_table(sgout, k + 1);
348 + sg_set_buf(&sgout[0], assoc, template[i].alen);
350 + output = xoutbuf[0];
351 + sg_set_buf(&sgout[k], output,
352 + (enc ? template[i].rlen : template[i].ilen));
355 + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
356 + template[i].ilen, iv);
358 + aead_request_set_ad(req, template[i].alen);
360 + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
364 + if (template[i].novrfy) {
365 + /* verification was supposed to fail */
366 + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
368 + /* so really, we got a bad message */
375 + wait_for_completion(&result.completion);
376 + reinit_completion(&result.completion);
381 + /* verification failure was expected */
382 + if (template[i].novrfy)
386 + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
387 + d, e, i, algo, -ret);
392 + if (memcmp(q, template[i].result, template[i].rlen)) {
393 + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
395 + hexdump(q, template[i].rlen);
396 + pr_err("should be:\n");
397 + hexdump(template[i].result, template[i].rlen);
404 + aead_request_free(req);
412 + testmgr_free_buf(axbuf);
415 + testmgr_free_buf(xoutbuf);
417 + testmgr_free_buf(xbuf);
422 +static int test_tls(struct crypto_aead *tfm, int enc,
423 + struct tls_testvec *template, unsigned int tcount)
426 + /* test 'dst == src' case */
427 + ret = __test_tls(tfm, enc, template, tcount, false);
430 + /* test 'dst != src' case */
431 + return __test_tls(tfm, enc, template, tcount, true);
434 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
435 + u32 type, u32 mask)
437 + struct crypto_aead *tfm;
440 + tfm = crypto_alloc_aead(driver, type, mask);
442 + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
443 + driver, PTR_ERR(tfm));
444 + return PTR_ERR(tfm);
447 + if (desc->suite.tls.enc.vecs) {
448 + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
449 + desc->suite.tls.enc.count);
454 + if (!err && desc->suite.tls.dec.vecs)
455 + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
456 + desc->suite.tls.dec.count);
459 + crypto_free_aead(tfm);
463 static int test_cipher(struct crypto_cipher *tfm, int enc,
464 const struct cipher_testvec *template,
466 @@ -3524,6 +3759,15 @@ static const struct alg_test_desc alg_te
467 .hash = __VECS(tgr192_tv_template)
470 + .alg = "tls10(hmac(sha1),cbc(aes))",
471 + .test = alg_test_tls,
474 + .enc = __VECS(tls_enc_tv_template),
475 + .dec = __VECS(tls_dec_tv_template)
480 .test = alg_test_hash,
482 --- a/crypto/testmgr.h
483 +++ b/crypto/testmgr.h
484 @@ -125,6 +125,20 @@ struct drbg_testvec {
488 +struct tls_testvec {
489 + char *key; /* wrapped keys for encryption and authentication */
490 + char *iv; /* initialization vector */
491 + char *input; /* input data */
492 + char *assoc; /* associated data: seq num, type, version, input len */
493 + char *result; /* result data */
494 + unsigned char fail; /* the test failure is expected */
495 + unsigned char novrfy; /* dec verification failure expected */
496 + unsigned char klen; /* key length */
497 + unsigned short ilen; /* input data length */
498 + unsigned short alen; /* associated data length */
499 + unsigned short rlen; /* result length */
502 struct akcipher_testvec {
503 const unsigned char *key;
504 const unsigned char *m;
505 @@ -153,6 +167,211 @@ struct kpp_testvec {
506 static const char zeroed_string[48];
509 + * TLS1.0 synthetic test vectors
511 +static struct tls_testvec tls_enc_tv_template[] = {
513 +#ifdef __LITTLE_ENDIAN
514 + .key = "\x08\x00" /* rta length */
515 + "\x01\x00" /* rta type */
517 + .key = "\x00\x08" /* rta length */
518 + "\x00\x01" /* rta type */
520 + "\x00\x00\x00\x10" /* enc key length */
521 + "authenticationkey20benckeyis16_bytes",
522 + .klen = 8 + 20 + 16,
523 + .iv = "iv0123456789abcd",
524 + .input = "Single block msg",
526 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
527 + "\x00\x03\x01\x00\x10",
529 + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
530 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
531 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
532 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
533 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
534 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
535 + .rlen = 16 + 20 + 12,
537 +#ifdef __LITTLE_ENDIAN
538 + .key = "\x08\x00" /* rta length */
539 + "\x01\x00" /* rta type */
541 + .key = "\x00\x08" /* rta length */
542 + "\x00\x01" /* rta type */
544 + "\x00\x00\x00\x10" /* enc key length */
545 + "authenticationkey20benckeyis16_bytes",
546 + .klen = 8 + 20 + 16,
547 + .iv = "iv0123456789abcd",
550 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
551 + "\x00\x03\x01\x00\x00",
553 + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
554 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
555 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
556 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
559 +#ifdef __LITTLE_ENDIAN
560 + .key = "\x08\x00" /* rta length */
561 + "\x01\x00" /* rta type */
563 + .key = "\x00\x08" /* rta length */
564 + "\x00\x01" /* rta type */
566 + "\x00\x00\x00\x10" /* enc key length */
567 + "authenticationkey20benckeyis16_bytes",
568 + .klen = 8 + 20 + 16,
569 + .iv = "iv0123456789abcd",
570 + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
571 + " plaintext285 bytes plaintext285 bytes plaintext285"
572 + " bytes plaintext285 bytes plaintext285 bytes"
573 + " plaintext285 bytes plaintext285 bytes plaintext285"
574 + " bytes plaintext285 bytes plaintext285 bytes"
575 + " plaintext285 bytes plaintext285 bytes plaintext285"
576 + " bytes plaintext285 bytes plaintext",
578 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
579 + "\x00\x03\x01\x01\x1d",
581 + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
582 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
583 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
584 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
585 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
586 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
587 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
588 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
589 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
590 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
591 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
592 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
593 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
594 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
595 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
596 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
597 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
598 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
599 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
600 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
601 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
602 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
603 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
604 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
605 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
606 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
607 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
608 + .rlen = 285 + 20 + 15,
612 +static struct tls_testvec tls_dec_tv_template[] = {
614 +#ifdef __LITTLE_ENDIAN
615 + .key = "\x08\x00" /* rta length */
616 + "\x01\x00" /* rta type */
618 + .key = "\x00\x08" /* rta length */
619 + "\x00\x01" /* rta type */
621 + "\x00\x00\x00\x10" /* enc key length */
622 + "authenticationkey20benckeyis16_bytes",
623 + .klen = 8 + 20 + 16,
624 + .iv = "iv0123456789abcd",
625 + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
626 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
627 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
628 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
629 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
630 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
631 + .ilen = 16 + 20 + 12,
632 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
633 + "\x00\x03\x01\x00\x30",
635 + .result = "Single block msg",
638 +#ifdef __LITTLE_ENDIAN
639 + .key = "\x08\x00" /* rta length */
640 + "\x01\x00" /* rta type */
642 + .key = "\x00\x08" /* rta length */
643 + "\x00\x01" /* rta type */
645 + "\x00\x00\x00\x10" /* enc key length */
646 + "authenticationkey20benckeyis16_bytes",
647 + .klen = 8 + 20 + 16,
648 + .iv = "iv0123456789abcd",
649 + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
650 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
651 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
652 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
654 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
655 + "\x00\x03\x01\x00\x20",
660 +#ifdef __LITTLE_ENDIAN
661 + .key = "\x08\x00" /* rta length */
662 + "\x01\x00" /* rta type */
664 + .key = "\x00\x08" /* rta length */
665 + "\x00\x01" /* rta type */
667 + "\x00\x00\x00\x10" /* enc key length */
668 + "authenticationkey20benckeyis16_bytes",
669 + .klen = 8 + 20 + 16,
670 + .iv = "iv0123456789abcd",
671 + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
672 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
673 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
674 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
675 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
676 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
677 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
678 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
679 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
680 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
681 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
682 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
683 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
684 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
685 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
686 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
687 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
688 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
689 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
690 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
691 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
692 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
693 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
694 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
695 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
696 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
697 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
699 + .ilen = 285 + 20 + 15,
700 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
701 + "\x00\x03\x01\x01\x40",
703 + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
704 + " plaintext285 bytes plaintext285 bytes plaintext285"
705 + " bytes plaintext285 bytes plaintext285 bytes"
706 + " plaintext285 bytes plaintext285 bytes plaintext285"
707 + " bytes plaintext285 bytes plaintext285 bytes"
708 + " plaintext285 bytes plaintext285 bytes plaintext",
714 * RSA test vectors. Borrowed from openSSL.
716 static const struct akcipher_testvec rsa_tv_template[] = {
721 + * Copyright 2013 Freescale Semiconductor, Inc.
722 + * Copyright 2017 NXP Semiconductor, Inc.
724 + * This program is free software; you can redistribute it and/or modify it
725 + * under the terms of the GNU General Public License as published by the Free
726 + * Software Foundation; either version 2 of the License, or (at your option)
727 + * any later version.
731 +#include <crypto/internal/aead.h>
732 +#include <crypto/internal/hash.h>
733 +#include <crypto/internal/skcipher.h>
734 +#include <crypto/authenc.h>
735 +#include <crypto/null.h>
736 +#include <crypto/scatterwalk.h>
737 +#include <linux/err.h>
738 +#include <linux/init.h>
739 +#include <linux/module.h>
740 +#include <linux/rtnetlink.h>
742 +struct tls_instance_ctx {
743 + struct crypto_ahash_spawn auth;
744 + struct crypto_skcipher_spawn enc;
747 +struct crypto_tls_ctx {
748 + unsigned int reqoff;
749 + struct crypto_ahash *auth;
750 + struct crypto_skcipher *enc;
751 + struct crypto_skcipher *null;
754 +struct tls_request_ctx {
756 + * cryptlen holds the payload length in the case of encryption or
757 + * payload_len + icv_len + padding_len in case of decryption
759 + unsigned int cryptlen;
760 + /* working space for partial results */
761 + struct scatterlist tmp[2];
762 + struct scatterlist cipher[2];
763 + struct scatterlist dst[2];
768 + struct completion completion;
772 +static void tls_async_op_done(struct crypto_async_request *req, int err)
774 + struct async_op *areq = req->data;
776 + if (err == -EINPROGRESS)
780 + complete(&areq->completion);
783 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
784 + unsigned int keylen)
786 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
787 + struct crypto_ahash *auth = ctx->auth;
788 + struct crypto_skcipher *enc = ctx->enc;
789 + struct crypto_authenc_keys keys;
792 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
795 + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
796 + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
797 + CRYPTO_TFM_REQ_MASK);
798 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
799 + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
800 + CRYPTO_TFM_RES_MASK);
805 + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
806 + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
807 + CRYPTO_TFM_REQ_MASK);
808 + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
809 + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
810 + CRYPTO_TFM_RES_MASK);
816 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
821 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
822 + * @hash: (output) buffer to save the digest into
823 + * @src: (input) scatterlist with the assoc and payload data
824 + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
825 + * @req: (input) aead request
827 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
828 + unsigned int srclen, struct aead_request *req)
830 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
831 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
832 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
833 + struct async_op ahash_op;
834 + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
835 + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
836 + int err = -EBADMSG;
838 + /* Bail out if the request assoc len is 0 */
839 + if (!req->assoclen)
842 + init_completion(&ahash_op.completion);
844 + /* the hash transform to be executed comes from the original request */
845 + ahash_request_set_tfm(ahreq, ctx->auth);
846 + /* prepare the hash request with input data and result pointer */
847 + ahash_request_set_crypt(ahreq, src, hash, srclen);
848 + /* set the notifier for when the async hash function returns */
849 + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
850 + tls_async_op_done, &ahash_op);
852 + /* Calculate the digest on the given data. The result is put in hash */
853 + err = crypto_ahash_digest(ahreq);
854 + if (err == -EINPROGRESS) {
855 + err = wait_for_completion_interruptible(&ahash_op.completion);
857 + err = ahash_op.err;
864 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
865 + * @hash: (output) buffer to save the digest and padding into
866 + * @phashlen: (output) the size of digest + padding
867 + * @req: (input) aead request
869 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
870 + struct aead_request *req)
872 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
873 + unsigned int hash_size = crypto_aead_authsize(tls);
874 + unsigned int block_size = crypto_aead_blocksize(tls);
875 + unsigned int srclen = req->cryptlen + hash_size;
876 + unsigned int icvlen = req->cryptlen + req->assoclen;
877 + unsigned int padlen;
880 + err = crypto_tls_genicv(hash, req->src, icvlen, req);
884 + /* add padding after digest */
885 + padlen = block_size - (srclen % block_size);
886 + memset(hash + hash_size, padlen - 1, padlen);
888 + *phashlen = hash_size + padlen;
893 +static int crypto_tls_copy_data(struct aead_request *req,
894 + struct scatterlist *src,
895 + struct scatterlist *dst,
898 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
899 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
900 + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
902 + skcipher_request_set_tfm(skreq, ctx->null);
903 + skcipher_request_set_callback(skreq, aead_request_flags(req),
905 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
907 + return crypto_skcipher_encrypt(skreq);
910 +static int crypto_tls_encrypt(struct aead_request *req)
912 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
913 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
914 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
915 + struct skcipher_request *skreq;
916 + struct scatterlist *cipher = treq_ctx->cipher;
917 + struct scatterlist *tmp = treq_ctx->tmp;
918 + struct scatterlist *sg, *src, *dst;
919 + unsigned int cryptlen, phashlen;
920 + u8 *hash = treq_ctx->tail;
924 + * The hash result is saved at the beginning of the tls request ctx
925 + * and is aligned as required by the hash transform. Enough space was
926 + * allocated in crypto_tls_init_tfm to accommodate the difference. The
927 + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
928 + * the result is not overwritten by the second (cipher) request.
930 + hash = (u8 *)ALIGN((unsigned long)hash +
931 + crypto_ahash_alignmask(ctx->auth),
932 + crypto_ahash_alignmask(ctx->auth) + 1);
935 + * STEP 1: create ICV together with necessary padding
937 + err = crypto_tls_gen_padicv(hash, &phashlen, req);
942 + * STEP 2: Hash and padding are combined with the payload
943 + * depending on the form it arrives. Scatter tables must have at least
944 + * one page of data before chaining with another table and can't have
945 + * an empty data page. The following code addresses these requirements.
947 + * If the payload is empty, only the hash is encrypted, otherwise the
948 + * payload scatterlist is merged with the hash. A special merging case
949 + * is when the payload has only one page of data. In that case the
950 + * payload page is moved to another scatterlist and prepared there for
953 + if (req->cryptlen) {
954 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
956 + sg_init_table(cipher, 2);
957 + sg_set_buf(cipher + 1, hash, phashlen);
959 + if (sg_is_last(src)) {
960 + sg_set_page(cipher, sg_page(src), req->cryptlen,
964 + unsigned int rem_len = req->cryptlen;
966 + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
967 + rem_len -= min(rem_len, sg->length);
969 + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
970 + sg_chain(sg, 1, cipher);
973 + sg_init_one(cipher, hash, phashlen);
978 + * If src != dst copy the associated data from source to destination.
979 + * In both cases fast-forward past the associated data in the dest.
981 + if (req->src != req->dst) {
982 + err = crypto_tls_copy_data(req, req->src, req->dst,
987 + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
990 + * STEP 3: encrypt the frame and return the result
992 + cryptlen = req->cryptlen + phashlen;
995 + * The hash and the cipher are applied at different times and their
996 + * requests can use the same memory space without interference
998 + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
999 + skcipher_request_set_tfm(skreq, ctx->enc);
1000 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
1001 + skcipher_request_set_callback(skreq, aead_request_flags(req),
1002 + req->base.complete, req->base.data);
1004 + * Apply the cipher transform. The result will be in req->dst when the
1005 + * asynchronous call terminates
1007 + err = crypto_skcipher_encrypt(skreq);
1012 +static int crypto_tls_decrypt(struct aead_request *req)
1014 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
1015 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
1016 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
1017 + unsigned int cryptlen = req->cryptlen;
1018 + unsigned int hash_size = crypto_aead_authsize(tls);
1019 + unsigned int block_size = crypto_aead_blocksize(tls);
1020 + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
1021 + struct scatterlist *tmp = treq_ctx->tmp;
1022 + struct scatterlist *src, *dst;
1024 + u8 padding[255]; /* padding can be 0-255 bytes */
1027 + u8 *ihash, *hash = treq_ctx->tail;
1030 + int err = -EINVAL;
1032 + struct async_op ciph_op;
1035 + * Rule out bad packets. The input packet length must be at least one
1036 + * byte more than the hash_size
1038 + if (cryptlen <= hash_size || cryptlen % block_size)
1042 + * Step 1 - Decrypt the source. Fast-forward past the associated data
1043 + * to the encrypted data. The result will be overwritten in place so
1044 + * that the decrypted data will be adjacent to the associated data. The
1045 + * last step (computing the hash) will have its input data already
1046 + * prepared and ready to be accessed at req->src.
1048 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
1051 + init_completion(&ciph_op.completion);
1052 + skcipher_request_set_tfm(skreq, ctx->enc);
1053 + skcipher_request_set_callback(skreq, aead_request_flags(req),
1054 + tls_async_op_done, &ciph_op);
1055 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
1056 + err = crypto_skcipher_decrypt(skreq);
1057 + if (err == -EINPROGRESS) {
1058 + err = wait_for_completion_interruptible(&ciph_op.completion);
1060 + err = ciph_op.err;
1066 + * Step 2 - Verify padding
1067 + * Retrieve the last byte of the payload; this is the padding size.
1070 + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
1072 + /* RFC recommendation for invalid padding size. */
1073 + if (cryptlen < pad_size + hash_size) {
1075 + paderr = -EBADMSG;
1077 + cryptlen -= pad_size;
1078 + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
1080 + /* Padding content must be equal to pad_size. We verify it all */
1081 + for (i = 0; i < pad_size; i++)
1082 + if (padding[i] != pad_size)
1083 + paderr = -EBADMSG;
1086 + * Step 3 - Verify hash
1087 + * Align the digest result as required by the hash transform. Enough
1088 + * space was allocated in crypto_tls_init_tfm
1090 + hash = (u8 *)ALIGN((unsigned long)hash +
1091 + crypto_ahash_alignmask(ctx->auth),
1092 + crypto_ahash_alignmask(ctx->auth) + 1);
1094 + * Two bytes at the end of the associated data make the length field.
1095 + * It must be updated with the length of the cleartext message before
1096 + * the hash is calculated.
1098 + len_field = sg_virt(req->src) + req->assoclen - 2;
1099 + cryptlen -= hash_size;
1100 + *len_field = htons(cryptlen);
1102 + /* This is the hash from the decrypted packet. Save it for later */
1103 + ihash = hash + hash_size;
1104 + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
1106 + /* Now compute and compare our ICV with the one from the packet */
1107 + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
1109 + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
1111 + if (req->src != req->dst) {
1112 + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
1118 + /* return the first found error */
1123 + aead_request_complete(req, err);
1127 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
1129 + struct aead_instance *inst = aead_alg_instance(tfm);
1130 + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
1131 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1132 + struct crypto_ahash *auth;
1133 + struct crypto_skcipher *enc;
1134 + struct crypto_skcipher *null;
1137 + auth = crypto_spawn_ahash(&ictx->auth);
1139 + return PTR_ERR(auth);
1141 + enc = crypto_spawn_skcipher(&ictx->enc);
1142 + err = PTR_ERR(enc);
1144 + goto err_free_ahash;
1146 + null = crypto_get_default_null_skcipher2();
1147 + err = PTR_ERR(null);
1149 + goto err_free_skcipher;
1156 + * Allow enough space for two digests. The two digests will be compared
1157 + * during the decryption phase. One will come from the decrypted packet
1158 + * and the other will be calculated. For encryption, one digest is
1159 + * padded (up to a cipher blocksize) and chained with the payload
1161 + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
1162 + crypto_ahash_alignmask(auth),
1163 + crypto_ahash_alignmask(auth) + 1) +
1164 + max(crypto_ahash_digestsize(auth),
1165 + crypto_skcipher_blocksize(enc));
1167 + crypto_aead_set_reqsize(tfm,
1168 + sizeof(struct tls_request_ctx) +
1170 + max_t(unsigned int,
1171 + crypto_ahash_reqsize(auth) +
1172 + sizeof(struct ahash_request),
1173 + crypto_skcipher_reqsize(enc) +
1174 + sizeof(struct skcipher_request)));
1179 + crypto_free_skcipher(enc);
1181 + crypto_free_ahash(auth);
1185 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
1187 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
1189 + crypto_free_ahash(ctx->auth);
1190 + crypto_free_skcipher(ctx->enc);
1191 + crypto_put_default_null_skcipher2();
1194 +static void crypto_tls_free(struct aead_instance *inst)
1196 + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
1198 + crypto_drop_skcipher(&ctx->enc);
1199 + crypto_drop_ahash(&ctx->auth);
1203 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
1205 + struct crypto_attr_type *algt;
1206 + struct aead_instance *inst;
1207 + struct hash_alg_common *auth;
1208 + struct crypto_alg *auth_base;
1209 + struct skcipher_alg *enc;
1210 + struct tls_instance_ctx *ctx;
1211 + const char *enc_name;
1214 + algt = crypto_get_attr_type(tb);
1216 + return PTR_ERR(algt);
1218 + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
1221 + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
1222 + CRYPTO_ALG_TYPE_AHASH_MASK |
1223 + crypto_requires_sync(algt->type, algt->mask));
1225 + return PTR_ERR(auth);
1227 + auth_base = &auth->base;
1229 + enc_name = crypto_attr_alg_name(tb[2]);
1230 + err = PTR_ERR(enc_name);
1231 + if (IS_ERR(enc_name))
1232 + goto out_put_auth;
1234 + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
1237 + goto out_put_auth;
1239 + ctx = aead_instance_ctx(inst);
1241 + err = crypto_init_ahash_spawn(&ctx->auth, auth,
1242 + aead_crypto_instance(inst));
1244 + goto err_free_inst;
1246 + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
1247 + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
1248 + crypto_requires_sync(algt->type,
1251 + goto err_drop_auth;
1253 + enc = crypto_spawn_skcipher_alg(&ctx->enc);
1255 + err = -ENAMETOOLONG;
1256 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
1257 + "tls10(%s,%s)", auth_base->cra_name,
1258 + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
1259 + goto err_drop_enc;
1261 + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1262 + "tls10(%s,%s)", auth_base->cra_driver_name,
1263 + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
1264 + goto err_drop_enc;
1266 + inst->alg.base.cra_flags = (auth_base->cra_flags |
1267 + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
1268 + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
1269 + auth_base->cra_priority;
1270 + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
1271 + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
1272 + enc->base.cra_alignmask;
1273 + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
1275 + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
1276 + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
1277 + inst->alg.maxauthsize = auth->digestsize;
1279 + inst->alg.init = crypto_tls_init_tfm;
1280 + inst->alg.exit = crypto_tls_exit_tfm;
1282 + inst->alg.setkey = crypto_tls_setkey;
1283 + inst->alg.encrypt = crypto_tls_encrypt;
1284 + inst->alg.decrypt = crypto_tls_decrypt;
1286 + inst->free = crypto_tls_free;
1288 + err = aead_register_instance(tmpl, inst);
1290 + goto err_drop_enc;
1293 + crypto_mod_put(auth_base);
1297 + crypto_drop_skcipher(&ctx->enc);
1299 + crypto_drop_ahash(&ctx->auth);
1306 +static struct crypto_template crypto_tls_tmpl = {
1308 + .create = crypto_tls_create,
1309 + .module = THIS_MODULE,
1312 +static int __init crypto_tls_module_init(void)
1314 + return crypto_register_template(&crypto_tls_tmpl);
1317 +static void __exit crypto_tls_module_exit(void)
1319 + crypto_unregister_template(&crypto_tls_tmpl);
1322 +module_init(crypto_tls_module_init);
1323 +module_exit(crypto_tls_module_exit);
1325 +MODULE_LICENSE("GPL");
1326 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
1327 --- a/drivers/crypto/Makefile
1328 +++ b/drivers/crypto/Makefile
1329 @@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chel
1330 obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
1331 obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
1332 obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
1333 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
1334 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
1335 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
1336 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
1337 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
1338 --- a/drivers/crypto/caam/Kconfig
1339 +++ b/drivers/crypto/caam/Kconfig
1341 +config CRYPTO_DEV_FSL_CAAM_COMMON
1344 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1347 +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1350 config CRYPTO_DEV_FSL_CAAM
1351 - tristate "Freescale CAAM-Multicore driver backend"
1352 + tristate "Freescale CAAM-Multicore platform driver backend"
1353 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
1355 + select CRYPTO_DEV_FSL_CAAM_COMMON
1357 Enables the driver module for Freescale's Cryptographic Accelerator
1358 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
1359 @@ -12,9 +22,16 @@ config CRYPTO_DEV_FSL_CAAM
1360 To compile this driver as a module, choose M here: the module
1361 will be called caam.
1363 +if CRYPTO_DEV_FSL_CAAM
1365 +config CRYPTO_DEV_FSL_CAAM_DEBUG
1366 + bool "Enable debug output in CAAM driver"
1368 + Selecting this will enable printing of various debug
1369 + information in the CAAM driver.
1371 config CRYPTO_DEV_FSL_CAAM_JR
1372 tristate "Freescale CAAM Job Ring driver backend"
1373 - depends on CRYPTO_DEV_FSL_CAAM
1376 Enables the driver module for Job Rings which are part of
1377 @@ -25,9 +42,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
1378 To compile this driver as a module, choose M here: the module
1379 will be called caam_jr.
1381 +if CRYPTO_DEV_FSL_CAAM_JR
1383 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1385 - depends on CRYPTO_DEV_FSL_CAAM_JR
1389 @@ -45,7 +63,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
1391 config CRYPTO_DEV_FSL_CAAM_INTC
1392 bool "Job Ring interrupt coalescing"
1393 - depends on CRYPTO_DEV_FSL_CAAM_JR
1395 Enable the Job Ring's interrupt coalescing feature.
1397 @@ -74,9 +91,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
1398 threshold. Range is 1-65535.
1400 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1401 - tristate "Register algorithm implementations with the Crypto API"
1402 - depends on CRYPTO_DEV_FSL_CAAM_JR
1403 + bool "Register algorithm implementations with the Crypto API"
1405 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1407 select CRYPTO_AUTHENC
1408 select CRYPTO_BLKCIPHER
1409 @@ -85,13 +102,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
1410 scatterlist crypto API (such as the linux native IPSec
1411 stack) to the SEC4 via job ring.
1413 - To compile this as a module, choose M here: the module
1414 - will be called caamalg.
1416 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1417 - tristate "Queue Interface as Crypto API backend"
1418 - depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
1419 + bool "Queue Interface as Crypto API backend"
1420 + depends on FSL_SDK_DPA && NET
1422 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1423 select CRYPTO_AUTHENC
1424 select CRYPTO_BLKCIPHER
1426 @@ -102,36 +117,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
1427 assigned to the kernel should also be more than the number of
1430 - To compile this as a module, choose M here: the module
1431 - will be called caamalg_qi.
1433 config CRYPTO_DEV_FSL_CAAM_AHASH_API
1434 - tristate "Register hash algorithm implementations with Crypto API"
1435 - depends on CRYPTO_DEV_FSL_CAAM_JR
1436 + bool "Register hash algorithm implementations with Crypto API"
1438 + select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1441 Selecting this will offload ahash for users of the
1442 scatterlist crypto API to the SEC4 via job ring.
1444 - To compile this as a module, choose M here: the module
1445 - will be called caamhash.
1447 config CRYPTO_DEV_FSL_CAAM_PKC_API
1448 - tristate "Register public key cryptography implementations with Crypto API"
1449 - depends on CRYPTO_DEV_FSL_CAAM_JR
1450 + bool "Register public key cryptography implementations with Crypto API"
1454 Selecting this will allow SEC Public key support for RSA.
1455 Supported cryptographic primitives: encryption, decryption,
1456 signature and verification.
1457 - To compile this as a module, choose M here: the module
1458 - will be called caam_pkc.
1460 config CRYPTO_DEV_FSL_CAAM_RNG_API
1461 - tristate "Register caam device for hwrng API"
1462 - depends on CRYPTO_DEV_FSL_CAAM_JR
1463 + bool "Register caam device for hwrng API"
1467 @@ -139,16 +144,24 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
1468 Selecting this will register the SEC4 hardware rng to
1469 the hw_random API for suppying the kernel entropy pool.
1471 - To compile this as a module, choose M here: the module
1472 - will be called caamrng.
1473 +endif # CRYPTO_DEV_FSL_CAAM_JR
1475 -config CRYPTO_DEV_FSL_CAAM_DEBUG
1476 - bool "Enable debug output in CAAM driver"
1477 - depends on CRYPTO_DEV_FSL_CAAM
1479 - Selecting this will enable printing of various debug
1480 - information in the CAAM driver.
1481 +endif # CRYPTO_DEV_FSL_CAAM
1483 -config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1484 - def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
1485 - CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
1486 +config CRYPTO_DEV_FSL_DPAA2_CAAM
1487 + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
1488 + depends on FSL_MC_DPIO
1489 + select CRYPTO_DEV_FSL_CAAM_COMMON
1490 + select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
1491 + select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
1492 + select CRYPTO_BLKCIPHER
1493 + select CRYPTO_AUTHENC
1494 + select CRYPTO_AEAD
1495 + select CRYPTO_HASH
1497 + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
1498 + It handles DPSECI DPAA2 objects that sit on the Management Complex
1501 + To compile this as a module, choose M here: the module
1502 + will be called dpaa2_caam.
1503 --- a/drivers/crypto/caam/Makefile
1504 +++ b/drivers/crypto/caam/Makefile
1505 @@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
1506 ccflags-y := -DDEBUG
1509 +ccflags-y += -DVERSION=\"\"
1511 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
1512 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
1513 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
1514 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
1515 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
1516 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
1517 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
1518 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
1519 -obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
1521 -caam-objs := ctrl.o
1522 -caam_jr-objs := jr.o key_gen.o error.o
1523 -caam_pkc-y := caampkc.o pkc_desc.o
1524 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
1527 +caam_jr-y := jr.o key_gen.o
1528 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
1529 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
1530 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
1531 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
1532 +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
1534 +caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
1535 ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
1536 ccflags-y += -DCONFIG_CAAM_QI
1540 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
1542 +dpaa2_caam-y := caamalg_qi2.o dpseci.o
1543 --- a/drivers/crypto/caam/caamalg.c
1544 +++ b/drivers/crypto/caam/caamalg.c
1546 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
1549 +#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
1551 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
1552 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
1554 @@ -108,6 +110,7 @@ struct caam_ctx {
1555 dma_addr_t sh_desc_dec_dma;
1556 dma_addr_t sh_desc_givenc_dma;
1558 + enum dma_data_direction dir;
1559 struct device *jrdev;
1560 struct alginfo adata;
1561 struct alginfo cdata;
1562 @@ -118,6 +121,7 @@ static int aead_null_set_sh_desc(struct
1564 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1565 struct device *jrdev = ctx->jrdev;
1566 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1568 int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
1569 ctx->adata.keylen_pad;
1570 @@ -136,9 +140,10 @@ static int aead_null_set_sh_desc(struct
1572 /* aead_encrypt shared descriptor */
1573 desc = ctx->sh_desc_enc;
1574 - cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
1575 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
1577 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1578 - desc_bytes(desc), DMA_TO_DEVICE);
1579 + desc_bytes(desc), ctx->dir);
1582 * Job Descriptor and Shared Descriptors
1583 @@ -154,9 +159,10 @@ static int aead_null_set_sh_desc(struct
1585 /* aead_decrypt shared descriptor */
1586 desc = ctx->sh_desc_dec;
1587 - cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
1588 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
1590 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1591 - desc_bytes(desc), DMA_TO_DEVICE);
1592 + desc_bytes(desc), ctx->dir);
1596 @@ -168,6 +174,7 @@ static int aead_set_sh_desc(struct crypt
1597 unsigned int ivsize = crypto_aead_ivsize(aead);
1598 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1599 struct device *jrdev = ctx->jrdev;
1600 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1601 u32 ctx1_iv_off = 0;
1602 u32 *desc, *nonce = NULL;
1604 @@ -234,9 +241,9 @@ static int aead_set_sh_desc(struct crypt
1605 desc = ctx->sh_desc_enc;
1606 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
1607 ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
1609 + false, ctrlpriv->era);
1610 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1611 - desc_bytes(desc), DMA_TO_DEVICE);
1612 + desc_bytes(desc), ctx->dir);
1616 @@ -266,9 +273,9 @@ skip_enc:
1617 desc = ctx->sh_desc_dec;
1618 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
1619 ctx->authsize, alg->caam.geniv, is_rfc3686,
1620 - nonce, ctx1_iv_off, false);
1621 + nonce, ctx1_iv_off, false, ctrlpriv->era);
1622 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1623 - desc_bytes(desc), DMA_TO_DEVICE);
1624 + desc_bytes(desc), ctx->dir);
1626 if (!alg->caam.geniv)
1628 @@ -300,9 +307,9 @@ skip_enc:
1629 desc = ctx->sh_desc_enc;
1630 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
1631 ctx->authsize, is_rfc3686, nonce,
1632 - ctx1_iv_off, false);
1633 + ctx1_iv_off, false, ctrlpriv->era);
1634 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1635 - desc_bytes(desc), DMA_TO_DEVICE);
1636 + desc_bytes(desc), ctx->dir);
1640 @@ -323,6 +330,7 @@ static int gcm_set_sh_desc(struct crypto
1642 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1643 struct device *jrdev = ctx->jrdev;
1644 + unsigned int ivsize = crypto_aead_ivsize(aead);
1646 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1648 @@ -344,9 +352,9 @@ static int gcm_set_sh_desc(struct crypto
1651 desc = ctx->sh_desc_enc;
1652 - cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
1653 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1654 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1655 - desc_bytes(desc), DMA_TO_DEVICE);
1656 + desc_bytes(desc), ctx->dir);
1659 * Job Descriptor and Shared Descriptors
1660 @@ -361,9 +369,9 @@ static int gcm_set_sh_desc(struct crypto
1663 desc = ctx->sh_desc_dec;
1664 - cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
1665 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
1666 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1667 - desc_bytes(desc), DMA_TO_DEVICE);
1668 + desc_bytes(desc), ctx->dir);
1672 @@ -382,6 +390,7 @@ static int rfc4106_set_sh_desc(struct cr
1674 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1675 struct device *jrdev = ctx->jrdev;
1676 + unsigned int ivsize = crypto_aead_ivsize(aead);
1678 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1680 @@ -403,9 +412,10 @@ static int rfc4106_set_sh_desc(struct cr
1683 desc = ctx->sh_desc_enc;
1684 - cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
1685 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1687 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1688 - desc_bytes(desc), DMA_TO_DEVICE);
1689 + desc_bytes(desc), ctx->dir);
1692 * Job Descriptor and Shared Descriptors
1693 @@ -420,9 +430,10 @@ static int rfc4106_set_sh_desc(struct cr
1696 desc = ctx->sh_desc_dec;
1697 - cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
1698 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1700 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1701 - desc_bytes(desc), DMA_TO_DEVICE);
1702 + desc_bytes(desc), ctx->dir);
1706 @@ -442,6 +453,7 @@ static int rfc4543_set_sh_desc(struct cr
1708 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1709 struct device *jrdev = ctx->jrdev;
1710 + unsigned int ivsize = crypto_aead_ivsize(aead);
1712 int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
1714 @@ -463,9 +475,10 @@ static int rfc4543_set_sh_desc(struct cr
1717 desc = ctx->sh_desc_enc;
1718 - cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
1719 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
1721 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1722 - desc_bytes(desc), DMA_TO_DEVICE);
1723 + desc_bytes(desc), ctx->dir);
1726 * Job Descriptor and Shared Descriptors
1727 @@ -480,9 +493,10 @@ static int rfc4543_set_sh_desc(struct cr
1730 desc = ctx->sh_desc_dec;
1731 - cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
1732 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
1734 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1735 - desc_bytes(desc), DMA_TO_DEVICE);
1736 + desc_bytes(desc), ctx->dir);
1740 @@ -498,11 +512,67 @@ static int rfc4543_setauthsize(struct cr
1744 +static int chachapoly_set_sh_desc(struct crypto_aead *aead)
1746 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
1747 + struct device *jrdev = ctx->jrdev;
1748 + unsigned int ivsize = crypto_aead_ivsize(aead);
1751 + if (!ctx->cdata.keylen || !ctx->authsize)
1754 + desc = ctx->sh_desc_enc;
1755 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
1756 + ctx->authsize, true, false);
1757 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1758 + desc_bytes(desc), ctx->dir);
1760 + desc = ctx->sh_desc_dec;
1761 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
1762 + ctx->authsize, false, false);
1763 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1764 + desc_bytes(desc), ctx->dir);
1769 +static int chachapoly_setauthsize(struct crypto_aead *aead,
1770 + unsigned int authsize)
1772 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
1774 + if (authsize != POLY1305_DIGEST_SIZE)
1777 + ctx->authsize = authsize;
1778 + return chachapoly_set_sh_desc(aead);
1781 +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
1782 + unsigned int keylen)
1784 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
1785 + unsigned int ivsize = crypto_aead_ivsize(aead);
1786 + unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
1788 + if (keylen != CHACHA20_KEY_SIZE + saltlen) {
1789 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
1793 + ctx->cdata.key_virt = key;
1794 + ctx->cdata.keylen = keylen - saltlen;
1796 + return chachapoly_set_sh_desc(aead);
1799 static int aead_setkey(struct crypto_aead *aead,
1800 const u8 *key, unsigned int keylen)
1802 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1803 struct device *jrdev = ctx->jrdev;
1804 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
1805 struct crypto_authenc_keys keys;
1808 @@ -517,6 +587,27 @@ static int aead_setkey(struct crypto_aea
1809 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
1813 + * If DKP is supported, use it in the shared descriptor to generate
1816 + if (ctrlpriv->era >= 6) {
1817 + ctx->adata.keylen = keys.authkeylen;
1818 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
1819 + OP_ALG_ALGSEL_MASK);
1821 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
1824 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
1825 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
1827 + dma_sync_single_for_device(jrdev, ctx->key_dma,
1828 + ctx->adata.keylen_pad +
1829 + keys.enckeylen, ctx->dir);
1830 + goto skip_split_key;
1833 ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
1834 keys.authkeylen, CAAM_MAX_KEY_SIZE -
1836 @@ -527,12 +618,14 @@ static int aead_setkey(struct crypto_aea
1837 /* postpend encryption key to auth split key */
1838 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
1839 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
1840 - keys.enckeylen, DMA_TO_DEVICE);
1841 + keys.enckeylen, ctx->dir);
1843 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
1844 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
1845 ctx->adata.keylen_pad + keys.enckeylen, 1);
1849 ctx->cdata.keylen = keys.enckeylen;
1850 return aead_set_sh_desc(aead);
1852 @@ -552,7 +645,7 @@ static int gcm_setkey(struct crypto_aead
1855 memcpy(ctx->key, key, keylen);
1856 - dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
1857 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
1858 ctx->cdata.keylen = keylen;
1860 return gcm_set_sh_desc(aead);
1861 @@ -580,7 +673,7 @@ static int rfc4106_setkey(struct crypto_
1863 ctx->cdata.keylen = keylen - 4;
1864 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1867 return rfc4106_set_sh_desc(aead);
1870 @@ -606,7 +699,7 @@ static int rfc4543_setkey(struct crypto_
1872 ctx->cdata.keylen = keylen - 4;
1873 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
1876 return rfc4543_set_sh_desc(aead);
1879 @@ -658,21 +751,21 @@ static int ablkcipher_setkey(struct cryp
1880 cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
1882 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1883 - desc_bytes(desc), DMA_TO_DEVICE);
1884 + desc_bytes(desc), ctx->dir);
1886 /* ablkcipher_decrypt shared descriptor */
1887 desc = ctx->sh_desc_dec;
1888 cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
1890 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1891 - desc_bytes(desc), DMA_TO_DEVICE);
1892 + desc_bytes(desc), ctx->dir);
1894 /* ablkcipher_givencrypt shared descriptor */
1895 desc = ctx->sh_desc_givenc;
1896 cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
1898 dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
1899 - desc_bytes(desc), DMA_TO_DEVICE);
1900 + desc_bytes(desc), ctx->dir);
1904 @@ -701,13 +794,13 @@ static int xts_ablkcipher_setkey(struct
1905 desc = ctx->sh_desc_enc;
1906 cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
1907 dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
1908 - desc_bytes(desc), DMA_TO_DEVICE);
1909 + desc_bytes(desc), ctx->dir);
1911 /* xts_ablkcipher_decrypt shared descriptor */
1912 desc = ctx->sh_desc_dec;
1913 cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
1914 dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
1915 - desc_bytes(desc), DMA_TO_DEVICE);
1916 + desc_bytes(desc), ctx->dir);
1920 @@ -989,9 +1082,6 @@ static void init_aead_job(struct aead_re
1921 append_seq_out_ptr(desc, dst_dma,
1922 req->assoclen + req->cryptlen - authsize,
1925 - /* REG3 = assoclen */
1926 - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1929 static void init_gcm_job(struct aead_request *req,
1930 @@ -1006,6 +1096,7 @@ static void init_gcm_job(struct aead_req
1933 init_aead_job(req, edesc, all_contig, encrypt);
1934 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1936 /* BUG This should not be specific to generic GCM. */
1938 @@ -1023,6 +1114,40 @@ static void init_gcm_job(struct aead_req
1939 /* End of blank commands */
1942 +static void init_chachapoly_job(struct aead_request *req,
1943 + struct aead_edesc *edesc, bool all_contig,
1946 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
1947 + unsigned int ivsize = crypto_aead_ivsize(aead);
1948 + unsigned int assoclen = req->assoclen;
1949 + u32 *desc = edesc->hw_desc;
1950 + u32 ctx_iv_off = 4;
1952 + init_aead_job(req, edesc, all_contig, encrypt);
1954 + if (ivsize != CHACHAPOLY_IV_SIZE) {
1955 + /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
1959 + * The associated data comes already with the IV but we need
1960 + * to skip it when we authenticate or encrypt...
1962 + assoclen -= ivsize;
1965 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
1968 + * For IPsec load the IV further in the same register.
1969 + * For RFC7539 simply load the 12 bytes nonce in a single operation
1971 + append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
1972 + LDST_SRCDST_BYTE_CONTEXT |
1973 + ctx_iv_off << LDST_OFFSET_SHIFT);
1976 static void init_authenc_job(struct aead_request *req,
1977 struct aead_edesc *edesc,
1978 bool all_contig, bool encrypt)
1979 @@ -1032,6 +1157,7 @@ static void init_authenc_job(struct aead
1980 struct caam_aead_alg, aead);
1981 unsigned int ivsize = crypto_aead_ivsize(aead);
1982 struct caam_ctx *ctx = crypto_aead_ctx(aead);
1983 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
1984 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
1985 OP_ALG_AAI_CTR_MOD128);
1986 const bool is_rfc3686 = alg->caam.rfc3686;
1987 @@ -1055,6 +1181,15 @@ static void init_authenc_job(struct aead
1989 init_aead_job(req, edesc, all_contig, encrypt);
1992 + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
1993 + * having DPOVRD as destination.
1995 + if (ctrlpriv->era < 3)
1996 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
1998 + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
2000 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
2001 append_load_as_imm(desc, req->iv, ivsize,
2003 @@ -1227,8 +1362,16 @@ static struct aead_edesc *aead_edesc_all
2008 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
2009 + * the end of the table by allocating more S/G entries.
2011 sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
2012 - sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
2013 + if (mapped_dst_nents > 1)
2014 + sec4_sg_len += ALIGN(mapped_dst_nents, 4);
2016 + sec4_sg_len = ALIGN(sec4_sg_len, 4);
2018 sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
2020 /* allocate space for base edesc and hw desc commands, link tables */
2021 @@ -1309,6 +1452,72 @@ static int gcm_encrypt(struct aead_reque
2025 +static int chachapoly_encrypt(struct aead_request *req)
2027 + struct aead_edesc *edesc;
2028 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
2029 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
2030 + struct device *jrdev = ctx->jrdev;
2035 + edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
2037 + if (IS_ERR(edesc))
2038 + return PTR_ERR(edesc);
2040 + desc = edesc->hw_desc;
2042 + init_chachapoly_job(req, edesc, all_contig, true);
2043 + print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
2044 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2047 + ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
2049 + ret = -EINPROGRESS;
2051 + aead_unmap(jrdev, edesc, req);
2058 +static int chachapoly_decrypt(struct aead_request *req)
2060 + struct aead_edesc *edesc;
2061 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
2062 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
2063 + struct device *jrdev = ctx->jrdev;
2068 + edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
2070 + if (IS_ERR(edesc))
2071 + return PTR_ERR(edesc);
2073 + desc = edesc->hw_desc;
2075 + init_chachapoly_job(req, edesc, all_contig, false);
2076 + print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
2077 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
2080 + ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
2082 + ret = -EINPROGRESS;
2084 + aead_unmap(jrdev, edesc, req);
2091 static int ipsec_gcm_encrypt(struct aead_request *req)
2093 if (req->assoclen < 8)
2094 @@ -1496,7 +1705,25 @@ static struct ablkcipher_edesc *ablkciph
2096 sec4_sg_ents = 1 + mapped_src_nents;
2097 dst_sg_idx = sec4_sg_ents;
2098 - sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
2101 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
2102 + * the end of the table by allocating more S/G entries. Logic:
2103 + * if (src != dst && output S/G)
2104 + * pad output S/G, if needed
2105 + * else if (src == dst && S/G)
2106 + * overlapping S/Gs; pad one of them
2107 + * else if (input S/G) ...
2108 + * pad input S/G, if needed
2110 + if (mapped_dst_nents > 1)
2111 + sec4_sg_ents += ALIGN(mapped_dst_nents, 4);
2112 + else if ((req->src == req->dst) && (mapped_src_nents > 1))
2113 + sec4_sg_ents = max(ALIGN(sec4_sg_ents, 4),
2114 + 1 + ALIGN(mapped_src_nents, 4));
2116 + sec4_sg_ents = ALIGN(sec4_sg_ents, 4);
2118 sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
2121 @@ -3199,6 +3426,50 @@ static struct caam_aead_alg driver_aeads
2128 + .cra_name = "rfc7539(chacha20,poly1305)",
2129 + .cra_driver_name = "rfc7539-chacha20-poly1305-"
2131 + .cra_blocksize = 1,
2133 + .setkey = chachapoly_setkey,
2134 + .setauthsize = chachapoly_setauthsize,
2135 + .encrypt = chachapoly_encrypt,
2136 + .decrypt = chachapoly_decrypt,
2137 + .ivsize = CHACHAPOLY_IV_SIZE,
2138 + .maxauthsize = POLY1305_DIGEST_SIZE,
2141 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2143 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2150 + .cra_name = "rfc7539esp(chacha20,poly1305)",
2151 + .cra_driver_name = "rfc7539esp-chacha20-"
2153 + .cra_blocksize = 1,
2155 + .setkey = chachapoly_setkey,
2156 + .setauthsize = chachapoly_setauthsize,
2157 + .encrypt = chachapoly_encrypt,
2158 + .decrypt = chachapoly_decrypt,
2160 + .maxauthsize = POLY1305_DIGEST_SIZE,
2163 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2165 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2171 struct caam_crypto_alg {
2172 @@ -3207,9 +3478,11 @@ struct caam_crypto_alg {
2173 struct caam_alg_entry caam;
2176 -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
2177 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
2180 dma_addr_t dma_addr;
2181 + struct caam_drv_private *priv;
2183 ctx->jrdev = caam_jr_alloc();
2184 if (IS_ERR(ctx->jrdev)) {
2185 @@ -3217,10 +3490,16 @@ static int caam_init_common(struct caam_
2186 return PTR_ERR(ctx->jrdev);
2189 + priv = dev_get_drvdata(ctx->jrdev->parent);
2190 + if (priv->era >= 6 && uses_dkp)
2191 + ctx->dir = DMA_BIDIRECTIONAL;
2193 + ctx->dir = DMA_TO_DEVICE;
2195 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
2196 offsetof(struct caam_ctx,
2198 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
2199 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
2200 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
2201 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
2202 caam_jr_free(ctx->jrdev);
2203 @@ -3248,7 +3527,7 @@ static int caam_cra_init(struct crypto_t
2204 container_of(alg, struct caam_crypto_alg, crypto_alg);
2205 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
2207 - return caam_init_common(ctx, &caam_alg->caam);
2208 + return caam_init_common(ctx, &caam_alg->caam, false);
2211 static int caam_aead_init(struct crypto_aead *tfm)
2212 @@ -3258,14 +3537,15 @@ static int caam_aead_init(struct crypto_
2213 container_of(alg, struct caam_aead_alg, aead);
2214 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
2216 - return caam_init_common(ctx, &caam_alg->caam);
2217 + return caam_init_common(ctx, &caam_alg->caam,
2218 + alg->setkey == aead_setkey);
2221 static void caam_exit_common(struct caam_ctx *ctx)
2223 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
2224 offsetof(struct caam_ctx, sh_desc_enc_dma),
2225 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
2226 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
2227 caam_jr_free(ctx->jrdev);
2230 @@ -3279,7 +3559,7 @@ static void caam_aead_exit(struct crypto
2231 caam_exit_common(crypto_aead_ctx(tfm));
2234 -static void __exit caam_algapi_exit(void)
2235 +void caam_algapi_exit(void)
2238 struct caam_crypto_alg *t_alg, *n;
2239 @@ -3358,56 +3638,52 @@ static void caam_aead_alg_init(struct ca
2240 alg->exit = caam_aead_exit;
2243 -static int __init caam_algapi_init(void)
2244 +int caam_algapi_init(struct device *ctrldev)
2246 - struct device_node *dev_node;
2247 - struct platform_device *pdev;
2248 - struct device *ctrldev;
2249 - struct caam_drv_private *priv;
2250 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
2252 - u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
2253 + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
2254 unsigned int md_limit = SHA512_DIGEST_SIZE;
2255 bool registered = false;
2257 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
2259 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
2264 - pdev = of_find_device_by_node(dev_node);
2266 - of_node_put(dev_node);
2270 - ctrldev = &pdev->dev;
2271 - priv = dev_get_drvdata(ctrldev);
2272 - of_node_put(dev_node);
2275 - * If priv is NULL, it's probably because the caam driver wasn't
2276 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
2282 INIT_LIST_HEAD(&alg_list);
2285 * Register crypto algorithms the device supports.
2286 * First, detect presence and attributes of DES, AES, and MD blocks.
2288 - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2289 - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2290 - des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
2291 - aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
2292 - md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2293 + if (priv->era < 10) {
2294 + u32 cha_vid, cha_inst;
2296 + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
2297 + aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
2298 + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2300 + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
2301 + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
2302 + CHA_ID_LS_DES_SHIFT;
2303 + aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
2304 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
2310 + aesa = rd_reg32(&priv->ctrl->vreg.aesa);
2311 + mdha = rd_reg32(&priv->ctrl->vreg.mdha);
2313 + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2314 + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
2316 + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
2317 + aes_inst = aesa & CHA_VER_NUM_MASK;
2318 + md_inst = mdha & CHA_VER_NUM_MASK;
2319 + ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
2320 + ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
2323 /* If MD is present, limit digest size based on LP256 */
2324 - if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
2325 + if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
2326 md_limit = SHA256_DIGEST_SIZE;
2328 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2329 @@ -3429,10 +3705,10 @@ static int __init caam_algapi_init(void)
2330 * Check support for AES modes not available
2333 - if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
2334 - if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
2337 + if (aes_vid == CHA_VER_VID_AES_LP &&
2338 + (alg->class1_alg_type & OP_ALG_AAI_MASK) ==
2342 t_alg = caam_alg_alloc(alg);
2343 if (IS_ERR(t_alg)) {
2344 @@ -3471,21 +3747,28 @@ static int __init caam_algapi_init(void)
2345 if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
2348 + /* Skip CHACHA20 algorithms if not supported by device */
2349 + if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
2352 + /* Skip POLY1305 algorithms if not supported by device */
2353 + if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
2357 * Check support for AES algorithms not available
2360 - if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
2361 - if (alg_aai == OP_ALG_AAI_GCM)
2363 + if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
2367 * Skip algorithms requiring message digests
2368 * if MD or MD size is not supported by device.
2371 - (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
2373 + if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
2374 + (!md_inst || t_alg->aead.maxauthsize > md_limit))
2377 caam_aead_alg_init(t_alg);
2379 @@ -3505,10 +3788,3 @@ static int __init caam_algapi_init(void)
2384 -module_init(caam_algapi_init);
2385 -module_exit(caam_algapi_exit);
2387 -MODULE_LICENSE("GPL");
2388 -MODULE_DESCRIPTION("FSL CAAM support for crypto API");
2389 -MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
2390 --- a/drivers/crypto/caam/caamalg_desc.c
2391 +++ b/drivers/crypto/caam/caamalg_desc.c
2392 @@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *d
2393 * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
2394 * (non-protocol) with no (null) encryption.
2395 * @desc: pointer to buffer used for descriptor construction
2396 - * @adata: pointer to authentication transform definitions. Note that since a
2397 - * split key is to be used, the size of the split key itself is
2398 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2399 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2400 + * @adata: pointer to authentication transform definitions.
2401 + * A split key is required for SEC Era < 6; the size of the split key
2402 + * is specified in this case. Valid algorithm values - one of
2403 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2404 + * with OP_ALG_AAI_HMAC_PRECOMP.
2405 * @icvsize: integrity check value (ICV) size (truncated or full)
2407 - * Note: Requires an MDHA split key.
2410 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
2411 - unsigned int icvsize)
2412 + unsigned int icvsize, int era)
2414 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
2416 @@ -63,13 +63,18 @@ void cnstr_shdsc_aead_null_encap(u32 * c
2417 /* Skip if already shared */
2418 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2420 - if (adata->key_inline)
2421 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2422 - adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
2425 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2426 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2428 + if (adata->key_inline)
2429 + append_key_as_imm(desc, adata->key_virt,
2430 + adata->keylen_pad, adata->keylen,
2431 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2434 + append_key(desc, adata->key_dma, adata->keylen,
2435 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2437 + append_proto_dkp(desc, adata);
2439 set_jump_tgt_here(desc, key_jump_cmd);
2441 /* assoclen + cryptlen = seqinlen */
2442 @@ -121,16 +126,16 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_enca
2443 * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
2444 * (non-protocol) with no (null) decryption.
2445 * @desc: pointer to buffer used for descriptor construction
2446 - * @adata: pointer to authentication transform definitions. Note that since a
2447 - * split key is to be used, the size of the split key itself is
2448 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2449 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2450 + * @adata: pointer to authentication transform definitions.
2451 + * A split key is required for SEC Era < 6; the size of the split key
2452 + * is specified in this case. Valid algorithm values - one of
2453 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2454 + * with OP_ALG_AAI_HMAC_PRECOMP.
2455 * @icvsize: integrity check value (ICV) size (truncated or full)
2457 - * Note: Requires an MDHA split key.
2460 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
2461 - unsigned int icvsize)
2462 + unsigned int icvsize, int era)
2464 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
2466 @@ -139,13 +144,18 @@ void cnstr_shdsc_aead_null_decap(u32 * c
2467 /* Skip if already shared */
2468 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2470 - if (adata->key_inline)
2471 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2472 - adata->keylen, CLASS_2 |
2473 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2475 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2476 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2478 + if (adata->key_inline)
2479 + append_key_as_imm(desc, adata->key_virt,
2480 + adata->keylen_pad, adata->keylen,
2481 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2484 + append_key(desc, adata->key_dma, adata->keylen,
2485 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2487 + append_proto_dkp(desc, adata);
2489 set_jump_tgt_here(desc, key_jump_cmd);
2491 /* Class 2 operation */
2492 @@ -204,7 +214,7 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_null_deca
2493 static void init_sh_desc_key_aead(u32 * const desc,
2494 struct alginfo * const cdata,
2495 struct alginfo * const adata,
2496 - const bool is_rfc3686, u32 *nonce)
2497 + const bool is_rfc3686, u32 *nonce, int era)
2500 unsigned int enckeylen = cdata->keylen;
2501 @@ -224,13 +234,18 @@ static void init_sh_desc_key_aead(u32 *
2503 enckeylen -= CTR_RFC3686_NONCE_SIZE;
2505 - if (adata->key_inline)
2506 - append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
2507 - adata->keylen, CLASS_2 |
2508 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2510 - append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2511 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
2513 + if (adata->key_inline)
2514 + append_key_as_imm(desc, adata->key_virt,
2515 + adata->keylen_pad, adata->keylen,
2516 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2519 + append_key(desc, adata->key_dma, adata->keylen,
2520 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2522 + append_proto_dkp(desc, adata);
2525 if (cdata->key_inline)
2526 append_key_as_imm(desc, cdata->key_virt, enckeylen,
2527 @@ -261,26 +276,27 @@ static void init_sh_desc_key_aead(u32 *
2528 * @cdata: pointer to block cipher transform definitions
2529 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2530 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2531 - * @adata: pointer to authentication transform definitions. Note that since a
2532 - * split key is to be used, the size of the split key itself is
2533 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2534 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2535 + * @adata: pointer to authentication transform definitions.
2536 + * A split key is required for SEC Era < 6; the size of the split key
2537 + * is specified in this case. Valid algorithm values - one of
2538 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2539 + * with OP_ALG_AAI_HMAC_PRECOMP.
2540 * @ivsize: initialization vector size
2541 * @icvsize: integrity check value (ICV) size (truncated or full)
2542 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2543 * @nonce: pointer to rfc3686 nonce
2544 * @ctx1_iv_off: IV offset in CONTEXT1 register
2545 * @is_qi: true when called from caam/qi
2547 - * Note: Requires an MDHA split key.
2550 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
2551 struct alginfo *adata, unsigned int ivsize,
2552 unsigned int icvsize, const bool is_rfc3686,
2553 - u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
2554 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
2557 /* Note: Context registers are saved. */
2558 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2559 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2561 /* Class 2 operation */
2562 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2563 @@ -306,8 +322,13 @@ void cnstr_shdsc_aead_encap(u32 * const
2566 /* Read and write assoclen bytes */
2567 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2568 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2569 + if (is_qi || era < 3) {
2570 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2571 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2573 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2574 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2577 /* Skip assoc data */
2578 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2579 @@ -350,27 +371,27 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
2580 * @cdata: pointer to block cipher transform definitions
2581 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2582 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2583 - * @adata: pointer to authentication transform definitions. Note that since a
2584 - * split key is to be used, the size of the split key itself is
2585 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2586 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2587 + * @adata: pointer to authentication transform definitions.
2588 + * A split key is required for SEC Era < 6; the size of the split key
2589 + * is specified in this case. Valid algorithm values - one of
2590 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2591 + * with OP_ALG_AAI_HMAC_PRECOMP.
2592 * @ivsize: initialization vector size
2593 * @icvsize: integrity check value (ICV) size (truncated or full)
2594 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2595 * @nonce: pointer to rfc3686 nonce
2596 * @ctx1_iv_off: IV offset in CONTEXT1 register
2597 * @is_qi: true when called from caam/qi
2599 - * Note: Requires an MDHA split key.
2602 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
2603 struct alginfo *adata, unsigned int ivsize,
2604 unsigned int icvsize, const bool geniv,
2605 const bool is_rfc3686, u32 *nonce,
2606 - const u32 ctx1_iv_off, const bool is_qi)
2607 + const u32 ctx1_iv_off, const bool is_qi, int era)
2609 /* Note: Context registers are saved. */
2610 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2611 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2613 /* Class 2 operation */
2614 append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2615 @@ -397,11 +418,23 @@ void cnstr_shdsc_aead_decap(u32 * const
2618 /* Read and write assoclen bytes */
2619 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2621 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
2623 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2624 + if (is_qi || era < 3) {
2625 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2627 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
2630 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
2633 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2635 + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
2638 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
2642 /* Skip assoc data */
2643 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2644 @@ -456,30 +489,29 @@ EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
2645 * @cdata: pointer to block cipher transform definitions
2646 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
2647 * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
2648 - * @adata: pointer to authentication transform definitions. Note that since a
2649 - * split key is to be used, the size of the split key itself is
2650 - * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
2651 - * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2652 - * @ivsize: initialization vector size
2653 + * @adata: pointer to authentication transform definitions.
2654 + * A split key is required for SEC Era < 6; the size of the split key
2655 + * is specified in this case. Valid algorithm values - one of
2656 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
2657 + * with OP_ALG_AAI_HMAC_PRECOMP.
 + * @ivsize: initialization vector size
2658 * @icvsize: integrity check value (ICV) size (truncated or full)
2659 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
2660 * @nonce: pointer to rfc3686 nonce
2661 * @ctx1_iv_off: IV offset in CONTEXT1 register
2662 * @is_qi: true when called from caam/qi
2664 - * Note: Requires an MDHA split key.
2667 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
2668 struct alginfo *adata, unsigned int ivsize,
2669 unsigned int icvsize, const bool is_rfc3686,
2670 u32 *nonce, const u32 ctx1_iv_off,
2672 + const bool is_qi, int era)
2677 /* Note: Context registers are saved. */
2678 - init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
2679 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
2683 @@ -529,8 +561,13 @@ copy_iv:
2686 /* Read and write assoclen bytes */
2687 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2688 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2689 + if (is_qi || era < 3) {
2690 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
2691 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
2693 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2694 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
2697 /* Skip assoc data */
2698 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
2699 @@ -592,14 +629,431 @@ copy_iv:
2700 EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
2703 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
2704 + * @desc: pointer to buffer used for descriptor construction
2705 + * @cdata: pointer to block cipher transform definitions
2706 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
2707 + * with OP_ALG_AAI_CBC
2708 + * @adata: pointer to authentication transform definitions.
2709 + * A split key is required for SEC Era < 6; the size of the split key
2710 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
2711 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2712 + * @assoclen: associated data length
2713 + * @ivsize: initialization vector size
2714 + * @authsize: authentication data size
2715 + * @blocksize: block cipher size
2718 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
2719 + struct alginfo *adata, unsigned int assoclen,
2720 + unsigned int ivsize, unsigned int authsize,
2721 + unsigned int blocksize, int era)
2723 + u32 *key_jump_cmd, *zero_payload_jump_cmd;
2724 + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
2727 + * Compute the index (in bytes) for the LOAD with destination of
2728 + * Class 1 Data Size Register and for the LOAD that generates padding
2730 + if (adata->key_inline) {
2731 + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2732 + cdata->keylen - 4 * CAAM_CMD_SZ;
2733 + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
2734 + cdata->keylen - 2 * CAAM_CMD_SZ;
2736 + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2738 + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
2742 + stidx = 1 << HDR_START_IDX_SHIFT;
2743 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2745 + /* skip key loading if keys are already loaded due to sharing */
2746 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2750 + if (adata->key_inline)
2751 + append_key_as_imm(desc, adata->key_virt,
2752 + adata->keylen_pad, adata->keylen,
2753 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
2756 + append_key(desc, adata->key_dma, adata->keylen,
2757 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
2759 + append_proto_dkp(desc, adata);
2762 + if (cdata->key_inline)
2763 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
2764 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
2766 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2767 + KEY_DEST_CLASS_REG);
2769 + set_jump_tgt_here(desc, key_jump_cmd);
2771 + /* class 2 operation */
2772 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2774 + /* class 1 operation */
2775 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2778 + /* payloadlen = input data length - (assoclen + ivlen) */
2779 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
2781 + /* math1 = payloadlen + icvlen */
2782 + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
2784 + /* padlen = block_size - math1 % block_size */
2785 + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
2786 + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
2788 + /* cryptlen = payloadlen + icvlen + padlen */
2789 + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
2792 + * update immediate data with the padding length value
2793 + * for the LOAD in the class 1 data size register.
2795 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2796 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
2797 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2798 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
2800 + /* overwrite PL field for the padding info FIFO entry */
2801 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
2802 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
2803 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
2804 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
2806 + /* store encrypted payload, icv and padding */
2807 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2809 + /* if payload length is zero, jump to zero-payload commands */
2810 + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
2811 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2812 + JUMP_COND_MATH_Z);
2814 + /* load iv in context1 */
2815 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2816 + LDST_CLASS_1_CCB | ivsize);
2818 + /* read assoc for authentication */
2819 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2821 + /* insnoop payload */
2822 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
2823 + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
2825 + /* jump the zero-payload commands */
2826 + append_jump(desc, JUMP_TEST_ALL | 3);
2828 + /* zero-payload commands */
2829 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
2831 + /* load iv in context1 */
2832 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2833 + LDST_CLASS_1_CCB | ivsize);
2835 + /* assoc data is the only data for authentication */
2836 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
2837 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
2839 + /* send icv to encryption */
2840 + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
2843 + /* update class 1 data size register with padding length */
2844 + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
2845 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
2847 + /* generate padding and send it to encryption */
2848 + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
2849 + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
2850 + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
2851 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
2854 + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
2855 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
2856 + desc_bytes(desc), 1);
2859 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
2862 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
2863 + * @desc: pointer to buffer used for descriptor construction
2864 + * @cdata: pointer to block cipher transform definitions
2865 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
2866 + * with OP_ALG_AAI_CBC
2867 + * @adata: pointer to authentication transform definitions.
2868 + * A split key is required for SEC Era < 6; the size of the split key
2869 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
2870 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
2871 + * @assoclen: associated data length
2872 + * @ivsize: initialization vector size
2873 + * @authsize: authentication data size
2874 + * @blocksize: block cipher size
2877 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
2878 + struct alginfo *adata, unsigned int assoclen,
2879 + unsigned int ivsize, unsigned int authsize,
2880 + unsigned int blocksize, int era)
2882 + u32 stidx, jumpback;
2883 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
2885 + * Pointer Size bool determines the size of address pointers.
2886 + * false - Pointers fit in one 32-bit word.
2887 + * true - Pointers fit in two 32-bit words.
2889 + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
2891 + stidx = 1 << HDR_START_IDX_SHIFT;
2892 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
2894 + /* skip key loading if keys are already loaded due to sharing */
2895 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
2899 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
2900 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
2902 + append_proto_dkp(desc, adata);
2904 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
2905 + KEY_DEST_CLASS_REG);
2907 + set_jump_tgt_here(desc, key_jump_cmd);
2909 + /* class 2 operation */
2910 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
2911 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
2912 + /* class 1 operation */
2913 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
2916 + /* VSIL = input data length - 2 * block_size */
2917 + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
2921 + * payloadlen + icvlen + padlen = input data length - (assoclen +
2924 + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
2926 + /* skip data to the last but one cipher block */
2927 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
2929 + /* load iv for the last cipher block */
2930 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
2931 + LDST_CLASS_1_CCB | ivsize);
2933 + /* read last cipher block */
2934 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
2935 + FIFOLD_TYPE_LAST1 | blocksize);
2937 + /* move decrypted block into math0 and math1 */
2938 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
2941 + /* reset AES CHA */
2942 + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
2943 + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
2945 + /* rewind input sequence */
2946 + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
2948 + /* key1 is in decryption form */
2949 + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
2950 + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
2952 + /* load iv in context1 */
2953 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
2954 + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
2956 + /* read sequence number */
2957 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
2958 + /* load Type, Version and Len fields in math0 */
2959 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
2960 + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
2962 + /* compute (padlen - 1) */
2963 + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
2965 + /* math2 = icvlen + (padlen - 1) + 1 */
2966 + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
2968 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
2970 + /* VSOL = payloadlen + icvlen + padlen */
2971 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
2973 + if (caam_little_end)
2974 + append_moveb(desc, MOVE_WAITCOMP |
2975 + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
2977 + /* update Len field */
2978 + append_math_sub(desc, REG0, REG0, REG2, 8);
2980 + /* store decrypted payload, icv and padding */
2981 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
2983 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/
2984 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
2986 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
2987 + JUMP_COND_MATH_Z);
2989 + /* send Type, Version and Len(pre ICV) fields to authentication */
2990 + append_move(desc, MOVE_WAITCOMP |
2991 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
2992 + (3 << MOVE_OFFSET_SHIFT) | 5);
2994 + /* outsnooping payload */
2995 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
2996 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
2998 + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
3000 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
3001 + /* send Type, Version and Len(pre ICV) fields to authentication */
3002 + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
3003 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
3004 + (3 << MOVE_OFFSET_SHIFT) | 5);
3006 + set_jump_tgt_here(desc, skip_zero_jump_cmd);
3007 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
3009 + /* load icvlen and padlen */
3010 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
3011 + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
3013 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
3014 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
3017 + * Start a new input sequence using the SEQ OUT PTR command options,
3018 + * pointer and length used when the current output sequence was defined.
3022 + * Move the lower 32 bits of Shared Descriptor address, the
3023 + * SEQ OUT PTR command, Output Pointer (2 words) and
3024 + * Output Length into math registers.
3026 + if (caam_little_end)
3027 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3029 + (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
3031 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3033 + (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
3035 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
3036 + append_math_and_imm_u32(desc, REG0, REG0, IMM,
3037 + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
3038 + /* Append a JUMP command after the copied fields */
3039 + jumpback = CMD_JUMP | (char)-9;
3040 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
3041 + LDST_SRCDST_WORD_DECO_MATH2 |
3042 + (4 << LDST_OFFSET_SHIFT));
3043 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
3044 + /* Move the updated fields back to the Job Descriptor */
3045 + if (caam_little_end)
3046 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3047 + MOVE_DEST_DESCBUF |
3048 + (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
3050 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3051 + MOVE_DEST_DESCBUF |
3052 + (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
3055 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
3056 + * and then jump back to the next command from the
3057 + * Shared Descriptor.
3059 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
3062 + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
3063 + * Output Length into math registers.
3065 + if (caam_little_end)
3066 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3068 + (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
3070 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
3072 + (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
3074 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
3075 + append_math_and_imm_u64(desc, REG0, REG0, IMM,
3076 + ~(((u64)(CMD_SEQ_IN_PTR ^
3077 + CMD_SEQ_OUT_PTR)) << 32));
3078 + /* Append a JUMP command after the copied fields */
3079 + jumpback = CMD_JUMP | (char)-7;
3080 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
3081 + LDST_SRCDST_WORD_DECO_MATH1 |
3082 + (4 << LDST_OFFSET_SHIFT));
3083 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
3084 + /* Move the updated fields back to the Job Descriptor */
3085 + if (caam_little_end)
3086 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3087 + MOVE_DEST_DESCBUF |
3088 + (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
3090 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
3091 + MOVE_DEST_DESCBUF |
3092 + (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
3095 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
3096 + * and then jump back to the next command from the
3097 + * Shared Descriptor.
3099 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
3102 + /* skip payload */
3103 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
3105 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
3106 + FIFOLD_TYPE_LAST2 | authsize);
3109 + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
3110 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
3111 + desc_bytes(desc), 1);
3114 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
3117 * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
3118 * @desc: pointer to buffer used for descriptor construction
3119 * @cdata: pointer to block cipher transform definitions
3120 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3121 + * @ivsize: initialization vector size
3122 * @icvsize: integrity check value (ICV) size (truncated or full)
3123 + * @is_qi: true when called from caam/qi
3125 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
3126 - unsigned int icvsize)
3127 + unsigned int ivsize, unsigned int icvsize,
3130 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
3131 *zero_assoc_jump_cmd2;
3132 @@ -621,11 +1075,35 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3133 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3137 + u32 *wait_load_cmd;
3139 + /* REG3 = assoclen */
3140 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3141 + LDST_SRCDST_WORD_DECO_MATH3 |
3142 + (4 << LDST_OFFSET_SHIFT));
3144 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3145 + JUMP_COND_CALM | JUMP_COND_NCP |
3146 + JUMP_COND_NOP | JUMP_COND_NIP |
3148 + set_jump_tgt_here(desc, wait_load_cmd);
3150 + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
3153 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
3157 /* if assoclen + cryptlen is ZERO, skip to ICV write */
3158 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3159 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
3163 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3164 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3166 /* if assoclen is ZERO, skip reading the assoc data */
3167 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3168 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
3169 @@ -657,8 +1135,11 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3170 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
3171 FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
3173 - /* jump the zero-payload commands */
3174 - append_jump(desc, JUMP_TEST_ALL | 2);
3175 + /* jump to ICV writing */
3177 + append_jump(desc, JUMP_TEST_ALL | 4);
3179 + append_jump(desc, JUMP_TEST_ALL | 2);
3181 /* zero-payload commands */
3182 set_jump_tgt_here(desc, zero_payload_jump_cmd);
3183 @@ -666,10 +1147,18 @@ void cnstr_shdsc_gcm_encap(u32 * const d
3184 /* read assoc data */
3185 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
3186 FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
3188 + /* jump to ICV writing */
3189 + append_jump(desc, JUMP_TEST_ALL | 2);
3191 /* There is no input data */
3192 set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
3195 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3196 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
3197 + FIFOLD_TYPE_LAST1);
3200 append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
3201 LDST_SRCDST_BYTE_CONTEXT);
3202 @@ -686,10 +1175,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
3203 * @desc: pointer to buffer used for descriptor construction
3204 * @cdata: pointer to block cipher transform definitions
3205 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3206 + * @ivsize: initialization vector size
3207 * @icvsize: integrity check value (ICV) size (truncated or full)
3208 + * @is_qi: true when called from caam/qi
3210 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
3211 - unsigned int icvsize)
3212 + unsigned int ivsize, unsigned int icvsize,
3215 u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
3217 @@ -710,6 +1202,24 @@ void cnstr_shdsc_gcm_decap(u32 * const d
3218 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3219 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3222 + u32 *wait_load_cmd;
3224 + /* REG3 = assoclen */
3225 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3226 + LDST_SRCDST_WORD_DECO_MATH3 |
3227 + (4 << LDST_OFFSET_SHIFT));
3229 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3230 + JUMP_COND_CALM | JUMP_COND_NCP |
3231 + JUMP_COND_NOP | JUMP_COND_NIP |
3233 + set_jump_tgt_here(desc, wait_load_cmd);
3235 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3236 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3239 /* if assoclen is ZERO, skip reading the assoc data */
3240 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3241 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
3242 @@ -762,10 +1272,13 @@ EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
3243 * @desc: pointer to buffer used for descriptor construction
3244 * @cdata: pointer to block cipher transform definitions
3245 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3246 + * @ivsize: initialization vector size
3247 * @icvsize: integrity check value (ICV) size (truncated or full)
3248 + * @is_qi: true when called from caam/qi
3250 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
3251 - unsigned int icvsize)
3252 + unsigned int ivsize, unsigned int icvsize,
3257 @@ -786,7 +1299,29 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
3258 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3261 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
3263 + u32 *wait_load_cmd;
3265 + /* REG3 = assoclen */
3266 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3267 + LDST_SRCDST_WORD_DECO_MATH3 |
3268 + (4 << LDST_OFFSET_SHIFT));
3270 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3271 + JUMP_COND_CALM | JUMP_COND_NCP |
3272 + JUMP_COND_NOP | JUMP_COND_NIP |
3274 + set_jump_tgt_here(desc, wait_load_cmd);
3276 + /* Read salt and IV */
3277 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3278 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3280 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3281 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3284 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
3285 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3287 /* Read assoc data */
3288 @@ -794,7 +1329,7 @@ void cnstr_shdsc_rfc4106_encap(u32 * con
3289 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
3292 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
3293 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
3295 /* Will read cryptlen bytes */
3296 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3297 @@ -833,10 +1368,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap)
3298 * @desc: pointer to buffer used for descriptor construction
3299 * @cdata: pointer to block cipher transform definitions
3300 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3301 + * @ivsize: initialization vector size
3302 * @icvsize: integrity check value (ICV) size (truncated or full)
3303 + * @is_qi: true when called from caam/qi
3305 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
3306 - unsigned int icvsize)
3307 + unsigned int ivsize, unsigned int icvsize,
3312 @@ -858,7 +1396,29 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
3313 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3314 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3316 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
3318 + u32 *wait_load_cmd;
3320 + /* REG3 = assoclen */
3321 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3322 + LDST_SRCDST_WORD_DECO_MATH3 |
3323 + (4 << LDST_OFFSET_SHIFT));
3325 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3326 + JUMP_COND_CALM | JUMP_COND_NCP |
3327 + JUMP_COND_NOP | JUMP_COND_NIP |
3329 + set_jump_tgt_here(desc, wait_load_cmd);
3331 + /* Read salt and IV */
3332 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3333 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3335 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3336 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3339 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
3340 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3342 /* Read assoc data */
3343 @@ -866,7 +1426,7 @@ void cnstr_shdsc_rfc4106_decap(u32 * con
3344 FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
3347 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
3348 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
3350 /* Will read cryptlen bytes */
3351 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
3352 @@ -905,10 +1465,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap)
3353 * @desc: pointer to buffer used for descriptor construction
3354 * @cdata: pointer to block cipher transform definitions
3355 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3356 + * @ivsize: initialization vector size
3357 * @icvsize: integrity check value (ICV) size (truncated or full)
3358 + * @is_qi: true when called from caam/qi
3360 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
3361 - unsigned int icvsize)
3362 + unsigned int ivsize, unsigned int icvsize,
3365 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
3367 @@ -929,6 +1492,18 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
3368 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3372 + /* assoclen is not needed, skip it */
3373 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
3375 + /* Read salt and IV */
3376 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3377 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3379 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3380 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3383 /* assoclen + cryptlen = seqinlen */
3384 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
3386 @@ -940,7 +1515,7 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
3387 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
3388 (0x6 << MOVE_LEN_SHIFT));
3389 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
3390 - (0x8 << MOVE_LEN_SHIFT));
3391 + (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
3393 /* Will read assoclen + cryptlen bytes */
3394 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3395 @@ -975,10 +1550,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap)
3396 * @desc: pointer to buffer used for descriptor construction
3397 * @cdata: pointer to block cipher transform definitions
3398 * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
3399 + * @ivsize: initialization vector size
3400 * @icvsize: integrity check value (ICV) size (truncated or full)
3401 + * @is_qi: true when called from caam/qi
3403 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
3404 - unsigned int icvsize)
3405 + unsigned int ivsize, unsigned int icvsize,
3408 u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
3410 @@ -999,6 +1577,18 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3411 append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3412 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3415 + /* assoclen is not needed, skip it */
3416 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
3418 + /* Read salt and IV */
3419 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
3420 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
3422 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
3423 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
3426 /* assoclen + cryptlen = seqoutlen */
3427 append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3429 @@ -1010,7 +1600,7 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3430 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
3431 (0x6 << MOVE_LEN_SHIFT));
3432 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
3433 - (0x8 << MOVE_LEN_SHIFT));
3434 + (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
3436 /* Will read assoclen + cryptlen bytes */
3437 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3438 @@ -1044,6 +1634,138 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
3440 EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
3443 + * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and
3444 + * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared
3445 + * descriptor (non-protocol).
3446 + * @desc: pointer to buffer used for descriptor construction
3447 + * @cdata: pointer to block cipher transform definitions
3448 + * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with
3449 + * OP_ALG_AAI_AEAD.
3450 + * @adata: pointer to authentication transform definitions
3451 + * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with
3452 + * OP_ALG_AAI_AEAD.
3453 + * @ivsize: initialization vector size
3454 + * @icvsize: integrity check value (ICV) size (truncated or full)
3455 + * @encap: true if encapsulation, false if decapsulation
3456 + * @is_qi: true when called from caam/qi
3458 +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
3459 + struct alginfo *adata, unsigned int ivsize,
3460 + unsigned int icvsize, const bool encap,
3463 + u32 *key_jump_cmd, *wait_cmd;
3465 + const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE);
3467 + /* Note: Context registers are saved. */
3468 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
3470 + /* skip key loading if they are loaded due to sharing */
3471 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3474 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen,
3475 + CLASS_1 | KEY_DEST_CLASS_REG);
3477 + /* For IPsec load the salt from keymat in the context register */
3479 + append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4,
3480 + LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
3481 + 4 << LDST_OFFSET_SHIFT);
3483 + set_jump_tgt_here(desc, key_jump_cmd);
3485 + /* Class 2 and 1 operations: Poly & ChaCha */
3487 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
3489 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3492 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
3493 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
3494 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
3499 + u32 *wait_load_cmd;
3500 + u32 ctx1_iv_off = is_ipsec ? 8 : 4;
3502 + /* REG3 = assoclen */
3503 + append_seq_load(desc, 4, LDST_CLASS_DECO |
3504 + LDST_SRCDST_WORD_DECO_MATH3 |
3505 + 4 << LDST_OFFSET_SHIFT);
3507 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
3508 + JUMP_COND_CALM | JUMP_COND_NCP |
3509 + JUMP_COND_NOP | JUMP_COND_NIP |
3511 + set_jump_tgt_here(desc, wait_load_cmd);
3513 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
3514 + LDST_SRCDST_BYTE_CONTEXT |
3515 + ctx1_iv_off << LDST_OFFSET_SHIFT);
3519 + * MAGIC with NFIFO
3520 + * Read associated data from the input and send them to class1 and
3521 + * class2 alignment blocks. From class1 send data to output fifo and
3522 + * then write it to memory since we don't need to encrypt AD.
3524 + nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 |
3525 + NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND;
3526 + append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB |
3527 + LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3);
3529 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
3530 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
3531 + append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO |
3532 + FIFOLD_CLASS_CLASS1 | LDST_VLF);
3533 + append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK |
3534 + MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3);
3535 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
3537 + /* IPsec - copy IV at the output */
3539 + append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA |
3542 + wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
3543 + JUMP_COND_NOP | JUMP_TEST_ALL);
3544 + set_jump_tgt_here(desc, wait_cmd);
3547 + /* Read and write cryptlen bytes */
3548 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
3549 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0,
3551 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
3554 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
3555 + LDST_SRCDST_BYTE_CONTEXT);
3557 + /* Read and write cryptlen bytes */
3558 + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
3559 + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0,
3561 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
3563 + /* Load ICV for verification */
3564 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
3565 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
3568 + print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ",
3569 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3572 +EXPORT_SYMBOL(cnstr_shdsc_chachapoly);
3575 * For ablkcipher encrypt and decrypt, read from req->src and
3577 @@ -1062,7 +1784,8 @@ static inline void ablkcipher_append_src
3578 * @desc: pointer to buffer used for descriptor construction
3579 * @cdata: pointer to block cipher transform definitions
3580 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
3581 - * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
3582 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
3583 + * - OP_ALG_ALGSEL_CHACHA20
3584 * @ivsize: initialization vector size
3585 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
3586 * @ctx1_iv_off: IV offset in CONTEXT1 register
3587 @@ -1084,7 +1807,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
3589 /* Load nonce into CONTEXT1 reg */
3591 - u8 *nonce = cdata->key_virt + cdata->keylen;
3592 + const u8 *nonce = cdata->key_virt + cdata->keylen;
3594 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3595 LDST_CLASS_IND_CCB |
3596 @@ -1127,7 +1850,8 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_enc
3597 * @desc: pointer to buffer used for descriptor construction
3598 * @cdata: pointer to block cipher transform definitions
3599 * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
3600 - * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
3601 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
3602 + * - OP_ALG_ALGSEL_CHACHA20
3603 * @ivsize: initialization vector size
3604 * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
3605 * @ctx1_iv_off: IV offset in CONTEXT1 register
3606 @@ -1149,7 +1873,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
3608 /* Load nonce into CONTEXT1 reg */
3610 - u8 *nonce = cdata->key_virt + cdata->keylen;
3611 + const u8 *nonce = cdata->key_virt + cdata->keylen;
3613 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3614 LDST_CLASS_IND_CCB |
3615 @@ -1218,7 +1942,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
3617 /* Load Nonce into CONTEXT1 reg */
3619 - u8 *nonce = cdata->key_virt + cdata->keylen;
3620 + const u8 *nonce = cdata->key_virt + cdata->keylen;
3622 append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
3623 LDST_CLASS_IND_CCB |
3624 --- a/drivers/crypto/caam/caamalg_desc.h
3625 +++ b/drivers/crypto/caam/caamalg_desc.h
3627 #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
3628 #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
3630 +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
3631 +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
3633 /* Note: Nonce is counted in cdata.keylen */
3634 #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
3637 #define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
3638 #define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
3639 #define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
3640 +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
3641 +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
3643 #define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
3644 #define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
3645 #define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
3646 +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
3647 +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
3649 #define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
3650 #define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
3651 #define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
3652 +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
3653 +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
3655 #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
3656 #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
3660 void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
3661 - unsigned int icvsize);
3662 + unsigned int icvsize, int era);
3664 void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
3665 - unsigned int icvsize);
3666 + unsigned int icvsize, int era);
3668 void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
3669 struct alginfo *adata, unsigned int ivsize,
3670 unsigned int icvsize, const bool is_rfc3686,
3671 u32 *nonce, const u32 ctx1_iv_off,
3672 - const bool is_qi);
3673 + const bool is_qi, int era);
3675 void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
3676 struct alginfo *adata, unsigned int ivsize,
3677 unsigned int icvsize, const bool geniv,
3678 const bool is_rfc3686, u32 *nonce,
3679 - const u32 ctx1_iv_off, const bool is_qi);
3680 + const u32 ctx1_iv_off, const bool is_qi, int era);
3682 void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
3683 struct alginfo *adata, unsigned int ivsize,
3684 unsigned int icvsize, const bool is_rfc3686,
3685 u32 *nonce, const u32 ctx1_iv_off,
3686 - const bool is_qi);
3687 + const bool is_qi, int era);
3689 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
3690 + struct alginfo *adata, unsigned int assoclen,
3691 + unsigned int ivsize, unsigned int authsize,
3692 + unsigned int blocksize, int era);
3694 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
3695 + struct alginfo *adata, unsigned int assoclen,
3696 + unsigned int ivsize, unsigned int authsize,
3697 + unsigned int blocksize, int era);
3699 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
3700 - unsigned int icvsize);
3701 + unsigned int ivsize, unsigned int icvsize,
3702 + const bool is_qi);
3704 void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
3705 - unsigned int icvsize);
3706 + unsigned int ivsize, unsigned int icvsize,
3707 + const bool is_qi);
3709 void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
3710 - unsigned int icvsize);
3711 + unsigned int ivsize, unsigned int icvsize,
3712 + const bool is_qi);
3714 void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
3715 - unsigned int icvsize);
3716 + unsigned int ivsize, unsigned int icvsize,
3717 + const bool is_qi);
3719 void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
3720 - unsigned int icvsize);
3721 + unsigned int ivsize, unsigned int icvsize,
3722 + const bool is_qi);
3724 void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
3725 - unsigned int icvsize);
3726 + unsigned int ivsize, unsigned int icvsize,
3727 + const bool is_qi);
3729 +void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
3730 + struct alginfo *adata, unsigned int ivsize,
3731 + unsigned int icvsize, const bool encap,
3732 + const bool is_qi);
3734 void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
3735 unsigned int ivsize, const bool is_rfc3686,
3736 --- a/drivers/crypto/caam/caamalg_qi.c
3737 +++ b/drivers/crypto/caam/caamalg_qi.c
3746 #include "desc_constr.h"
3747 @@ -53,6 +53,7 @@ struct caam_ctx {
3748 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
3749 u8 key[CAAM_MAX_KEY_SIZE];
3751 + enum dma_data_direction dir;
3752 struct alginfo adata;
3753 struct alginfo cdata;
3754 unsigned int authsize;
3755 @@ -74,6 +75,7 @@ static int aead_set_sh_desc(struct crypt
3756 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
3757 OP_ALG_AAI_CTR_MOD128);
3758 const bool is_rfc3686 = alg->caam.rfc3686;
3759 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
3761 if (!ctx->cdata.keylen || !ctx->authsize)
3763 @@ -124,7 +126,7 @@ static int aead_set_sh_desc(struct crypt
3765 cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3766 ivsize, ctx->authsize, is_rfc3686, nonce,
3767 - ctx1_iv_off, true);
3768 + ctx1_iv_off, true, ctrlpriv->era);
3771 /* aead_decrypt shared descriptor */
3772 @@ -149,7 +151,8 @@ skip_enc:
3774 cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
3775 ivsize, ctx->authsize, alg->caam.geniv,
3776 - is_rfc3686, nonce, ctx1_iv_off, true);
3777 + is_rfc3686, nonce, ctx1_iv_off, true,
3780 if (!alg->caam.geniv)
3782 @@ -176,7 +179,7 @@ skip_enc:
3784 cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3785 ivsize, ctx->authsize, is_rfc3686, nonce,
3786 - ctx1_iv_off, true);
3787 + ctx1_iv_off, true, ctrlpriv->era);
3791 @@ -197,6 +200,7 @@ static int aead_setkey(struct crypto_aea
3793 struct caam_ctx *ctx = crypto_aead_ctx(aead);
3794 struct device *jrdev = ctx->jrdev;
3795 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
3796 struct crypto_authenc_keys keys;
3799 @@ -211,6 +215,27 @@ static int aead_setkey(struct crypto_aea
3800 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3804 + * If DKP is supported, use it in the shared descriptor to generate
3807 + if (ctrlpriv->era >= 6) {
3808 + ctx->adata.keylen = keys.authkeylen;
3809 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3810 + OP_ALG_ALGSEL_MASK);
3812 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
3815 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
3816 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
3818 + dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
3819 + ctx->adata.keylen_pad +
3820 + keys.enckeylen, ctx->dir);
3821 + goto skip_split_key;
3824 ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
3825 keys.authkeylen, CAAM_MAX_KEY_SIZE -
3827 @@ -220,13 +245,14 @@ static int aead_setkey(struct crypto_aea
3828 /* postpend encryption key to auth split key */
3829 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
3830 dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
3831 - keys.enckeylen, DMA_TO_DEVICE);
3832 + keys.enckeylen, ctx->dir);
3834 print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
3835 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
3836 ctx->adata.keylen_pad + keys.enckeylen, 1);
3840 ctx->cdata.keylen = keys.enckeylen;
3842 ret = aead_set_sh_desc(aead);
3843 @@ -258,6 +284,468 @@ badkey:
3847 +static int tls_set_sh_desc(struct crypto_aead *tls)
3849 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3850 + unsigned int ivsize = crypto_aead_ivsize(tls);
3851 + unsigned int blocksize = crypto_aead_blocksize(tls);
3852 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
3853 + unsigned int data_len[2];
3855 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
3857 + if (!ctx->cdata.keylen || !ctx->authsize)
3861 + * TLS 1.0 encrypt shared descriptor
3862 + * Job Descriptor and Shared Descriptor
3863 + * must fit into the 64-word Descriptor h/w Buffer
3865 + data_len[0] = ctx->adata.keylen_pad;
3866 + data_len[1] = ctx->cdata.keylen;
3868 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
3869 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
3873 + ctx->adata.key_virt = ctx->key;
3875 + ctx->adata.key_dma = ctx->key_dma;
3878 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
3880 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
3882 + ctx->adata.key_inline = !!(inl_mask & 1);
3883 + ctx->cdata.key_inline = !!(inl_mask & 2);
3885 + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
3886 + assoclen, ivsize, ctx->authsize, blocksize,
3890 + * TLS 1.0 decrypt shared descriptor
3891 + * Keys do not fit inline, regardless of algorithms used
3893 + ctx->adata.key_inline = false;
3894 + ctx->adata.key_dma = ctx->key_dma;
3895 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
3897 + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
3898 + assoclen, ivsize, ctx->authsize, blocksize,
3904 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
3906 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3908 + ctx->authsize = authsize;
3909 + tls_set_sh_desc(tls);
3914 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
3915 + unsigned int keylen)
3917 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
3918 + struct device *jrdev = ctx->jrdev;
3919 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
3920 + struct crypto_authenc_keys keys;
3923 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3927 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
3928 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
3930 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
3931 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
3935 + * If DKP is supported, use it in the shared descriptor to generate
3938 + if (ctrlpriv->era >= 6) {
3939 + ctx->adata.keylen = keys.authkeylen;
3940 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3941 + OP_ALG_ALGSEL_MASK);
3943 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
3946 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
3947 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
3949 + dma_sync_single_for_device(jrdev, ctx->key_dma,
3950 + ctx->adata.keylen_pad +
3951 + keys.enckeylen, ctx->dir);
3952 + goto skip_split_key;
3955 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
3956 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
3961 + /* postpend encryption key to auth split key */
3962 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
3963 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
3964 + keys.enckeylen, ctx->dir);
3967 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
3968 + ctx->adata.keylen, ctx->adata.keylen_pad);
3969 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
3970 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
3971 + ctx->adata.keylen_pad + keys.enckeylen, 1);
3975 + ctx->cdata.keylen = keys.enckeylen;
3977 + ret = tls_set_sh_desc(tls);
3981 + /* Now update the driver contexts with the new shared descriptor */
3982 + if (ctx->drv_ctx[ENCRYPT]) {
3983 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
3984 + ctx->sh_desc_enc);
3986 + dev_err(jrdev, "driver enc context update failed\n");
3991 + if (ctx->drv_ctx[DECRYPT]) {
3992 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
3993 + ctx->sh_desc_dec);
3995 + dev_err(jrdev, "driver dec context update failed\n");
4002 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
4006 +static int gcm_set_sh_desc(struct crypto_aead *aead)
4008 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4009 + unsigned int ivsize = crypto_aead_ivsize(aead);
4010 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
4011 + ctx->cdata.keylen;
4013 + if (!ctx->cdata.keylen || !ctx->authsize)
4017 + * Job Descriptor and Shared Descriptor
4018 + * must fit into the 64-word Descriptor h/w Buffer
4020 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
4021 + ctx->cdata.key_inline = true;
4022 + ctx->cdata.key_virt = ctx->key;
4024 + ctx->cdata.key_inline = false;
4025 + ctx->cdata.key_dma = ctx->key_dma;
4028 + cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
4029 + ctx->authsize, true);
4032 + * Job Descriptor and Shared Descriptor
4033 + * must fit into the 64-word Descriptor h/w Buffer
4035 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
4036 + ctx->cdata.key_inline = true;
4037 + ctx->cdata.key_virt = ctx->key;
4039 + ctx->cdata.key_inline = false;
4040 + ctx->cdata.key_dma = ctx->key_dma;
4043 + cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
4044 + ctx->authsize, true);
4049 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
4051 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4053 + ctx->authsize = authsize;
4054 + gcm_set_sh_desc(authenc);
4059 +static int gcm_setkey(struct crypto_aead *aead,
4060 + const u8 *key, unsigned int keylen)
4062 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4063 + struct device *jrdev = ctx->jrdev;
4067 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4068 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4071 + memcpy(ctx->key, key, keylen);
4072 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
4073 + ctx->cdata.keylen = keylen;
4075 + ret = gcm_set_sh_desc(aead);
4079 + /* Now update the driver contexts with the new shared descriptor */
4080 + if (ctx->drv_ctx[ENCRYPT]) {
4081 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
4082 + ctx->sh_desc_enc);
4084 + dev_err(jrdev, "driver enc context update failed\n");
4089 + if (ctx->drv_ctx[DECRYPT]) {
4090 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
4091 + ctx->sh_desc_dec);
4093 + dev_err(jrdev, "driver dec context update failed\n");
4101 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
4103 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4104 + unsigned int ivsize = crypto_aead_ivsize(aead);
4105 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
4106 + ctx->cdata.keylen;
4108 + if (!ctx->cdata.keylen || !ctx->authsize)
4111 + ctx->cdata.key_virt = ctx->key;
4114 + * Job Descriptor and Shared Descriptor
4115 + * must fit into the 64-word Descriptor h/w Buffer
4117 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
4118 + ctx->cdata.key_inline = true;
4120 + ctx->cdata.key_inline = false;
4121 + ctx->cdata.key_dma = ctx->key_dma;
4124 + cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
4125 + ctx->authsize, true);
4128 + * Job Descriptor and Shared Descriptor
4129 + * must fit into the 64-word Descriptor h/w Buffer
4131 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
4132 + ctx->cdata.key_inline = true;
4134 + ctx->cdata.key_inline = false;
4135 + ctx->cdata.key_dma = ctx->key_dma;
4138 + cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
4139 + ctx->authsize, true);
4144 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
4145 + unsigned int authsize)
4147 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4149 + ctx->authsize = authsize;
4150 + rfc4106_set_sh_desc(authenc);
4155 +static int rfc4106_setkey(struct crypto_aead *aead,
4156 + const u8 *key, unsigned int keylen)
4158 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4159 + struct device *jrdev = ctx->jrdev;
4166 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4167 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4170 + memcpy(ctx->key, key, keylen);
4172 + * The last four bytes of the key material are used as the salt value
4173 + * in the nonce. Update the AES key length.
4175 + ctx->cdata.keylen = keylen - 4;
4176 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
4179 + ret = rfc4106_set_sh_desc(aead);
4183 + /* Now update the driver contexts with the new shared descriptor */
4184 + if (ctx->drv_ctx[ENCRYPT]) {
4185 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
4186 + ctx->sh_desc_enc);
4188 + dev_err(jrdev, "driver enc context update failed\n");
4193 + if (ctx->drv_ctx[DECRYPT]) {
4194 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
4195 + ctx->sh_desc_dec);
4197 + dev_err(jrdev, "driver dec context update failed\n");
4205 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
4207 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4208 + unsigned int ivsize = crypto_aead_ivsize(aead);
4209 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
4210 + ctx->cdata.keylen;
4212 + if (!ctx->cdata.keylen || !ctx->authsize)
4215 + ctx->cdata.key_virt = ctx->key;
4218 + * Job Descriptor and Shared Descriptor
4219 + * must fit into the 64-word Descriptor h/w Buffer
4221 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
4222 + ctx->cdata.key_inline = true;
4224 + ctx->cdata.key_inline = false;
4225 + ctx->cdata.key_dma = ctx->key_dma;
4228 + cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
4229 + ctx->authsize, true);
4232 + * Job Descriptor and Shared Descriptor
4233 + * must fit into the 64-word Descriptor h/w Buffer
4235 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
4236 + ctx->cdata.key_inline = true;
4238 + ctx->cdata.key_inline = false;
4239 + ctx->cdata.key_dma = ctx->key_dma;
4242 + cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
4243 + ctx->authsize, true);
4248 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
4249 + unsigned int authsize)
4251 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
4253 + ctx->authsize = authsize;
4254 + rfc4543_set_sh_desc(authenc);
4259 +static int rfc4543_setkey(struct crypto_aead *aead,
4260 + const u8 *key, unsigned int keylen)
4262 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4263 + struct device *jrdev = ctx->jrdev;
4270 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
4271 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
4274 + memcpy(ctx->key, key, keylen);
4276 + * The last four bytes of the key material are used as the salt value
4277 + * in the nonce. Update the AES key length.
4279 + ctx->cdata.keylen = keylen - 4;
4280 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
4283 + ret = rfc4543_set_sh_desc(aead);
4287 + /* Now update the driver contexts with the new shared descriptor */
4288 + if (ctx->drv_ctx[ENCRYPT]) {
4289 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
4290 + ctx->sh_desc_enc);
4292 + dev_err(jrdev, "driver enc context update failed\n");
4297 + if (ctx->drv_ctx[DECRYPT]) {
4298 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
4299 + ctx->sh_desc_dec);
4301 + dev_err(jrdev, "driver dec context update failed\n");
4309 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
4310 const u8 *key, unsigned int keylen)
4312 @@ -414,6 +902,29 @@ struct aead_edesc {
4316 + * tls_edesc - s/w-extended tls descriptor
4317 + * @src_nents: number of segments in input scatterlist
4318 + * @dst_nents: number of segments in output scatterlist
4319 + * @iv_dma: dma address of iv for checking continuity and link table
4320 + * @qm_sg_bytes: length of dma mapped h/w link table
4321 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
4322 + * @qm_sg_dma: bus physical mapped address of h/w link table
4323 + * @drv_req: driver-specific request structure
4324 + * @sgt: the h/w link table, followed by IV
4329 + dma_addr_t iv_dma;
4331 + dma_addr_t qm_sg_dma;
4332 + struct scatterlist tmp[2];
4333 + struct scatterlist *dst;
4334 + struct caam_drv_req drv_req;
4335 + struct qm_sg_entry sgt[0];
4339 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
4340 * @src_nents: number of segments in input scatterlist
4341 * @dst_nents: number of segments in output scatterlist
4342 @@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de
4343 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
4346 +static void tls_unmap(struct device *dev,
4347 + struct tls_edesc *edesc,
4348 + struct aead_request *req)
4350 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
4351 + int ivsize = crypto_aead_ivsize(aead);
4353 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
4354 + edesc->dst_nents, edesc->iv_dma, ivsize,
4355 + edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
4356 + edesc->qm_sg_bytes);
4359 static void ablkcipher_unmap(struct device *dev,
4360 struct ablkcipher_edesc *edesc,
4361 struct ablkcipher_request *req)
4362 @@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re
4363 qidev = caam_ctx->qidev;
4365 if (unlikely(status)) {
4366 + u32 ssrc = status & JRSTA_SSRC_MASK;
4367 + u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
4369 caam_jr_strstatus(qidev, status);
4372 + * verify hw auth check passed else return -EBADMSG
4374 + if (ssrc == JRSTA_SSRC_CCB_ERROR &&
4375 + err_id == JRSTA_CCBERR_ERRID_ICVCHK)
4381 edesc = container_of(drv_req, typeof(*edesc), drv_req);
4382 @@ -647,9 +1181,24 @@ static struct aead_edesc *aead_edesc_all
4384 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
4385 * Input is not contiguous.
4386 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
4387 + * the end of the table by allocating more S/G entries. Logic:
4388 + * if (src != dst && output S/G)
4389 + * pad output S/G, if needed
4390 + * else if (src == dst && S/G)
4391 + * overlapping S/Gs; pad one of them
4392 + * else if (input S/G) ...
4393 + * pad input S/G, if needed
4395 - qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
4396 - (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
4397 + qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
4398 + if (mapped_dst_nents > 1)
4399 + qm_sg_ents += ALIGN(mapped_dst_nents, 4);
4400 + else if ((req->src == req->dst) && (mapped_src_nents > 1))
4401 + qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
4402 + 1 + !!ivsize + ALIGN(mapped_src_nents, 4));
4404 + qm_sg_ents = ALIGN(qm_sg_ents, 4);
4406 sg_table = &edesc->sgt[0];
4407 qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
4408 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
4409 @@ -785,6 +1334,260 @@ static int aead_decrypt(struct aead_requ
4410 return aead_crypt(req, false);
4413 +static int ipsec_gcm_encrypt(struct aead_request *req)
4415 + if (req->assoclen < 8)
4418 + return aead_crypt(req, true);
4421 +static int ipsec_gcm_decrypt(struct aead_request *req)
4423 + if (req->assoclen < 8)
4426 + return aead_crypt(req, false);
4429 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
4431 + struct device *qidev;
4432 + struct tls_edesc *edesc;
4433 + struct aead_request *aead_req = drv_req->app_ctx;
4434 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
4435 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
4438 + qidev = caam_ctx->qidev;
4440 + if (unlikely(status)) {
4441 + caam_jr_strstatus(qidev, status);
4445 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
4446 + tls_unmap(qidev, edesc, aead_req);
4448 + aead_request_complete(aead_req, ecode);
4449 + qi_cache_free(edesc);
4453 + * allocate and map the tls extended descriptor
4455 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
4457 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
4458 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4459 + unsigned int blocksize = crypto_aead_blocksize(aead);
4460 + unsigned int padsize, authsize;
4461 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
4462 + typeof(*alg), aead);
4463 + struct device *qidev = ctx->qidev;
4464 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4465 + GFP_KERNEL : GFP_ATOMIC;
4466 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
4467 + struct tls_edesc *edesc;
4468 + dma_addr_t qm_sg_dma, iv_dma = 0;
4471 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
4472 + int in_len, out_len;
4473 + struct qm_sg_entry *sg_table, *fd_sgt;
4474 + struct caam_drv_ctx *drv_ctx;
4475 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
4476 + struct scatterlist *dst;
4479 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
4481 + authsize = ctx->authsize + padsize;
4483 + authsize = ctx->authsize;
4486 + drv_ctx = get_drv_ctx(ctx, op_type);
4487 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
4488 + return (struct tls_edesc *)drv_ctx;
4490 + /* allocate space for base edesc, link tables and IV */
4491 + edesc = qi_cache_alloc(GFP_DMA | flags);
4492 + if (unlikely(!edesc)) {
4493 + dev_err(qidev, "could not allocate extended descriptor\n");
4494 + return ERR_PTR(-ENOMEM);
4497 + if (likely(req->src == req->dst)) {
4498 + src_nents = sg_nents_for_len(req->src, req->assoclen +
4500 + (encrypt ? authsize : 0));
4501 + if (unlikely(src_nents < 0)) {
4502 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
4503 + req->assoclen + req->cryptlen +
4504 + (encrypt ? authsize : 0));
4505 + qi_cache_free(edesc);
4506 + return ERR_PTR(src_nents);
4509 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
4510 + DMA_BIDIRECTIONAL);
4511 + if (unlikely(!mapped_src_nents)) {
4512 + dev_err(qidev, "unable to map source\n");
4513 + qi_cache_free(edesc);
4514 + return ERR_PTR(-ENOMEM);
4518 + src_nents = sg_nents_for_len(req->src, req->assoclen +
4520 + if (unlikely(src_nents < 0)) {
4521 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
4522 + req->assoclen + req->cryptlen);
4523 + qi_cache_free(edesc);
4524 + return ERR_PTR(src_nents);
4527 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
4528 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
4529 + (encrypt ? authsize : 0));
4530 + if (unlikely(dst_nents < 0)) {
4531 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
4533 + (encrypt ? authsize : 0));
4534 + qi_cache_free(edesc);
4535 + return ERR_PTR(dst_nents);
4539 + mapped_src_nents = dma_map_sg(qidev, req->src,
4540 + src_nents, DMA_TO_DEVICE);
4541 + if (unlikely(!mapped_src_nents)) {
4542 + dev_err(qidev, "unable to map source\n");
4543 + qi_cache_free(edesc);
4544 + return ERR_PTR(-ENOMEM);
4547 + mapped_src_nents = 0;
4550 + mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
4552 + if (unlikely(!mapped_dst_nents)) {
4553 + dev_err(qidev, "unable to map destination\n");
4554 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
4555 + qi_cache_free(edesc);
4556 + return ERR_PTR(-ENOMEM);
4561 + * Create S/G table: IV, src, dst.
4562 + * Input is not contiguous.
4564 + qm_sg_ents = 1 + mapped_src_nents +
4565 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
4566 + sg_table = &edesc->sgt[0];
4567 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
4569 + ivsize = crypto_aead_ivsize(aead);
4570 + iv = (u8 *)(sg_table + qm_sg_ents);
4571 + /* Make sure IV is located in a DMAable area */
4572 + memcpy(iv, req->iv, ivsize);
4573 + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
4574 + if (dma_mapping_error(qidev, iv_dma)) {
4575 + dev_err(qidev, "unable to map IV\n");
4576 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
4578 + qi_cache_free(edesc);
4579 + return ERR_PTR(-ENOMEM);
4582 + edesc->src_nents = src_nents;
4583 + edesc->dst_nents = dst_nents;
4585 + edesc->iv_dma = iv_dma;
4586 + edesc->drv_req.app_ctx = req;
4587 + edesc->drv_req.cbk = tls_done;
4588 + edesc->drv_req.drv_ctx = drv_ctx;
4590 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
4593 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
4594 + qm_sg_index += mapped_src_nents;
4596 + if (mapped_dst_nents > 1)
4597 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
4600 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
4601 + if (dma_mapping_error(qidev, qm_sg_dma)) {
4602 + dev_err(qidev, "unable to map S/G table\n");
4603 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
4604 + ivsize, op_type, 0, 0);
4605 + qi_cache_free(edesc);
4606 + return ERR_PTR(-ENOMEM);
4609 + edesc->qm_sg_dma = qm_sg_dma;
4610 + edesc->qm_sg_bytes = qm_sg_bytes;
4612 + out_len = req->cryptlen + (encrypt ? authsize : 0);
4613 + in_len = ivsize + req->assoclen + req->cryptlen;
4615 + fd_sgt = &edesc->drv_req.fd_sgt[0];
4617 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
4619 + if (req->dst == req->src)
4620 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
4621 + (sg_nents_for_len(req->src, req->assoclen) +
4622 + 1) * sizeof(*sg_table), out_len, 0);
4623 + else if (mapped_dst_nents == 1)
4624 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
4626 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
4627 + qm_sg_index, out_len, 0);
4632 +static int tls_crypt(struct aead_request *req, bool encrypt)
4634 + struct tls_edesc *edesc;
4635 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
4636 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
4639 + if (unlikely(caam_congested))
4642 + edesc = tls_edesc_alloc(req, encrypt);
4643 + if (IS_ERR_OR_NULL(edesc))
4644 + return PTR_ERR(edesc);
4646 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
4648 + ret = -EINPROGRESS;
4650 + tls_unmap(ctx->qidev, edesc, req);
4651 + qi_cache_free(edesc);
4657 +static int tls_encrypt(struct aead_request *req)
4659 + return tls_crypt(req, true);
4662 +static int tls_decrypt(struct aead_request *req)
4664 + return tls_crypt(req, false);
4667 static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
4669 struct ablkcipher_edesc *edesc;
4670 @@ -900,7 +1703,24 @@ static struct ablkcipher_edesc *ablkciph
4671 qm_sg_ents = 1 + mapped_src_nents;
4672 dst_sg_idx = qm_sg_ents;
4674 - qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
4676 + * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
4677 + * the end of the table by allocating more S/G entries. Logic:
4678 + * if (src != dst && output S/G)
4679 + * pad output S/G, if needed
4680 + * else if (src == dst && S/G)
4681 + * overlapping S/Gs; pad one of them
4682 + * else if (input S/G) ...
4683 + * pad input S/G, if needed
4685 + if (mapped_dst_nents > 1)
4686 + qm_sg_ents += ALIGN(mapped_dst_nents, 4);
4687 + else if ((req->src == req->dst) && (mapped_src_nents > 1))
4688 + qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
4689 + 1 + ALIGN(mapped_src_nents, 4));
4691 + qm_sg_ents = ALIGN(qm_sg_ents, 4);
4693 qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
4694 if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
4695 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
4696 @@ -1308,6 +2128,61 @@ static struct caam_alg_template driver_a
4699 static struct caam_aead_alg driver_aeads[] = {
4703 + .cra_name = "rfc4106(gcm(aes))",
4704 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
4705 + .cra_blocksize = 1,
4707 + .setkey = rfc4106_setkey,
4708 + .setauthsize = rfc4106_setauthsize,
4709 + .encrypt = ipsec_gcm_encrypt,
4710 + .decrypt = ipsec_gcm_decrypt,
4712 + .maxauthsize = AES_BLOCK_SIZE,
4715 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4721 + .cra_name = "rfc4543(gcm(aes))",
4722 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
4723 + .cra_blocksize = 1,
4725 + .setkey = rfc4543_setkey,
4726 + .setauthsize = rfc4543_setauthsize,
4727 + .encrypt = ipsec_gcm_encrypt,
4728 + .decrypt = ipsec_gcm_decrypt,
4730 + .maxauthsize = AES_BLOCK_SIZE,
4733 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4736 + /* Galois Counter Mode */
4740 + .cra_name = "gcm(aes)",
4741 + .cra_driver_name = "gcm-aes-caam-qi",
4742 + .cra_blocksize = 1,
4744 + .setkey = gcm_setkey,
4745 + .setauthsize = gcm_setauthsize,
4746 + .encrypt = aead_encrypt,
4747 + .decrypt = aead_decrypt,
4749 + .maxauthsize = AES_BLOCK_SIZE,
4752 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
4755 /* single-pass ipsec_esp descriptor */
4758 @@ -2118,6 +2993,26 @@ static struct caam_aead_alg driver_aeads
4765 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
4766 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
4767 + .cra_blocksize = AES_BLOCK_SIZE,
4769 + .setkey = tls_setkey,
4770 + .setauthsize = tls_setauthsize,
4771 + .encrypt = tls_encrypt,
4772 + .decrypt = tls_decrypt,
4773 + .ivsize = AES_BLOCK_SIZE,
4774 + .maxauthsize = SHA1_DIGEST_SIZE,
4777 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
4778 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
4779 + OP_ALG_AAI_HMAC_PRECOMP,
4784 struct caam_crypto_alg {
4785 @@ -2126,9 +3021,21 @@ struct caam_crypto_alg {
4786 struct caam_alg_entry caam;
4789 -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
4790 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
4793 struct caam_drv_private *priv;
4794 + struct device *dev;
4795 + /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
4796 + static const u8 digest_size[] = {
4799 + SHA224_DIGEST_SIZE,
4800 + SHA256_DIGEST_SIZE,
4801 + SHA384_DIGEST_SIZE,
4802 + SHA512_DIGEST_SIZE
4807 * distribute tfms across job rings to ensure in-order
4808 @@ -2140,10 +3047,19 @@ static int caam_init_common(struct caam_
4809 return PTR_ERR(ctx->jrdev);
4812 - ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
4814 - if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
4815 - dev_err(ctx->jrdev, "unable to map key\n");
4816 + priv = dev_get_drvdata(ctx->jrdev->parent);
4817 + if (priv->era >= 6 && uses_dkp) {
4818 + ctx->dir = DMA_BIDIRECTIONAL;
4819 + dev = ctx->jrdev->parent;
4821 + ctx->dir = DMA_TO_DEVICE;
4825 + ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
4827 + if (dma_mapping_error(dev, ctx->key_dma)) {
4828 + dev_err(dev, "unable to map key\n");
4829 caam_jr_free(ctx->jrdev);
4832 @@ -2152,8 +3068,23 @@ static int caam_init_common(struct caam_
4833 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
4834 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
4836 - priv = dev_get_drvdata(ctx->jrdev->parent);
4837 - ctx->qidev = priv->qidev;
4838 + if (ctx->adata.algtype) {
4839 + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
4840 + >> OP_ALG_ALGSEL_SHIFT;
4841 + if (op_id < ARRAY_SIZE(digest_size)) {
4842 + ctx->authsize = digest_size[op_id];
4844 + dev_err(ctx->jrdev,
4845 + "incorrect op_id %d; must be less than %zu\n",
4846 + op_id, ARRAY_SIZE(digest_size));
4847 + caam_jr_free(ctx->jrdev);
4851 + ctx->authsize = 0;
4854 + ctx->qidev = ctx->jrdev->parent;
4856 spin_lock_init(&ctx->lock);
4857 ctx->drv_ctx[ENCRYPT] = NULL;
4858 @@ -2170,7 +3101,7 @@ static int caam_cra_init(struct crypto_t
4860 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
4862 - return caam_init_common(ctx, &caam_alg->caam);
4863 + return caam_init_common(ctx, &caam_alg->caam, false);
4866 static int caam_aead_init(struct crypto_aead *tfm)
4867 @@ -2180,17 +3111,25 @@ static int caam_aead_init(struct crypto_
4869 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
4871 - return caam_init_common(ctx, &caam_alg->caam);
4872 + return caam_init_common(ctx, &caam_alg->caam,
4873 + (alg->setkey == aead_setkey) ||
4874 + (alg->setkey == tls_setkey));
4877 static void caam_exit_common(struct caam_ctx *ctx)
4879 + struct device *dev;
4881 caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
4882 caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
4883 caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
4885 - dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
4887 + if (ctx->dir == DMA_BIDIRECTIONAL)
4888 + dev = ctx->jrdev->parent;
4892 + dma_unmap_single(dev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
4894 caam_jr_free(ctx->jrdev);
4896 @@ -2206,7 +3145,7 @@ static void caam_aead_exit(struct crypto
4899 static struct list_head alg_list;
4900 -static void __exit caam_qi_algapi_exit(void)
4901 +void caam_qi_algapi_exit(void)
4903 struct caam_crypto_alg *t_alg, *n;
4905 @@ -2282,53 +3221,48 @@ static void caam_aead_alg_init(struct ca
4906 alg->exit = caam_aead_exit;
4909 -static int __init caam_qi_algapi_init(void)
4910 +int caam_qi_algapi_init(struct device *ctrldev)
4912 - struct device_node *dev_node;
4913 - struct platform_device *pdev;
4914 - struct device *ctrldev;
4915 - struct caam_drv_private *priv;
4916 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
4918 - u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
4919 + u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
4920 unsigned int md_limit = SHA512_DIGEST_SIZE;
4921 bool registered = false;
4923 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
4925 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
4930 - pdev = of_find_device_by_node(dev_node);
4931 - of_node_put(dev_node);
4935 - ctrldev = &pdev->dev;
4936 - priv = dev_get_drvdata(ctrldev);
4939 - * If priv is NULL, it's probably because the caam driver wasn't
4940 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
4942 - if (!priv || !priv->qi_present)
4945 INIT_LIST_HEAD(&alg_list);
4948 * Register crypto algorithms the device supports.
4949 * First, detect presence and attributes of DES, AES, and MD blocks.
4951 - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4952 - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4953 - des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
4954 - aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
4955 - md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4956 + if (priv->era < 10) {
4957 + u32 cha_vid, cha_inst;
4959 + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
4960 + aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
4961 + md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4963 + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
4964 + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
4965 + CHA_ID_LS_DES_SHIFT;
4966 + aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
4967 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
4971 + aesa = rd_reg32(&priv->ctrl->vreg.aesa);
4972 + mdha = rd_reg32(&priv->ctrl->vreg.mdha);
4974 + aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
4975 + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
4977 + des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
4978 + aes_inst = aesa & CHA_VER_NUM_MASK;
4979 + md_inst = mdha & CHA_VER_NUM_MASK;
4982 /* If MD is present, limit digest size based on LP256 */
4983 - if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
4984 + if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
4985 md_limit = SHA256_DIGEST_SIZE;
4987 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4988 @@ -2349,14 +3283,14 @@ static int __init caam_qi_algapi_init(vo
4989 t_alg = caam_alg_alloc(alg);
4990 if (IS_ERR(t_alg)) {
4991 err = PTR_ERR(t_alg);
4992 - dev_warn(priv->qidev, "%s alg allocation failed\n",
4993 + dev_warn(ctrldev, "%s alg allocation failed\n",
4998 err = crypto_register_alg(&t_alg->crypto_alg);
5000 - dev_warn(priv->qidev, "%s alg registration failed\n",
5001 + dev_warn(ctrldev, "%s alg registration failed\n",
5002 t_alg->crypto_alg.cra_driver_name);
5005 @@ -2388,8 +3322,7 @@ static int __init caam_qi_algapi_init(vo
5006 * Check support for AES algorithms not available
5009 - if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
5010 - (alg_aai == OP_ALG_AAI_GCM))
5011 + if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
5015 @@ -2414,14 +3347,7 @@ static int __init caam_qi_algapi_init(vo
5019 - dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
5020 + dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
5025 -module_init(caam_qi_algapi_init);
5026 -module_exit(caam_qi_algapi_exit);
5028 -MODULE_LICENSE("GPL");
5029 -MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
5030 -MODULE_AUTHOR("Freescale Semiconductor");
5032 +++ b/drivers/crypto/caam/caamalg_qi2.c
5034 +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
5036 + * Copyright 2015-2016 Freescale Semiconductor Inc.
5037 + * Copyright 2017-2018 NXP
5040 +#include <linux/fsl/mc.h>
5041 +#include "compat.h"
5043 +#include "caamalg_qi2.h"
5044 +#include "dpseci_cmd.h"
5045 +#include "desc_constr.h"
5047 +#include "sg_sw_sec4.h"
5048 +#include "sg_sw_qm2.h"
5049 +#include "key_gen.h"
5050 +#include "caamalg_desc.h"
5051 +#include "caamhash_desc.h"
5052 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
5053 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
5055 +#define CAAM_CRA_PRIORITY 2000
5057 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
5058 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
5059 + SHA512_DIGEST_SIZE * 2)
5062 + * This is a cache of buffers, from which the users of CAAM QI driver
5063 + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
5064 + * NOTE: A more elegant solution would be to have some headroom in the frames
5065 + * being processed. This can be added by the dpaa2-eth driver. This would
5066 + * pose a problem for userspace application processing which cannot
5067 + * know of this limitation. So for now, this will work.
5068 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
5070 +static struct kmem_cache *qi_cache;
5072 +struct caam_alg_entry {
5073 + struct device *dev;
5074 + int class1_alg_type;
5075 + int class2_alg_type;
5080 +struct caam_aead_alg {
5081 + struct aead_alg aead;
5082 + struct caam_alg_entry caam;
5086 +struct caam_skcipher_alg {
5087 + struct skcipher_alg skcipher;
5088 + struct caam_alg_entry caam;
5093 + * caam_ctx - per-session context
5094 + * @flc: Flow Contexts array
5095 + * @key: virtual address of the key(s): [authentication key], encryption key
5096 + * @flc_dma: I/O virtual addresses of the Flow Contexts
5097 + * @key_dma: I/O virtual address of the key
5098 + * @dir: DMA direction for mapping key and Flow Contexts
5099 + * @dev: dpseci device
5100 + * @adata: authentication algorithm details
5101 + * @cdata: encryption algorithm details
5102 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
5105 + struct caam_flc flc[NUM_OP];
5106 + u8 key[CAAM_MAX_KEY_SIZE];
5107 + dma_addr_t flc_dma[NUM_OP];
5108 + dma_addr_t key_dma;
5109 + enum dma_data_direction dir;
5110 + struct device *dev;
5111 + struct alginfo adata;
5112 + struct alginfo cdata;
5113 + unsigned int authsize;
5116 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
5117 + dma_addr_t iova_addr)
5119 + phys_addr_t phys_addr;
5121 + phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
5124 + return phys_to_virt(phys_addr);
5128 + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
5130 + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
5131 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
5132 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
5133 + * hosting 16 SG entries.
5135 + * @flags - flags that would be used for the equivalent kmalloc(..) call
5137 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
5139 +static inline void *qi_cache_zalloc(gfp_t flags)
5141 + return kmem_cache_zalloc(qi_cache, flags);
5145 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
5147 + * @obj - buffer previously allocated by qi_cache_zalloc
5149 + * No checking is being done, the call is a passthrough call to
5150 + * kmem_cache_free(...)
5152 +static inline void qi_cache_free(void *obj)
5154 + kmem_cache_free(qi_cache, obj);
5157 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
5159 + switch (crypto_tfm_alg_type(areq->tfm)) {
5160 + case CRYPTO_ALG_TYPE_SKCIPHER:
5161 + return skcipher_request_ctx(skcipher_request_cast(areq));
5162 + case CRYPTO_ALG_TYPE_AEAD:
5163 + return aead_request_ctx(container_of(areq, struct aead_request,
5165 + case CRYPTO_ALG_TYPE_AHASH:
5166 + return ahash_request_ctx(ahash_request_cast(areq));
5168 + return ERR_PTR(-EINVAL);
5172 +static void caam_unmap(struct device *dev, struct scatterlist *src,
5173 + struct scatterlist *dst, int src_nents,
5174 + int dst_nents, dma_addr_t iv_dma, int ivsize,
5175 + dma_addr_t qm_sg_dma, int qm_sg_bytes)
5179 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
5180 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
5182 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
5186 + dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
5189 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
5192 +static int aead_set_sh_desc(struct crypto_aead *aead)
5194 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
5195 + typeof(*alg), aead);
5196 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5197 + unsigned int ivsize = crypto_aead_ivsize(aead);
5198 + struct device *dev = ctx->dev;
5199 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5200 + struct caam_flc *flc;
5202 + u32 ctx1_iv_off = 0;
5203 + u32 *nonce = NULL;
5204 + unsigned int data_len[2];
5206 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
5207 + OP_ALG_AAI_CTR_MOD128);
5208 + const bool is_rfc3686 = alg->caam.rfc3686;
5210 + if (!ctx->cdata.keylen || !ctx->authsize)
5214 + * AES-CTR needs to load IV in CONTEXT1 reg
5215 + * at an offset of 128bits (16bytes)
5216 + * CONTEXT1[255:128] = IV
5222 + * RFC3686 specific:
5223 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
5226 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
5227 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
5228 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
5231 + data_len[0] = ctx->adata.keylen_pad;
5232 + data_len[1] = ctx->cdata.keylen;
5234 + /* aead_encrypt shared descriptor */
5235 + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
5236 + DESC_QI_AEAD_ENC_LEN) +
5237 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
5238 + DESC_JOB_IO_LEN, data_len, &inl_mask,
5239 + ARRAY_SIZE(data_len)) < 0)
5243 + ctx->adata.key_virt = ctx->key;
5245 + ctx->adata.key_dma = ctx->key_dma;
5248 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
5250 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5252 + ctx->adata.key_inline = !!(inl_mask & 1);
5253 + ctx->cdata.key_inline = !!(inl_mask & 2);
5255 + flc = &ctx->flc[ENCRYPT];
5256 + desc = flc->sh_desc;
5258 + if (alg->caam.geniv)
5259 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
5260 + ivsize, ctx->authsize, is_rfc3686,
5261 + nonce, ctx1_iv_off, true,
5262 + priv->sec_attr.era);
5264 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
5265 + ivsize, ctx->authsize, is_rfc3686, nonce,
5266 + ctx1_iv_off, true, priv->sec_attr.era);
5268 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5269 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5270 + sizeof(flc->flc) + desc_bytes(desc),
5273 + /* aead_decrypt shared descriptor */
5274 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
5275 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
5276 + DESC_JOB_IO_LEN, data_len, &inl_mask,
5277 + ARRAY_SIZE(data_len)) < 0)
5281 + ctx->adata.key_virt = ctx->key;
5283 + ctx->adata.key_dma = ctx->key_dma;
5286 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
5288 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5290 + ctx->adata.key_inline = !!(inl_mask & 1);
5291 + ctx->cdata.key_inline = !!(inl_mask & 2);
5293 + flc = &ctx->flc[DECRYPT];
5294 + desc = flc->sh_desc;
5295 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
5296 + ivsize, ctx->authsize, alg->caam.geniv,
5297 + is_rfc3686, nonce, ctx1_iv_off, true,
5298 + priv->sec_attr.era);
5299 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5300 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5301 + sizeof(flc->flc) + desc_bytes(desc),
5307 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
5309 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
5311 + ctx->authsize = authsize;
5312 + aead_set_sh_desc(authenc);
5317 +struct split_key_sh_result {
5318 + struct completion completion;
5320 + struct device *dev;
5323 +static void split_key_sh_done(void *cbk_ctx, u32 err)
5325 + struct split_key_sh_result *res = cbk_ctx;
5328 + dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
5332 + caam_qi2_strstatus(res->dev, err);
5335 + complete(&res->completion);
5338 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
5339 + unsigned int keylen)
5341 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5342 + struct device *dev = ctx->dev;
5343 + struct crypto_authenc_keys keys;
5345 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
5349 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
5350 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
5352 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
5353 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
5356 + ctx->adata.keylen = keys.authkeylen;
5357 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
5358 + OP_ALG_ALGSEL_MASK);
5360 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
5363 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
5364 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
5365 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
5366 + keys.enckeylen, ctx->dir);
5368 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
5369 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
5370 + ctx->adata.keylen_pad + keys.enckeylen, 1);
5373 + ctx->cdata.keylen = keys.enckeylen;
5375 + return aead_set_sh_desc(aead);
5377 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
5381 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
5384 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
5385 + struct caam_request *req_ctx = aead_request_ctx(req);
5386 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
5387 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
5388 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5389 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
5390 + typeof(*alg), aead);
5391 + struct device *dev = ctx->dev;
5392 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
5393 + GFP_KERNEL : GFP_ATOMIC;
5394 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
5395 + struct aead_edesc *edesc;
5396 + dma_addr_t qm_sg_dma, iv_dma = 0;
5398 + unsigned int authsize = ctx->authsize;
5399 + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
5400 + int in_len, out_len;
5401 + struct dpaa2_sg_entry *sg_table;
5403 + /* allocate space for base edesc, link tables and IV */
5404 + edesc = qi_cache_zalloc(GFP_DMA | flags);
5405 + if (unlikely(!edesc)) {
5406 + dev_err(dev, "could not allocate extended descriptor\n");
5407 + return ERR_PTR(-ENOMEM);
5410 + if (unlikely(req->dst != req->src)) {
5411 + src_nents = sg_nents_for_len(req->src, req->assoclen +
5413 + if (unlikely(src_nents < 0)) {
5414 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5415 + req->assoclen + req->cryptlen);
5416 + qi_cache_free(edesc);
5417 + return ERR_PTR(src_nents);
5420 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
5422 + (encrypt ? authsize :
5424 + if (unlikely(dst_nents < 0)) {
5425 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
5426 + req->assoclen + req->cryptlen +
5427 + (encrypt ? authsize : (-authsize)));
5428 + qi_cache_free(edesc);
5429 + return ERR_PTR(dst_nents);
5433 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5435 + if (unlikely(!mapped_src_nents)) {
5436 + dev_err(dev, "unable to map source\n");
5437 + qi_cache_free(edesc);
5438 + return ERR_PTR(-ENOMEM);
5441 + mapped_src_nents = 0;
5444 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
5446 + if (unlikely(!mapped_dst_nents)) {
5447 + dev_err(dev, "unable to map destination\n");
5448 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
5449 + qi_cache_free(edesc);
5450 + return ERR_PTR(-ENOMEM);
5453 + src_nents = sg_nents_for_len(req->src, req->assoclen +
5455 + (encrypt ? authsize : 0));
5456 + if (unlikely(src_nents < 0)) {
5457 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5458 + req->assoclen + req->cryptlen +
5459 + (encrypt ? authsize : 0));
5460 + qi_cache_free(edesc);
5461 + return ERR_PTR(src_nents);
5464 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5465 + DMA_BIDIRECTIONAL);
5466 + if (unlikely(!mapped_src_nents)) {
5467 + dev_err(dev, "unable to map source\n");
5468 + qi_cache_free(edesc);
5469 + return ERR_PTR(-ENOMEM);
5473 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
5474 + ivsize = crypto_aead_ivsize(aead);
5477 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
5478 + * Input is not contiguous.
5480 + qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
5481 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
5482 + sg_table = &edesc->sgt[0];
5483 + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
5484 + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
5485 + CAAM_QI_MEMCACHE_SIZE)) {
5486 + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
5487 + qm_sg_nents, ivsize);
5488 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
5490 + qi_cache_free(edesc);
5491 + return ERR_PTR(-ENOMEM);
5495 + u8 *iv = (u8 *)(sg_table + qm_sg_nents);
5497 + /* Make sure IV is located in a DMAable area */
5498 + memcpy(iv, req->iv, ivsize);
5500 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
5501 + if (dma_mapping_error(dev, iv_dma)) {
5502 + dev_err(dev, "unable to map IV\n");
5503 + caam_unmap(dev, req->src, req->dst, src_nents,
5504 + dst_nents, 0, 0, 0, 0);
5505 + qi_cache_free(edesc);
5506 + return ERR_PTR(-ENOMEM);
5510 + edesc->src_nents = src_nents;
5511 + edesc->dst_nents = dst_nents;
5512 + edesc->iv_dma = iv_dma;
5514 + if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
5515 + OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
5517 + * The associated data comes already with the IV but we need
5518 + * to skip it when we authenticate or encrypt...
5520 + edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
5522 + edesc->assoclen = cpu_to_caam32(req->assoclen);
5523 + edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
5525 + if (dma_mapping_error(dev, edesc->assoclen_dma)) {
5526 + dev_err(dev, "unable to map assoclen\n");
5527 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
5528 + iv_dma, ivsize, 0, 0);
5529 + qi_cache_free(edesc);
5530 + return ERR_PTR(-ENOMEM);
5533 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
5536 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
5539 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
5540 + qm_sg_index += mapped_src_nents;
5542 + if (mapped_dst_nents > 1)
5543 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
5546 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
5547 + if (dma_mapping_error(dev, qm_sg_dma)) {
5548 + dev_err(dev, "unable to map S/G table\n");
5549 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
5550 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
5551 + iv_dma, ivsize, 0, 0);
5552 + qi_cache_free(edesc);
5553 + return ERR_PTR(-ENOMEM);
5556 + edesc->qm_sg_dma = qm_sg_dma;
5557 + edesc->qm_sg_bytes = qm_sg_bytes;
5559 + out_len = req->assoclen + req->cryptlen +
5560 + (encrypt ? ctx->authsize : (-ctx->authsize));
5561 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
5563 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
5564 + dpaa2_fl_set_final(in_fle, true);
5565 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
5566 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
5567 + dpaa2_fl_set_len(in_fle, in_len);
5569 + if (req->dst == req->src) {
5570 + if (mapped_src_nents == 1) {
5571 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5572 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
5574 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5575 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
5576 + (1 + !!ivsize) * sizeof(*sg_table));
5578 + } else if (mapped_dst_nents == 1) {
5579 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5580 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
5582 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5583 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
5584 + sizeof(*sg_table));
5587 + dpaa2_fl_set_len(out_fle, out_len);
5592 +static int chachapoly_set_sh_desc(struct crypto_aead *aead)
5594 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5595 + unsigned int ivsize = crypto_aead_ivsize(aead);
5596 + struct device *dev = ctx->dev;
5597 + struct caam_flc *flc;
5600 + if (!ctx->cdata.keylen || !ctx->authsize)
5603 + flc = &ctx->flc[ENCRYPT];
5604 + desc = flc->sh_desc;
5605 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
5606 + ctx->authsize, true, true);
5607 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5608 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5609 + sizeof(flc->flc) + desc_bytes(desc),
5612 + flc = &ctx->flc[DECRYPT];
5613 + desc = flc->sh_desc;
5614 + cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
5615 + ctx->authsize, false, true);
5616 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5617 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5618 + sizeof(flc->flc) + desc_bytes(desc),
5624 +static int chachapoly_setauthsize(struct crypto_aead *aead,
5625 + unsigned int authsize)
5627 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5629 + if (authsize != POLY1305_DIGEST_SIZE)
5632 + ctx->authsize = authsize;
5633 + return chachapoly_set_sh_desc(aead);
5636 +static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
5637 + unsigned int keylen)
5639 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5640 + unsigned int ivsize = crypto_aead_ivsize(aead);
5641 + unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
5643 + if (keylen != CHACHA20_KEY_SIZE + saltlen) {
5644 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
5648 + ctx->cdata.key_virt = key;
5649 + ctx->cdata.keylen = keylen - saltlen;
5651 + return chachapoly_set_sh_desc(aead);
5654 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
5657 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
5658 + unsigned int blocksize = crypto_aead_blocksize(tls);
5659 + unsigned int padsize, authsize;
5660 + struct caam_request *req_ctx = aead_request_ctx(req);
5661 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
5662 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
5663 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5664 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
5665 + typeof(*alg), aead);
5666 + struct device *dev = ctx->dev;
5667 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
5668 + GFP_KERNEL : GFP_ATOMIC;
5669 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
5670 + struct tls_edesc *edesc;
5671 + dma_addr_t qm_sg_dma, iv_dma = 0;
5674 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
5675 + int in_len, out_len;
5676 + struct dpaa2_sg_entry *sg_table;
5677 + struct scatterlist *dst;
5680 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
5682 + authsize = ctx->authsize + padsize;
5684 + authsize = ctx->authsize;
5687 + /* allocate space for base edesc, link tables and IV */
5688 + edesc = qi_cache_zalloc(GFP_DMA | flags);
5689 + if (unlikely(!edesc)) {
5690 + dev_err(dev, "could not allocate extended descriptor\n");
5691 + return ERR_PTR(-ENOMEM);
5694 + if (likely(req->src == req->dst)) {
5695 + src_nents = sg_nents_for_len(req->src, req->assoclen +
5697 + (encrypt ? authsize : 0));
5698 + if (unlikely(src_nents < 0)) {
5699 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5700 + req->assoclen + req->cryptlen +
5701 + (encrypt ? authsize : 0));
5702 + qi_cache_free(edesc);
5703 + return ERR_PTR(src_nents);
5706 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
5707 + DMA_BIDIRECTIONAL);
5708 + if (unlikely(!mapped_src_nents)) {
5709 + dev_err(dev, "unable to map source\n");
5710 + qi_cache_free(edesc);
5711 + return ERR_PTR(-ENOMEM);
5715 + src_nents = sg_nents_for_len(req->src, req->assoclen +
5717 + if (unlikely(src_nents < 0)) {
5718 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
5719 + req->assoclen + req->cryptlen);
5720 + qi_cache_free(edesc);
5721 + return ERR_PTR(src_nents);
5724 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
5725 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
5726 + (encrypt ? authsize : 0));
5727 + if (unlikely(dst_nents < 0)) {
5728 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
5730 + (encrypt ? authsize : 0));
5731 + qi_cache_free(edesc);
5732 + return ERR_PTR(dst_nents);
5736 + mapped_src_nents = dma_map_sg(dev, req->src,
5737 + src_nents, DMA_TO_DEVICE);
5738 + if (unlikely(!mapped_src_nents)) {
5739 + dev_err(dev, "unable to map source\n");
5740 + qi_cache_free(edesc);
5741 + return ERR_PTR(-ENOMEM);
5744 + mapped_src_nents = 0;
5747 + mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
5749 + if (unlikely(!mapped_dst_nents)) {
5750 + dev_err(dev, "unable to map destination\n");
5751 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
5752 + qi_cache_free(edesc);
5753 + return ERR_PTR(-ENOMEM);
5758 + * Create S/G table: IV, src, dst.
5759 + * Input is not contiguous.
5761 + qm_sg_ents = 1 + mapped_src_nents +
5762 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
5763 + sg_table = &edesc->sgt[0];
5764 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
5766 + ivsize = crypto_aead_ivsize(tls);
5767 + iv = (u8 *)(sg_table + qm_sg_ents);
5768 + /* Make sure IV is located in a DMAable area */
5769 + memcpy(iv, req->iv, ivsize);
5770 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
5771 + if (dma_mapping_error(dev, iv_dma)) {
5772 + dev_err(dev, "unable to map IV\n");
5773 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
5775 + qi_cache_free(edesc);
5776 + return ERR_PTR(-ENOMEM);
5779 + edesc->src_nents = src_nents;
5780 + edesc->dst_nents = dst_nents;
5782 + edesc->iv_dma = iv_dma;
5784 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
5787 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
5788 + qm_sg_index += mapped_src_nents;
5790 + if (mapped_dst_nents > 1)
5791 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
5794 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
5795 + if (dma_mapping_error(dev, qm_sg_dma)) {
5796 + dev_err(dev, "unable to map S/G table\n");
5797 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
5799 + qi_cache_free(edesc);
5800 + return ERR_PTR(-ENOMEM);
5803 + edesc->qm_sg_dma = qm_sg_dma;
5804 + edesc->qm_sg_bytes = qm_sg_bytes;
5806 + out_len = req->cryptlen + (encrypt ? authsize : 0);
5807 + in_len = ivsize + req->assoclen + req->cryptlen;
5809 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
5810 + dpaa2_fl_set_final(in_fle, true);
5811 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
5812 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
5813 + dpaa2_fl_set_len(in_fle, in_len);
5815 + if (req->dst == req->src) {
5816 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5817 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
5818 + (sg_nents_for_len(req->src, req->assoclen) +
5819 + 1) * sizeof(*sg_table));
5820 + } else if (mapped_dst_nents == 1) {
5821 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
5822 + dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
5824 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
5825 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
5826 + sizeof(*sg_table));
5829 + dpaa2_fl_set_len(out_fle, out_len);
5834 +static int tls_set_sh_desc(struct crypto_aead *tls)
5836 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5837 + unsigned int ivsize = crypto_aead_ivsize(tls);
5838 + unsigned int blocksize = crypto_aead_blocksize(tls);
5839 + struct device *dev = ctx->dev;
5840 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5841 + struct caam_flc *flc;
5843 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
5844 + unsigned int data_len[2];
5847 + if (!ctx->cdata.keylen || !ctx->authsize)
5851 + * TLS 1.0 encrypt shared descriptor
5852 + * Job Descriptor and Shared Descriptor
5853 + * must fit into the 64-word Descriptor h/w Buffer
5855 + data_len[0] = ctx->adata.keylen_pad;
5856 + data_len[1] = ctx->cdata.keylen;
5858 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
5859 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
5863 + ctx->adata.key_virt = ctx->key;
5865 + ctx->adata.key_dma = ctx->key_dma;
5868 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
5870 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5872 + ctx->adata.key_inline = !!(inl_mask & 1);
5873 + ctx->cdata.key_inline = !!(inl_mask & 2);
5875 + flc = &ctx->flc[ENCRYPT];
5876 + desc = flc->sh_desc;
5877 + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
5878 + assoclen, ivsize, ctx->authsize, blocksize,
5879 + priv->sec_attr.era);
5880 + flc->flc[1] = cpu_to_caam32(desc_len(desc));
5881 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5882 + sizeof(flc->flc) + desc_bytes(desc),
5886 + * TLS 1.0 decrypt shared descriptor
5887 + * Keys do not fit inline, regardless of algorithms used
5889 + ctx->adata.key_inline = false;
5890 + ctx->adata.key_dma = ctx->key_dma;
5891 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
5893 + flc = &ctx->flc[DECRYPT];
5894 + desc = flc->sh_desc;
5895 + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
5896 + ctx->authsize, blocksize, priv->sec_attr.era);
5897 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5898 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
5899 + sizeof(flc->flc) + desc_bytes(desc),
5905 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
5906 + unsigned int keylen)
5908 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5909 + struct device *dev = ctx->dev;
5910 + struct crypto_authenc_keys keys;
5912 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
5916 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
5917 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
5919 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
5920 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
5923 + ctx->adata.keylen = keys.authkeylen;
5924 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
5925 + OP_ALG_ALGSEL_MASK);
5927 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
5930 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
5931 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
5932 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
5933 + keys.enckeylen, ctx->dir);
5935 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
5936 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
5937 + ctx->adata.keylen_pad + keys.enckeylen, 1);
5940 + ctx->cdata.keylen = keys.enckeylen;
5942 + return tls_set_sh_desc(tls);
5944 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
5948 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
5950 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
5952 + ctx->authsize = authsize;
5953 + tls_set_sh_desc(tls);
5958 +static int gcm_set_sh_desc(struct crypto_aead *aead)
5960 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
5961 + struct device *dev = ctx->dev;
5962 + unsigned int ivsize = crypto_aead_ivsize(aead);
5963 + struct caam_flc *flc;
5965 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
5966 + ctx->cdata.keylen;
5968 + if (!ctx->cdata.keylen || !ctx->authsize)
5972 + * AES GCM encrypt shared descriptor
5973 + * Job Descriptor and Shared Descriptor
5974 + * must fit into the 64-word Descriptor h/w Buffer
5976 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
5977 + ctx->cdata.key_inline = true;
5978 + ctx->cdata.key_virt = ctx->key;
5980 + ctx->cdata.key_inline = false;
5981 + ctx->cdata.key_dma = ctx->key_dma;
5984 + flc = &ctx->flc[ENCRYPT];
5985 + desc = flc->sh_desc;
5986 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
5987 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
5988 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
5989 + sizeof(flc->flc) + desc_bytes(desc),
5993 + * Job Descriptor and Shared Descriptors
5994 + * must all fit into the 64-word Descriptor h/w Buffer
5996 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
5997 + ctx->cdata.key_inline = true;
5998 + ctx->cdata.key_virt = ctx->key;
6000 + ctx->cdata.key_inline = false;
6001 + ctx->cdata.key_dma = ctx->key_dma;
6004 + flc = &ctx->flc[DECRYPT];
6005 + desc = flc->sh_desc;
6006 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
6007 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6008 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6009 + sizeof(flc->flc) + desc_bytes(desc),
6015 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
6017 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
6019 + ctx->authsize = authsize;
6020 + gcm_set_sh_desc(authenc);
6025 +static int gcm_setkey(struct crypto_aead *aead,
6026 + const u8 *key, unsigned int keylen)
6028 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6029 + struct device *dev = ctx->dev;
6032 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6033 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6036 + memcpy(ctx->key, key, keylen);
6037 + dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
6038 + ctx->cdata.keylen = keylen;
6040 + return gcm_set_sh_desc(aead);
6043 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
6045 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6046 + struct device *dev = ctx->dev;
6047 + unsigned int ivsize = crypto_aead_ivsize(aead);
6048 + struct caam_flc *flc;
6050 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
6051 + ctx->cdata.keylen;
6053 + if (!ctx->cdata.keylen || !ctx->authsize)
6056 + ctx->cdata.key_virt = ctx->key;
6059 + * RFC4106 encrypt shared descriptor
6060 + * Job Descriptor and Shared Descriptor
6061 + * must fit into the 64-word Descriptor h/w Buffer
6063 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
6064 + ctx->cdata.key_inline = true;
6066 + ctx->cdata.key_inline = false;
6067 + ctx->cdata.key_dma = ctx->key_dma;
6070 + flc = &ctx->flc[ENCRYPT];
6071 + desc = flc->sh_desc;
6072 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
6074 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6075 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6076 + sizeof(flc->flc) + desc_bytes(desc),
6080 + * Job Descriptor and Shared Descriptors
6081 + * must all fit into the 64-word Descriptor h/w Buffer
6083 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
6084 + ctx->cdata.key_inline = true;
6086 + ctx->cdata.key_inline = false;
6087 + ctx->cdata.key_dma = ctx->key_dma;
6090 + flc = &ctx->flc[DECRYPT];
6091 + desc = flc->sh_desc;
6092 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
6094 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6095 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6096 + sizeof(flc->flc) + desc_bytes(desc),
6102 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
6103 + unsigned int authsize)
6105 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
6107 + ctx->authsize = authsize;
6108 + rfc4106_set_sh_desc(authenc);
6113 +static int rfc4106_setkey(struct crypto_aead *aead,
6114 + const u8 *key, unsigned int keylen)
6116 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6117 + struct device *dev = ctx->dev;
6123 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6124 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6127 + memcpy(ctx->key, key, keylen);
6129 + * The last four bytes of the key material are used as the salt value
6130 + * in the nonce. Update the AES key length.
6132 + ctx->cdata.keylen = keylen - 4;
6133 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
6136 + return rfc4106_set_sh_desc(aead);
6139 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
6141 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6142 + struct device *dev = ctx->dev;
6143 + unsigned int ivsize = crypto_aead_ivsize(aead);
6144 + struct caam_flc *flc;
6146 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
6147 + ctx->cdata.keylen;
6149 + if (!ctx->cdata.keylen || !ctx->authsize)
6152 + ctx->cdata.key_virt = ctx->key;
6155 + * RFC4543 encrypt shared descriptor
6156 + * Job Descriptor and Shared Descriptor
6157 + * must fit into the 64-word Descriptor h/w Buffer
6159 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
6160 + ctx->cdata.key_inline = true;
6162 + ctx->cdata.key_inline = false;
6163 + ctx->cdata.key_dma = ctx->key_dma;
6166 + flc = &ctx->flc[ENCRYPT];
6167 + desc = flc->sh_desc;
6168 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
6170 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6171 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6172 + sizeof(flc->flc) + desc_bytes(desc),
6176 + * Job Descriptor and Shared Descriptors
6177 + * must all fit into the 64-word Descriptor h/w Buffer
6179 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
6180 + ctx->cdata.key_inline = true;
6182 + ctx->cdata.key_inline = false;
6183 + ctx->cdata.key_dma = ctx->key_dma;
6186 + flc = &ctx->flc[DECRYPT];
6187 + desc = flc->sh_desc;
6188 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
6190 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6191 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6192 + sizeof(flc->flc) + desc_bytes(desc),
6198 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
6199 + unsigned int authsize)
6201 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
6203 + ctx->authsize = authsize;
6204 + rfc4543_set_sh_desc(authenc);
6209 +static int rfc4543_setkey(struct crypto_aead *aead,
6210 + const u8 *key, unsigned int keylen)
6212 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6213 + struct device *dev = ctx->dev;
6219 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6220 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6223 + memcpy(ctx->key, key, keylen);
6225 + * The last four bytes of the key material are used as the salt value
6226 + * in the nonce. Update the AES key length.
6228 + ctx->cdata.keylen = keylen - 4;
6229 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
6232 + return rfc4543_set_sh_desc(aead);
6235 +static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
6236 + unsigned int keylen)
6238 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6239 + struct caam_skcipher_alg *alg =
6240 + container_of(crypto_skcipher_alg(skcipher),
6241 + struct caam_skcipher_alg, skcipher);
6242 + struct device *dev = ctx->dev;
6243 + struct caam_flc *flc;
6244 + unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
6246 + u32 ctx1_iv_off = 0;
6247 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
6248 + OP_ALG_AAI_CTR_MOD128) &&
6249 + ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
6250 + OP_ALG_ALGSEL_CHACHA20);
6251 + const bool is_rfc3686 = alg->caam.rfc3686;
6254 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
6255 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
6258 + * AES-CTR needs to load IV in CONTEXT1 reg
6259 + * at an offset of 128bits (16bytes)
6260 + * CONTEXT1[255:128] = IV
6266 + * RFC3686 specific:
6267 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
6268 + * | *key = {KEY, NONCE}
6271 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
6272 + keylen -= CTR_RFC3686_NONCE_SIZE;
6275 + ctx->cdata.keylen = keylen;
6276 + ctx->cdata.key_virt = key;
6277 + ctx->cdata.key_inline = true;
6279 + /* skcipher_encrypt shared descriptor */
6280 + flc = &ctx->flc[ENCRYPT];
6281 + desc = flc->sh_desc;
6282 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
6283 + is_rfc3686, ctx1_iv_off);
6284 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6285 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6286 + sizeof(flc->flc) + desc_bytes(desc),
6289 + /* skcipher_decrypt shared descriptor */
6290 + flc = &ctx->flc[DECRYPT];
6291 + desc = flc->sh_desc;
6292 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
6293 + is_rfc3686, ctx1_iv_off);
6294 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6295 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6296 + sizeof(flc->flc) + desc_bytes(desc),
6302 +static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
6303 + unsigned int keylen)
6305 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6306 + struct device *dev = ctx->dev;
6307 + struct caam_flc *flc;
6310 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
6311 + dev_err(dev, "key size mismatch\n");
6312 + crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
6316 + ctx->cdata.keylen = keylen;
6317 + ctx->cdata.key_virt = key;
6318 + ctx->cdata.key_inline = true;
6320 + /* xts_skcipher_encrypt shared descriptor */
6321 + flc = &ctx->flc[ENCRYPT];
6322 + desc = flc->sh_desc;
6323 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
6324 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6325 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
6326 + sizeof(flc->flc) + desc_bytes(desc),
6329 + /* xts_skcipher_decrypt shared descriptor */
6330 + flc = &ctx->flc[DECRYPT];
6331 + desc = flc->sh_desc;
6332 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
6333 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
6334 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
6335 + sizeof(flc->flc) + desc_bytes(desc),
6341 +static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
6343 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6344 + struct caam_request *req_ctx = skcipher_request_ctx(req);
6345 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
6346 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
6347 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6348 + struct device *dev = ctx->dev;
6349 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
6350 + GFP_KERNEL : GFP_ATOMIC;
6351 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
6352 + struct skcipher_edesc *edesc;
6353 + dma_addr_t iv_dma;
6355 + int ivsize = crypto_skcipher_ivsize(skcipher);
6356 + int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
6357 + struct dpaa2_sg_entry *sg_table;
6359 + src_nents = sg_nents_for_len(req->src, req->cryptlen);
6360 + if (unlikely(src_nents < 0)) {
6361 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
6363 + return ERR_PTR(src_nents);
6366 + if (unlikely(req->dst != req->src)) {
6367 + dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
6368 + if (unlikely(dst_nents < 0)) {
6369 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
6371 + return ERR_PTR(dst_nents);
6374 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
6376 + if (unlikely(!mapped_src_nents)) {
6377 + dev_err(dev, "unable to map source\n");
6378 + return ERR_PTR(-ENOMEM);
6381 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
6383 + if (unlikely(!mapped_dst_nents)) {
6384 + dev_err(dev, "unable to map destination\n");
6385 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
6386 + return ERR_PTR(-ENOMEM);
6389 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
6390 + DMA_BIDIRECTIONAL);
6391 + if (unlikely(!mapped_src_nents)) {
6392 + dev_err(dev, "unable to map source\n");
6393 + return ERR_PTR(-ENOMEM);
6397 + qm_sg_ents = 1 + mapped_src_nents;
6398 + dst_sg_idx = qm_sg_ents;
6400 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
6401 + qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
6402 + if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
6403 + ivsize > CAAM_QI_MEMCACHE_SIZE)) {
6404 + dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
6405 + qm_sg_ents, ivsize);
6406 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
6408 + return ERR_PTR(-ENOMEM);
6411 + /* allocate space for base edesc, link tables and IV */
6412 + edesc = qi_cache_zalloc(GFP_DMA | flags);
6413 + if (unlikely(!edesc)) {
6414 + dev_err(dev, "could not allocate extended descriptor\n");
6415 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
6417 + return ERR_PTR(-ENOMEM);
6420 + /* Make sure IV is located in a DMAable area */
6421 + sg_table = &edesc->sgt[0];
6422 + iv = (u8 *)(sg_table + qm_sg_ents);
6423 + memcpy(iv, req->iv, ivsize);
6425 + iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
6426 + if (dma_mapping_error(dev, iv_dma)) {
6427 + dev_err(dev, "unable to map IV\n");
6428 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
6430 + qi_cache_free(edesc);
6431 + return ERR_PTR(-ENOMEM);
6434 + edesc->src_nents = src_nents;
6435 + edesc->dst_nents = dst_nents;
6436 + edesc->iv_dma = iv_dma;
6437 + edesc->qm_sg_bytes = qm_sg_bytes;
6439 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
6440 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
6442 + if (mapped_dst_nents > 1)
6443 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
6446 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
6448 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
6449 + dev_err(dev, "unable to map S/G table\n");
6450 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
6451 + iv_dma, ivsize, 0, 0);
6452 + qi_cache_free(edesc);
6453 + return ERR_PTR(-ENOMEM);
6456 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
6457 + dpaa2_fl_set_final(in_fle, true);
6458 + dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
6459 + dpaa2_fl_set_len(out_fle, req->cryptlen);
6461 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
6462 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
6464 + if (req->src == req->dst) {
6465 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
6466 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
6467 + sizeof(*sg_table));
6468 + } else if (mapped_dst_nents > 1) {
6469 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
6470 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
6471 + sizeof(*sg_table));
6473 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
6474 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
6480 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
6481 + struct aead_request *req)
6483 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6484 + int ivsize = crypto_aead_ivsize(aead);
6486 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
6487 + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
6488 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
6491 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
6492 + struct aead_request *req)
6494 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6495 + int ivsize = crypto_aead_ivsize(tls);
6497 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
6498 + edesc->dst_nents, edesc->iv_dma, ivsize, edesc->qm_sg_dma,
6499 + edesc->qm_sg_bytes);
6502 +static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
6503 + struct skcipher_request *req)
6505 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6506 + int ivsize = crypto_skcipher_ivsize(skcipher);
6508 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
6509 + edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
6512 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
6514 + struct crypto_async_request *areq = cbk_ctx;
6515 + struct aead_request *req = container_of(areq, struct aead_request,
6517 + struct caam_request *req_ctx = to_caam_req(areq);
6518 + struct aead_edesc *edesc = req_ctx->edesc;
6519 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6520 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6524 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6527 + if (unlikely(status)) {
6528 + caam_qi2_strstatus(ctx->dev, status);
6532 + aead_unmap(ctx->dev, edesc, req);
6533 + qi_cache_free(edesc);
6534 + aead_request_complete(req, ecode);
6537 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
6539 + struct crypto_async_request *areq = cbk_ctx;
6540 + struct aead_request *req = container_of(areq, struct aead_request,
6542 + struct caam_request *req_ctx = to_caam_req(areq);
6543 + struct aead_edesc *edesc = req_ctx->edesc;
6544 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6545 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6549 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6552 + if (unlikely(status)) {
6553 + caam_qi2_strstatus(ctx->dev, status);
6555 + * verify hw auth check passed else return -EBADMSG
6557 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
6558 + JRSTA_CCBERR_ERRID_ICVCHK)
6564 + aead_unmap(ctx->dev, edesc, req);
6565 + qi_cache_free(edesc);
6566 + aead_request_complete(req, ecode);
6569 +static int aead_encrypt(struct aead_request *req)
6571 + struct aead_edesc *edesc;
6572 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6573 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6574 + struct caam_request *caam_req = aead_request_ctx(req);
6577 + /* allocate extended descriptor */
6578 + edesc = aead_edesc_alloc(req, true);
6579 + if (IS_ERR(edesc))
6580 + return PTR_ERR(edesc);
6582 + caam_req->flc = &ctx->flc[ENCRYPT];
6583 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
6584 + caam_req->cbk = aead_encrypt_done;
6585 + caam_req->ctx = &req->base;
6586 + caam_req->edesc = edesc;
6587 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6588 + if (ret != -EINPROGRESS &&
6589 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6590 + aead_unmap(ctx->dev, edesc, req);
6591 + qi_cache_free(edesc);
6597 +static int aead_decrypt(struct aead_request *req)
6599 + struct aead_edesc *edesc;
6600 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
6601 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
6602 + struct caam_request *caam_req = aead_request_ctx(req);
6605 + /* allocate extended descriptor */
6606 + edesc = aead_edesc_alloc(req, false);
6607 + if (IS_ERR(edesc))
6608 + return PTR_ERR(edesc);
6610 + caam_req->flc = &ctx->flc[DECRYPT];
6611 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
6612 + caam_req->cbk = aead_decrypt_done;
6613 + caam_req->ctx = &req->base;
6614 + caam_req->edesc = edesc;
6615 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6616 + if (ret != -EINPROGRESS &&
6617 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6618 + aead_unmap(ctx->dev, edesc, req);
6619 + qi_cache_free(edesc);
6625 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
6627 + struct crypto_async_request *areq = cbk_ctx;
6628 + struct aead_request *req = container_of(areq, struct aead_request,
6630 + struct caam_request *req_ctx = to_caam_req(areq);
6631 + struct tls_edesc *edesc = req_ctx->edesc;
6632 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6633 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
6637 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6640 + if (unlikely(status)) {
6641 + caam_qi2_strstatus(ctx->dev, status);
6645 + tls_unmap(ctx->dev, edesc, req);
6646 + qi_cache_free(edesc);
6647 + aead_request_complete(req, ecode);
6650 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
6652 + struct crypto_async_request *areq = cbk_ctx;
6653 + struct aead_request *req = container_of(areq, struct aead_request,
6655 + struct caam_request *req_ctx = to_caam_req(areq);
6656 + struct tls_edesc *edesc = req_ctx->edesc;
6657 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6658 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
6662 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6665 + if (unlikely(status)) {
6666 + caam_qi2_strstatus(ctx->dev, status);
6668 + * verify hw auth check passed else return -EBADMSG
6670 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
6671 + JRSTA_CCBERR_ERRID_ICVCHK)
6677 + tls_unmap(ctx->dev, edesc, req);
6678 + qi_cache_free(edesc);
6679 + aead_request_complete(req, ecode);
6682 +static int tls_encrypt(struct aead_request *req)
6684 + struct tls_edesc *edesc;
6685 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6686 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
6687 + struct caam_request *caam_req = aead_request_ctx(req);
6690 + /* allocate extended descriptor */
6691 + edesc = tls_edesc_alloc(req, true);
6692 + if (IS_ERR(edesc))
6693 + return PTR_ERR(edesc);
6695 + caam_req->flc = &ctx->flc[ENCRYPT];
6696 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
6697 + caam_req->cbk = tls_encrypt_done;
6698 + caam_req->ctx = &req->base;
6699 + caam_req->edesc = edesc;
6700 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6701 + if (ret != -EINPROGRESS &&
6702 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6703 + tls_unmap(ctx->dev, edesc, req);
6704 + qi_cache_free(edesc);
6710 +static int tls_decrypt(struct aead_request *req)
6712 + struct tls_edesc *edesc;
6713 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6714 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
6715 + struct caam_request *caam_req = aead_request_ctx(req);
6718 + /* allocate extended descriptor */
6719 + edesc = tls_edesc_alloc(req, false);
6720 + if (IS_ERR(edesc))
6721 + return PTR_ERR(edesc);
6723 + caam_req->flc = &ctx->flc[DECRYPT];
6724 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
6725 + caam_req->cbk = tls_decrypt_done;
6726 + caam_req->ctx = &req->base;
6727 + caam_req->edesc = edesc;
6728 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6729 + if (ret != -EINPROGRESS &&
6730 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6731 + tls_unmap(ctx->dev, edesc, req);
6732 + qi_cache_free(edesc);
6738 +static int ipsec_gcm_encrypt(struct aead_request *req)
6740 + if (req->assoclen < 8)
6743 + return aead_encrypt(req);
6746 +static int ipsec_gcm_decrypt(struct aead_request *req)
6748 + if (req->assoclen < 8)
6751 + return aead_decrypt(req);
6754 +static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
6756 + struct crypto_async_request *areq = cbk_ctx;
6757 + struct skcipher_request *req = skcipher_request_cast(areq);
6758 + struct caam_request *req_ctx = to_caam_req(areq);
6759 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6760 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6761 + struct skcipher_edesc *edesc = req_ctx->edesc;
6763 + int ivsize = crypto_skcipher_ivsize(skcipher);
6766 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6769 + if (unlikely(status)) {
6770 + caam_qi2_strstatus(ctx->dev, status);
6775 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
6776 + DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
6777 + edesc->src_nents > 1 ? 100 : ivsize, 1);
6778 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
6779 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
6780 + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
6783 + skcipher_unmap(ctx->dev, edesc, req);
6786 + * The crypto API expects us to set the IV (req->iv) to the last
6787 + * ciphertext block. This is used e.g. by the CTS mode.
6789 + scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
6792 + qi_cache_free(edesc);
6793 + skcipher_request_complete(req, ecode);
6796 +static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
6798 + struct crypto_async_request *areq = cbk_ctx;
6799 + struct skcipher_request *req = skcipher_request_cast(areq);
6800 + struct caam_request *req_ctx = to_caam_req(areq);
6801 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6802 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6803 + struct skcipher_edesc *edesc = req_ctx->edesc;
6806 + int ivsize = crypto_skcipher_ivsize(skcipher);
6808 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
6811 + if (unlikely(status)) {
6812 + caam_qi2_strstatus(ctx->dev, status);
6817 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
6818 + DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
6819 + edesc->src_nents > 1 ? 100 : ivsize, 1);
6820 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
6821 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
6822 + edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
6825 + skcipher_unmap(ctx->dev, edesc, req);
6826 + qi_cache_free(edesc);
6827 + skcipher_request_complete(req, ecode);
6830 +static int skcipher_encrypt(struct skcipher_request *req)
6832 + struct skcipher_edesc *edesc;
6833 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6834 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6835 + struct caam_request *caam_req = skcipher_request_ctx(req);
6838 + /* allocate extended descriptor */
6839 + edesc = skcipher_edesc_alloc(req);
6840 + if (IS_ERR(edesc))
6841 + return PTR_ERR(edesc);
6843 + caam_req->flc = &ctx->flc[ENCRYPT];
6844 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
6845 + caam_req->cbk = skcipher_encrypt_done;
6846 + caam_req->ctx = &req->base;
6847 + caam_req->edesc = edesc;
6848 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6849 + if (ret != -EINPROGRESS &&
6850 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6851 + skcipher_unmap(ctx->dev, edesc, req);
6852 + qi_cache_free(edesc);
6858 +static int skcipher_decrypt(struct skcipher_request *req)
6860 + struct skcipher_edesc *edesc;
6861 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
6862 + struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
6863 + struct caam_request *caam_req = skcipher_request_ctx(req);
6864 + int ivsize = crypto_skcipher_ivsize(skcipher);
6867 + /* allocate extended descriptor */
6868 + edesc = skcipher_edesc_alloc(req);
6869 + if (IS_ERR(edesc))
6870 + return PTR_ERR(edesc);
6873 + * The crypto API expects us to set the IV (req->iv) to the last
6874 + * ciphertext block.
6876 + scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
6879 + caam_req->flc = &ctx->flc[DECRYPT];
6880 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
6881 + caam_req->cbk = skcipher_decrypt_done;
6882 + caam_req->ctx = &req->base;
6883 + caam_req->edesc = edesc;
6884 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
6885 + if (ret != -EINPROGRESS &&
6886 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
6887 + skcipher_unmap(ctx->dev, edesc, req);
6888 + qi_cache_free(edesc);
6894 +static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
6897 + dma_addr_t dma_addr;
6900 + /* copy descriptor header template value */
6901 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
6902 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
6904 + ctx->dev = caam->dev;
6905 + ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
6907 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
6908 + offsetof(struct caam_ctx, flc_dma),
6909 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
6910 + if (dma_mapping_error(ctx->dev, dma_addr)) {
6911 + dev_err(ctx->dev, "unable to map key, shared descriptors\n");
6915 + for (i = 0; i < NUM_OP; i++)
6916 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
6917 + ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
6922 +static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
6924 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
6925 + struct caam_skcipher_alg *caam_alg =
6926 + container_of(alg, typeof(*caam_alg), skcipher);
6928 + crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
6929 + return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
6932 +static int caam_cra_init_aead(struct crypto_aead *tfm)
6934 + struct aead_alg *alg = crypto_aead_alg(tfm);
6935 + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
6938 + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
6939 + return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
6940 + (alg->setkey == aead_setkey) ||
6941 + (alg->setkey == tls_setkey));
6944 +static void caam_exit_common(struct caam_ctx *ctx)
6946 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
6947 + offsetof(struct caam_ctx, flc_dma), ctx->dir,
6948 + DMA_ATTR_SKIP_CPU_SYNC);
6951 +static void caam_cra_exit(struct crypto_skcipher *tfm)
6953 + caam_exit_common(crypto_skcipher_ctx(tfm));
6956 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
6958 + caam_exit_common(crypto_aead_ctx(tfm));
6961 +static struct caam_skcipher_alg driver_algs[] = {
6965 + .cra_name = "cbc(aes)",
6966 + .cra_driver_name = "cbc-aes-caam-qi2",
6967 + .cra_blocksize = AES_BLOCK_SIZE,
6969 + .setkey = skcipher_setkey,
6970 + .encrypt = skcipher_encrypt,
6971 + .decrypt = skcipher_decrypt,
6972 + .min_keysize = AES_MIN_KEY_SIZE,
6973 + .max_keysize = AES_MAX_KEY_SIZE,
6974 + .ivsize = AES_BLOCK_SIZE,
6976 + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
6981 + .cra_name = "cbc(des3_ede)",
6982 + .cra_driver_name = "cbc-3des-caam-qi2",
6983 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
6985 + .setkey = skcipher_setkey,
6986 + .encrypt = skcipher_encrypt,
6987 + .decrypt = skcipher_decrypt,
6988 + .min_keysize = DES3_EDE_KEY_SIZE,
6989 + .max_keysize = DES3_EDE_KEY_SIZE,
6990 + .ivsize = DES3_EDE_BLOCK_SIZE,
6992 + .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
6997 + .cra_name = "cbc(des)",
6998 + .cra_driver_name = "cbc-des-caam-qi2",
6999 + .cra_blocksize = DES_BLOCK_SIZE,
7001 + .setkey = skcipher_setkey,
7002 + .encrypt = skcipher_encrypt,
7003 + .decrypt = skcipher_decrypt,
7004 + .min_keysize = DES_KEY_SIZE,
7005 + .max_keysize = DES_KEY_SIZE,
7006 + .ivsize = DES_BLOCK_SIZE,
7008 + .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7013 + .cra_name = "ctr(aes)",
7014 + .cra_driver_name = "ctr-aes-caam-qi2",
7015 + .cra_blocksize = 1,
7017 + .setkey = skcipher_setkey,
7018 + .encrypt = skcipher_encrypt,
7019 + .decrypt = skcipher_decrypt,
7020 + .min_keysize = AES_MIN_KEY_SIZE,
7021 + .max_keysize = AES_MAX_KEY_SIZE,
7022 + .ivsize = AES_BLOCK_SIZE,
7023 + .chunksize = AES_BLOCK_SIZE,
7025 + .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
7026 + OP_ALG_AAI_CTR_MOD128,
7031 + .cra_name = "rfc3686(ctr(aes))",
7032 + .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
7033 + .cra_blocksize = 1,
7035 + .setkey = skcipher_setkey,
7036 + .encrypt = skcipher_encrypt,
7037 + .decrypt = skcipher_decrypt,
7038 + .min_keysize = AES_MIN_KEY_SIZE +
7039 + CTR_RFC3686_NONCE_SIZE,
7040 + .max_keysize = AES_MAX_KEY_SIZE +
7041 + CTR_RFC3686_NONCE_SIZE,
7042 + .ivsize = CTR_RFC3686_IV_SIZE,
7043 + .chunksize = AES_BLOCK_SIZE,
7046 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7047 + OP_ALG_AAI_CTR_MOD128,
7054 + .cra_name = "xts(aes)",
7055 + .cra_driver_name = "xts-aes-caam-qi2",
7056 + .cra_blocksize = AES_BLOCK_SIZE,
7058 + .setkey = xts_skcipher_setkey,
7059 + .encrypt = skcipher_encrypt,
7060 + .decrypt = skcipher_decrypt,
7061 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
7062 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
7063 + .ivsize = AES_BLOCK_SIZE,
7065 + .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
7070 + .cra_name = "chacha20",
7071 + .cra_driver_name = "chacha20-caam-qi2",
7072 + .cra_blocksize = 1,
7074 + .setkey = skcipher_setkey,
7075 + .encrypt = skcipher_encrypt,
7076 + .decrypt = skcipher_decrypt,
7077 + .min_keysize = CHACHA20_KEY_SIZE,
7078 + .max_keysize = CHACHA20_KEY_SIZE,
7079 + .ivsize = CHACHA20_IV_SIZE,
7081 + .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
7085 +static struct caam_aead_alg driver_aeads[] = {
7089 + .cra_name = "rfc4106(gcm(aes))",
7090 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
7091 + .cra_blocksize = 1,
7093 + .setkey = rfc4106_setkey,
7094 + .setauthsize = rfc4106_setauthsize,
7095 + .encrypt = ipsec_gcm_encrypt,
7096 + .decrypt = ipsec_gcm_decrypt,
7098 + .maxauthsize = AES_BLOCK_SIZE,
7101 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
7107 + .cra_name = "rfc4543(gcm(aes))",
7108 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
7109 + .cra_blocksize = 1,
7111 + .setkey = rfc4543_setkey,
7112 + .setauthsize = rfc4543_setauthsize,
7113 + .encrypt = ipsec_gcm_encrypt,
7114 + .decrypt = ipsec_gcm_decrypt,
7116 + .maxauthsize = AES_BLOCK_SIZE,
7119 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
7122 + /* Galois Counter Mode */
7126 + .cra_name = "gcm(aes)",
7127 + .cra_driver_name = "gcm-aes-caam-qi2",
7128 + .cra_blocksize = 1,
7130 + .setkey = gcm_setkey,
7131 + .setauthsize = gcm_setauthsize,
7132 + .encrypt = aead_encrypt,
7133 + .decrypt = aead_decrypt,
7135 + .maxauthsize = AES_BLOCK_SIZE,
7138 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
7141 + /* single-pass ipsec_esp descriptor */
7145 + .cra_name = "authenc(hmac(md5),cbc(aes))",
7146 + .cra_driver_name = "authenc-hmac-md5-"
7147 + "cbc-aes-caam-qi2",
7148 + .cra_blocksize = AES_BLOCK_SIZE,
7150 + .setkey = aead_setkey,
7151 + .setauthsize = aead_setauthsize,
7152 + .encrypt = aead_encrypt,
7153 + .decrypt = aead_decrypt,
7154 + .ivsize = AES_BLOCK_SIZE,
7155 + .maxauthsize = MD5_DIGEST_SIZE,
7158 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7159 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7160 + OP_ALG_AAI_HMAC_PRECOMP,
7166 + .cra_name = "echainiv(authenc(hmac(md5),"
7168 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
7169 + "cbc-aes-caam-qi2",
7170 + .cra_blocksize = AES_BLOCK_SIZE,
7172 + .setkey = aead_setkey,
7173 + .setauthsize = aead_setauthsize,
7174 + .encrypt = aead_encrypt,
7175 + .decrypt = aead_decrypt,
7176 + .ivsize = AES_BLOCK_SIZE,
7177 + .maxauthsize = MD5_DIGEST_SIZE,
7180 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7181 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7182 + OP_ALG_AAI_HMAC_PRECOMP,
7189 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
7190 + .cra_driver_name = "authenc-hmac-sha1-"
7191 + "cbc-aes-caam-qi2",
7192 + .cra_blocksize = AES_BLOCK_SIZE,
7194 + .setkey = aead_setkey,
7195 + .setauthsize = aead_setauthsize,
7196 + .encrypt = aead_encrypt,
7197 + .decrypt = aead_decrypt,
7198 + .ivsize = AES_BLOCK_SIZE,
7199 + .maxauthsize = SHA1_DIGEST_SIZE,
7202 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7203 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7204 + OP_ALG_AAI_HMAC_PRECOMP,
7210 + .cra_name = "echainiv(authenc(hmac(sha1),"
7212 + .cra_driver_name = "echainiv-authenc-"
7213 + "hmac-sha1-cbc-aes-caam-qi2",
7214 + .cra_blocksize = AES_BLOCK_SIZE,
7216 + .setkey = aead_setkey,
7217 + .setauthsize = aead_setauthsize,
7218 + .encrypt = aead_encrypt,
7219 + .decrypt = aead_decrypt,
7220 + .ivsize = AES_BLOCK_SIZE,
7221 + .maxauthsize = SHA1_DIGEST_SIZE,
7224 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7225 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7226 + OP_ALG_AAI_HMAC_PRECOMP,
7233 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
7234 + .cra_driver_name = "authenc-hmac-sha224-"
7235 + "cbc-aes-caam-qi2",
7236 + .cra_blocksize = AES_BLOCK_SIZE,
7238 + .setkey = aead_setkey,
7239 + .setauthsize = aead_setauthsize,
7240 + .encrypt = aead_encrypt,
7241 + .decrypt = aead_decrypt,
7242 + .ivsize = AES_BLOCK_SIZE,
7243 + .maxauthsize = SHA224_DIGEST_SIZE,
7246 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7247 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7248 + OP_ALG_AAI_HMAC_PRECOMP,
7254 + .cra_name = "echainiv(authenc(hmac(sha224),"
7256 + .cra_driver_name = "echainiv-authenc-"
7257 + "hmac-sha224-cbc-aes-caam-qi2",
7258 + .cra_blocksize = AES_BLOCK_SIZE,
7260 + .setkey = aead_setkey,
7261 + .setauthsize = aead_setauthsize,
7262 + .encrypt = aead_encrypt,
7263 + .decrypt = aead_decrypt,
7264 + .ivsize = AES_BLOCK_SIZE,
7265 + .maxauthsize = SHA224_DIGEST_SIZE,
7268 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7269 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7270 + OP_ALG_AAI_HMAC_PRECOMP,
7277 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
7278 + .cra_driver_name = "authenc-hmac-sha256-"
7279 + "cbc-aes-caam-qi2",
7280 + .cra_blocksize = AES_BLOCK_SIZE,
7282 + .setkey = aead_setkey,
7283 + .setauthsize = aead_setauthsize,
7284 + .encrypt = aead_encrypt,
7285 + .decrypt = aead_decrypt,
7286 + .ivsize = AES_BLOCK_SIZE,
7287 + .maxauthsize = SHA256_DIGEST_SIZE,
7290 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7291 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7292 + OP_ALG_AAI_HMAC_PRECOMP,
7298 + .cra_name = "echainiv(authenc(hmac(sha256),"
7300 + .cra_driver_name = "echainiv-authenc-"
7301 + "hmac-sha256-cbc-aes-"
7303 + .cra_blocksize = AES_BLOCK_SIZE,
7305 + .setkey = aead_setkey,
7306 + .setauthsize = aead_setauthsize,
7307 + .encrypt = aead_encrypt,
7308 + .decrypt = aead_decrypt,
7309 + .ivsize = AES_BLOCK_SIZE,
7310 + .maxauthsize = SHA256_DIGEST_SIZE,
7313 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7314 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7315 + OP_ALG_AAI_HMAC_PRECOMP,
7322 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
7323 + .cra_driver_name = "authenc-hmac-sha384-"
7324 + "cbc-aes-caam-qi2",
7325 + .cra_blocksize = AES_BLOCK_SIZE,
7327 + .setkey = aead_setkey,
7328 + .setauthsize = aead_setauthsize,
7329 + .encrypt = aead_encrypt,
7330 + .decrypt = aead_decrypt,
7331 + .ivsize = AES_BLOCK_SIZE,
7332 + .maxauthsize = SHA384_DIGEST_SIZE,
7335 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7336 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7337 + OP_ALG_AAI_HMAC_PRECOMP,
7343 + .cra_name = "echainiv(authenc(hmac(sha384),"
7345 + .cra_driver_name = "echainiv-authenc-"
7346 + "hmac-sha384-cbc-aes-"
7348 + .cra_blocksize = AES_BLOCK_SIZE,
7350 + .setkey = aead_setkey,
7351 + .setauthsize = aead_setauthsize,
7352 + .encrypt = aead_encrypt,
7353 + .decrypt = aead_decrypt,
7354 + .ivsize = AES_BLOCK_SIZE,
7355 + .maxauthsize = SHA384_DIGEST_SIZE,
7358 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7359 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7360 + OP_ALG_AAI_HMAC_PRECOMP,
7367 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
7368 + .cra_driver_name = "authenc-hmac-sha512-"
7369 + "cbc-aes-caam-qi2",
7370 + .cra_blocksize = AES_BLOCK_SIZE,
7372 + .setkey = aead_setkey,
7373 + .setauthsize = aead_setauthsize,
7374 + .encrypt = aead_encrypt,
7375 + .decrypt = aead_decrypt,
7376 + .ivsize = AES_BLOCK_SIZE,
7377 + .maxauthsize = SHA512_DIGEST_SIZE,
7380 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7381 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7382 + OP_ALG_AAI_HMAC_PRECOMP,
7388 + .cra_name = "echainiv(authenc(hmac(sha512),"
7390 + .cra_driver_name = "echainiv-authenc-"
7391 + "hmac-sha512-cbc-aes-"
7393 + .cra_blocksize = AES_BLOCK_SIZE,
7395 + .setkey = aead_setkey,
7396 + .setauthsize = aead_setauthsize,
7397 + .encrypt = aead_encrypt,
7398 + .decrypt = aead_decrypt,
7399 + .ivsize = AES_BLOCK_SIZE,
7400 + .maxauthsize = SHA512_DIGEST_SIZE,
7403 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
7404 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7405 + OP_ALG_AAI_HMAC_PRECOMP,
7412 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
7413 + .cra_driver_name = "authenc-hmac-md5-"
7414 + "cbc-des3_ede-caam-qi2",
7415 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7417 + .setkey = aead_setkey,
7418 + .setauthsize = aead_setauthsize,
7419 + .encrypt = aead_encrypt,
7420 + .decrypt = aead_decrypt,
7421 + .ivsize = DES3_EDE_BLOCK_SIZE,
7422 + .maxauthsize = MD5_DIGEST_SIZE,
7425 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7426 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7427 + OP_ALG_AAI_HMAC_PRECOMP,
7433 + .cra_name = "echainiv(authenc(hmac(md5),"
7434 + "cbc(des3_ede)))",
7435 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
7436 + "cbc-des3_ede-caam-qi2",
7437 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7439 + .setkey = aead_setkey,
7440 + .setauthsize = aead_setauthsize,
7441 + .encrypt = aead_encrypt,
7442 + .decrypt = aead_decrypt,
7443 + .ivsize = DES3_EDE_BLOCK_SIZE,
7444 + .maxauthsize = MD5_DIGEST_SIZE,
7447 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7448 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7449 + OP_ALG_AAI_HMAC_PRECOMP,
7456 + .cra_name = "authenc(hmac(sha1),"
7458 + .cra_driver_name = "authenc-hmac-sha1-"
7459 + "cbc-des3_ede-caam-qi2",
7460 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7462 + .setkey = aead_setkey,
7463 + .setauthsize = aead_setauthsize,
7464 + .encrypt = aead_encrypt,
7465 + .decrypt = aead_decrypt,
7466 + .ivsize = DES3_EDE_BLOCK_SIZE,
7467 + .maxauthsize = SHA1_DIGEST_SIZE,
7470 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7471 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7472 + OP_ALG_AAI_HMAC_PRECOMP,
7478 + .cra_name = "echainiv(authenc(hmac(sha1),"
7479 + "cbc(des3_ede)))",
7480 + .cra_driver_name = "echainiv-authenc-"
7482 + "cbc-des3_ede-caam-qi2",
7483 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7485 + .setkey = aead_setkey,
7486 + .setauthsize = aead_setauthsize,
7487 + .encrypt = aead_encrypt,
7488 + .decrypt = aead_decrypt,
7489 + .ivsize = DES3_EDE_BLOCK_SIZE,
7490 + .maxauthsize = SHA1_DIGEST_SIZE,
7493 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7494 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7495 + OP_ALG_AAI_HMAC_PRECOMP,
7502 + .cra_name = "authenc(hmac(sha224),"
7504 + .cra_driver_name = "authenc-hmac-sha224-"
7505 + "cbc-des3_ede-caam-qi2",
7506 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7508 + .setkey = aead_setkey,
7509 + .setauthsize = aead_setauthsize,
7510 + .encrypt = aead_encrypt,
7511 + .decrypt = aead_decrypt,
7512 + .ivsize = DES3_EDE_BLOCK_SIZE,
7513 + .maxauthsize = SHA224_DIGEST_SIZE,
7516 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7517 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7518 + OP_ALG_AAI_HMAC_PRECOMP,
7524 + .cra_name = "echainiv(authenc(hmac(sha224),"
7525 + "cbc(des3_ede)))",
7526 + .cra_driver_name = "echainiv-authenc-"
7528 + "cbc-des3_ede-caam-qi2",
7529 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7531 + .setkey = aead_setkey,
7532 + .setauthsize = aead_setauthsize,
7533 + .encrypt = aead_encrypt,
7534 + .decrypt = aead_decrypt,
7535 + .ivsize = DES3_EDE_BLOCK_SIZE,
7536 + .maxauthsize = SHA224_DIGEST_SIZE,
7539 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7540 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7541 + OP_ALG_AAI_HMAC_PRECOMP,
7548 + .cra_name = "authenc(hmac(sha256),"
7550 + .cra_driver_name = "authenc-hmac-sha256-"
7551 + "cbc-des3_ede-caam-qi2",
7552 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7554 + .setkey = aead_setkey,
7555 + .setauthsize = aead_setauthsize,
7556 + .encrypt = aead_encrypt,
7557 + .decrypt = aead_decrypt,
7558 + .ivsize = DES3_EDE_BLOCK_SIZE,
7559 + .maxauthsize = SHA256_DIGEST_SIZE,
7562 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7563 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7564 + OP_ALG_AAI_HMAC_PRECOMP,
7570 + .cra_name = "echainiv(authenc(hmac(sha256),"
7571 + "cbc(des3_ede)))",
7572 + .cra_driver_name = "echainiv-authenc-"
7574 + "cbc-des3_ede-caam-qi2",
7575 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7577 + .setkey = aead_setkey,
7578 + .setauthsize = aead_setauthsize,
7579 + .encrypt = aead_encrypt,
7580 + .decrypt = aead_decrypt,
7581 + .ivsize = DES3_EDE_BLOCK_SIZE,
7582 + .maxauthsize = SHA256_DIGEST_SIZE,
7585 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7586 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7587 + OP_ALG_AAI_HMAC_PRECOMP,
7594 + .cra_name = "authenc(hmac(sha384),"
7596 + .cra_driver_name = "authenc-hmac-sha384-"
7597 + "cbc-des3_ede-caam-qi2",
7598 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7600 + .setkey = aead_setkey,
7601 + .setauthsize = aead_setauthsize,
7602 + .encrypt = aead_encrypt,
7603 + .decrypt = aead_decrypt,
7604 + .ivsize = DES3_EDE_BLOCK_SIZE,
7605 + .maxauthsize = SHA384_DIGEST_SIZE,
7608 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7609 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7610 + OP_ALG_AAI_HMAC_PRECOMP,
7616 + .cra_name = "echainiv(authenc(hmac(sha384),"
7617 + "cbc(des3_ede)))",
7618 + .cra_driver_name = "echainiv-authenc-"
7620 + "cbc-des3_ede-caam-qi2",
7621 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7623 + .setkey = aead_setkey,
7624 + .setauthsize = aead_setauthsize,
7625 + .encrypt = aead_encrypt,
7626 + .decrypt = aead_decrypt,
7627 + .ivsize = DES3_EDE_BLOCK_SIZE,
7628 + .maxauthsize = SHA384_DIGEST_SIZE,
7631 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7632 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7633 + OP_ALG_AAI_HMAC_PRECOMP,
7640 + .cra_name = "authenc(hmac(sha512),"
7642 + .cra_driver_name = "authenc-hmac-sha512-"
7643 + "cbc-des3_ede-caam-qi2",
7644 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7646 + .setkey = aead_setkey,
7647 + .setauthsize = aead_setauthsize,
7648 + .encrypt = aead_encrypt,
7649 + .decrypt = aead_decrypt,
7650 + .ivsize = DES3_EDE_BLOCK_SIZE,
7651 + .maxauthsize = SHA512_DIGEST_SIZE,
7654 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7655 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7656 + OP_ALG_AAI_HMAC_PRECOMP,
7662 + .cra_name = "echainiv(authenc(hmac(sha512),"
7663 + "cbc(des3_ede)))",
7664 + .cra_driver_name = "echainiv-authenc-"
7666 + "cbc-des3_ede-caam-qi2",
7667 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
7669 + .setkey = aead_setkey,
7670 + .setauthsize = aead_setauthsize,
7671 + .encrypt = aead_encrypt,
7672 + .decrypt = aead_decrypt,
7673 + .ivsize = DES3_EDE_BLOCK_SIZE,
7674 + .maxauthsize = SHA512_DIGEST_SIZE,
7677 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
7678 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7679 + OP_ALG_AAI_HMAC_PRECOMP,
7686 + .cra_name = "authenc(hmac(md5),cbc(des))",
7687 + .cra_driver_name = "authenc-hmac-md5-"
7688 + "cbc-des-caam-qi2",
7689 + .cra_blocksize = DES_BLOCK_SIZE,
7691 + .setkey = aead_setkey,
7692 + .setauthsize = aead_setauthsize,
7693 + .encrypt = aead_encrypt,
7694 + .decrypt = aead_decrypt,
7695 + .ivsize = DES_BLOCK_SIZE,
7696 + .maxauthsize = MD5_DIGEST_SIZE,
7699 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7700 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7701 + OP_ALG_AAI_HMAC_PRECOMP,
7707 + .cra_name = "echainiv(authenc(hmac(md5),"
7709 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
7710 + "cbc-des-caam-qi2",
7711 + .cra_blocksize = DES_BLOCK_SIZE,
7713 + .setkey = aead_setkey,
7714 + .setauthsize = aead_setauthsize,
7715 + .encrypt = aead_encrypt,
7716 + .decrypt = aead_decrypt,
7717 + .ivsize = DES_BLOCK_SIZE,
7718 + .maxauthsize = MD5_DIGEST_SIZE,
7721 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7722 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7723 + OP_ALG_AAI_HMAC_PRECOMP,
7730 + .cra_name = "authenc(hmac(sha1),cbc(des))",
7731 + .cra_driver_name = "authenc-hmac-sha1-"
7732 + "cbc-des-caam-qi2",
7733 + .cra_blocksize = DES_BLOCK_SIZE,
7735 + .setkey = aead_setkey,
7736 + .setauthsize = aead_setauthsize,
7737 + .encrypt = aead_encrypt,
7738 + .decrypt = aead_decrypt,
7739 + .ivsize = DES_BLOCK_SIZE,
7740 + .maxauthsize = SHA1_DIGEST_SIZE,
7743 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7744 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7745 + OP_ALG_AAI_HMAC_PRECOMP,
7751 + .cra_name = "echainiv(authenc(hmac(sha1),"
7753 + .cra_driver_name = "echainiv-authenc-"
7754 + "hmac-sha1-cbc-des-caam-qi2",
7755 + .cra_blocksize = DES_BLOCK_SIZE,
7757 + .setkey = aead_setkey,
7758 + .setauthsize = aead_setauthsize,
7759 + .encrypt = aead_encrypt,
7760 + .decrypt = aead_decrypt,
7761 + .ivsize = DES_BLOCK_SIZE,
7762 + .maxauthsize = SHA1_DIGEST_SIZE,
7765 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7766 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
7767 + OP_ALG_AAI_HMAC_PRECOMP,
7774 + .cra_name = "authenc(hmac(sha224),cbc(des))",
7775 + .cra_driver_name = "authenc-hmac-sha224-"
7776 + "cbc-des-caam-qi2",
7777 + .cra_blocksize = DES_BLOCK_SIZE,
7779 + .setkey = aead_setkey,
7780 + .setauthsize = aead_setauthsize,
7781 + .encrypt = aead_encrypt,
7782 + .decrypt = aead_decrypt,
7783 + .ivsize = DES_BLOCK_SIZE,
7784 + .maxauthsize = SHA224_DIGEST_SIZE,
7787 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7788 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7789 + OP_ALG_AAI_HMAC_PRECOMP,
7795 + .cra_name = "echainiv(authenc(hmac(sha224),"
7797 + .cra_driver_name = "echainiv-authenc-"
7798 + "hmac-sha224-cbc-des-"
7800 + .cra_blocksize = DES_BLOCK_SIZE,
7802 + .setkey = aead_setkey,
7803 + .setauthsize = aead_setauthsize,
7804 + .encrypt = aead_encrypt,
7805 + .decrypt = aead_decrypt,
7806 + .ivsize = DES_BLOCK_SIZE,
7807 + .maxauthsize = SHA224_DIGEST_SIZE,
7810 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7811 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
7812 + OP_ALG_AAI_HMAC_PRECOMP,
7819 + .cra_name = "authenc(hmac(sha256),cbc(des))",
7820 + .cra_driver_name = "authenc-hmac-sha256-"
7821 + "cbc-des-caam-qi2",
7822 + .cra_blocksize = DES_BLOCK_SIZE,
7824 + .setkey = aead_setkey,
7825 + .setauthsize = aead_setauthsize,
7826 + .encrypt = aead_encrypt,
7827 + .decrypt = aead_decrypt,
7828 + .ivsize = DES_BLOCK_SIZE,
7829 + .maxauthsize = SHA256_DIGEST_SIZE,
7832 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7833 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7834 + OP_ALG_AAI_HMAC_PRECOMP,
7840 + .cra_name = "echainiv(authenc(hmac(sha256),"
7842 + .cra_driver_name = "echainiv-authenc-"
7843 + "hmac-sha256-cbc-des-"
7845 + .cra_blocksize = DES_BLOCK_SIZE,
7847 + .setkey = aead_setkey,
7848 + .setauthsize = aead_setauthsize,
7849 + .encrypt = aead_encrypt,
7850 + .decrypt = aead_decrypt,
7851 + .ivsize = DES_BLOCK_SIZE,
7852 + .maxauthsize = SHA256_DIGEST_SIZE,
7855 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7856 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
7857 + OP_ALG_AAI_HMAC_PRECOMP,
7864 + .cra_name = "authenc(hmac(sha384),cbc(des))",
7865 + .cra_driver_name = "authenc-hmac-sha384-"
7866 + "cbc-des-caam-qi2",
7867 + .cra_blocksize = DES_BLOCK_SIZE,
7869 + .setkey = aead_setkey,
7870 + .setauthsize = aead_setauthsize,
7871 + .encrypt = aead_encrypt,
7872 + .decrypt = aead_decrypt,
7873 + .ivsize = DES_BLOCK_SIZE,
7874 + .maxauthsize = SHA384_DIGEST_SIZE,
7877 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7878 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7879 + OP_ALG_AAI_HMAC_PRECOMP,
7885 + .cra_name = "echainiv(authenc(hmac(sha384),"
7887 + .cra_driver_name = "echainiv-authenc-"
7888 + "hmac-sha384-cbc-des-"
7890 + .cra_blocksize = DES_BLOCK_SIZE,
7892 + .setkey = aead_setkey,
7893 + .setauthsize = aead_setauthsize,
7894 + .encrypt = aead_encrypt,
7895 + .decrypt = aead_decrypt,
7896 + .ivsize = DES_BLOCK_SIZE,
7897 + .maxauthsize = SHA384_DIGEST_SIZE,
7900 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7901 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
7902 + OP_ALG_AAI_HMAC_PRECOMP,
7909 + .cra_name = "authenc(hmac(sha512),cbc(des))",
7910 + .cra_driver_name = "authenc-hmac-sha512-"
7911 + "cbc-des-caam-qi2",
7912 + .cra_blocksize = DES_BLOCK_SIZE,
7914 + .setkey = aead_setkey,
7915 + .setauthsize = aead_setauthsize,
7916 + .encrypt = aead_encrypt,
7917 + .decrypt = aead_decrypt,
7918 + .ivsize = DES_BLOCK_SIZE,
7919 + .maxauthsize = SHA512_DIGEST_SIZE,
7922 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7923 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7924 + OP_ALG_AAI_HMAC_PRECOMP,
7930 + .cra_name = "echainiv(authenc(hmac(sha512),"
7932 + .cra_driver_name = "echainiv-authenc-"
7933 + "hmac-sha512-cbc-des-"
7935 + .cra_blocksize = DES_BLOCK_SIZE,
7937 + .setkey = aead_setkey,
7938 + .setauthsize = aead_setauthsize,
7939 + .encrypt = aead_encrypt,
7940 + .decrypt = aead_decrypt,
7941 + .ivsize = DES_BLOCK_SIZE,
7942 + .maxauthsize = SHA512_DIGEST_SIZE,
7945 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
7946 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
7947 + OP_ALG_AAI_HMAC_PRECOMP,
7954 + .cra_name = "authenc(hmac(md5),"
7955 + "rfc3686(ctr(aes)))",
7956 + .cra_driver_name = "authenc-hmac-md5-"
7957 + "rfc3686-ctr-aes-caam-qi2",
7958 + .cra_blocksize = 1,
7960 + .setkey = aead_setkey,
7961 + .setauthsize = aead_setauthsize,
7962 + .encrypt = aead_encrypt,
7963 + .decrypt = aead_decrypt,
7964 + .ivsize = CTR_RFC3686_IV_SIZE,
7965 + .maxauthsize = MD5_DIGEST_SIZE,
7968 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7969 + OP_ALG_AAI_CTR_MOD128,
7970 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7971 + OP_ALG_AAI_HMAC_PRECOMP,
7978 + .cra_name = "seqiv(authenc("
7979 + "hmac(md5),rfc3686(ctr(aes))))",
7980 + .cra_driver_name = "seqiv-authenc-hmac-md5-"
7981 + "rfc3686-ctr-aes-caam-qi2",
7982 + .cra_blocksize = 1,
7984 + .setkey = aead_setkey,
7985 + .setauthsize = aead_setauthsize,
7986 + .encrypt = aead_encrypt,
7987 + .decrypt = aead_decrypt,
7988 + .ivsize = CTR_RFC3686_IV_SIZE,
7989 + .maxauthsize = MD5_DIGEST_SIZE,
7992 + .class1_alg_type = OP_ALG_ALGSEL_AES |
7993 + OP_ALG_AAI_CTR_MOD128,
7994 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
7995 + OP_ALG_AAI_HMAC_PRECOMP,
8003 + .cra_name = "authenc(hmac(sha1),"
8004 + "rfc3686(ctr(aes)))",
8005 + .cra_driver_name = "authenc-hmac-sha1-"
8006 + "rfc3686-ctr-aes-caam-qi2",
8007 + .cra_blocksize = 1,
8009 + .setkey = aead_setkey,
8010 + .setauthsize = aead_setauthsize,
8011 + .encrypt = aead_encrypt,
8012 + .decrypt = aead_decrypt,
8013 + .ivsize = CTR_RFC3686_IV_SIZE,
8014 + .maxauthsize = SHA1_DIGEST_SIZE,
8017 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8018 + OP_ALG_AAI_CTR_MOD128,
8019 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
8020 + OP_ALG_AAI_HMAC_PRECOMP,
8027 + .cra_name = "seqiv(authenc("
8028 + "hmac(sha1),rfc3686(ctr(aes))))",
8029 + .cra_driver_name = "seqiv-authenc-hmac-sha1-"
8030 + "rfc3686-ctr-aes-caam-qi2",
8031 + .cra_blocksize = 1,
8033 + .setkey = aead_setkey,
8034 + .setauthsize = aead_setauthsize,
8035 + .encrypt = aead_encrypt,
8036 + .decrypt = aead_decrypt,
8037 + .ivsize = CTR_RFC3686_IV_SIZE,
8038 + .maxauthsize = SHA1_DIGEST_SIZE,
8041 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8042 + OP_ALG_AAI_CTR_MOD128,
8043 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
8044 + OP_ALG_AAI_HMAC_PRECOMP,
8052 + .cra_name = "authenc(hmac(sha224),"
8053 + "rfc3686(ctr(aes)))",
8054 + .cra_driver_name = "authenc-hmac-sha224-"
8055 + "rfc3686-ctr-aes-caam-qi2",
8056 + .cra_blocksize = 1,
8058 + .setkey = aead_setkey,
8059 + .setauthsize = aead_setauthsize,
8060 + .encrypt = aead_encrypt,
8061 + .decrypt = aead_decrypt,
8062 + .ivsize = CTR_RFC3686_IV_SIZE,
8063 + .maxauthsize = SHA224_DIGEST_SIZE,
8066 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8067 + OP_ALG_AAI_CTR_MOD128,
8068 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
8069 + OP_ALG_AAI_HMAC_PRECOMP,
8076 + .cra_name = "seqiv(authenc("
8077 + "hmac(sha224),rfc3686(ctr(aes))))",
8078 + .cra_driver_name = "seqiv-authenc-hmac-sha224-"
8079 + "rfc3686-ctr-aes-caam-qi2",
8080 + .cra_blocksize = 1,
8082 + .setkey = aead_setkey,
8083 + .setauthsize = aead_setauthsize,
8084 + .encrypt = aead_encrypt,
8085 + .decrypt = aead_decrypt,
8086 + .ivsize = CTR_RFC3686_IV_SIZE,
8087 + .maxauthsize = SHA224_DIGEST_SIZE,
8090 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8091 + OP_ALG_AAI_CTR_MOD128,
8092 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
8093 + OP_ALG_AAI_HMAC_PRECOMP,
8101 + .cra_name = "authenc(hmac(sha256),"
8102 + "rfc3686(ctr(aes)))",
8103 + .cra_driver_name = "authenc-hmac-sha256-"
8104 + "rfc3686-ctr-aes-caam-qi2",
8105 + .cra_blocksize = 1,
8107 + .setkey = aead_setkey,
8108 + .setauthsize = aead_setauthsize,
8109 + .encrypt = aead_encrypt,
8110 + .decrypt = aead_decrypt,
8111 + .ivsize = CTR_RFC3686_IV_SIZE,
8112 + .maxauthsize = SHA256_DIGEST_SIZE,
8115 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8116 + OP_ALG_AAI_CTR_MOD128,
8117 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
8118 + OP_ALG_AAI_HMAC_PRECOMP,
8125 + .cra_name = "seqiv(authenc(hmac(sha256),"
8126 + "rfc3686(ctr(aes))))",
8127 + .cra_driver_name = "seqiv-authenc-hmac-sha256-"
8128 + "rfc3686-ctr-aes-caam-qi2",
8129 + .cra_blocksize = 1,
8131 + .setkey = aead_setkey,
8132 + .setauthsize = aead_setauthsize,
8133 + .encrypt = aead_encrypt,
8134 + .decrypt = aead_decrypt,
8135 + .ivsize = CTR_RFC3686_IV_SIZE,
8136 + .maxauthsize = SHA256_DIGEST_SIZE,
8139 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8140 + OP_ALG_AAI_CTR_MOD128,
8141 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
8142 + OP_ALG_AAI_HMAC_PRECOMP,
8150 + .cra_name = "authenc(hmac(sha384),"
8151 + "rfc3686(ctr(aes)))",
8152 + .cra_driver_name = "authenc-hmac-sha384-"
8153 + "rfc3686-ctr-aes-caam-qi2",
8154 + .cra_blocksize = 1,
8156 + .setkey = aead_setkey,
8157 + .setauthsize = aead_setauthsize,
8158 + .encrypt = aead_encrypt,
8159 + .decrypt = aead_decrypt,
8160 + .ivsize = CTR_RFC3686_IV_SIZE,
8161 + .maxauthsize = SHA384_DIGEST_SIZE,
8164 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8165 + OP_ALG_AAI_CTR_MOD128,
8166 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
8167 + OP_ALG_AAI_HMAC_PRECOMP,
8174 + .cra_name = "seqiv(authenc(hmac(sha384),"
8175 + "rfc3686(ctr(aes))))",
8176 + .cra_driver_name = "seqiv-authenc-hmac-sha384-"
8177 + "rfc3686-ctr-aes-caam-qi2",
8178 + .cra_blocksize = 1,
8180 + .setkey = aead_setkey,
8181 + .setauthsize = aead_setauthsize,
8182 + .encrypt = aead_encrypt,
8183 + .decrypt = aead_decrypt,
8184 + .ivsize = CTR_RFC3686_IV_SIZE,
8185 + .maxauthsize = SHA384_DIGEST_SIZE,
8188 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8189 + OP_ALG_AAI_CTR_MOD128,
8190 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
8191 + OP_ALG_AAI_HMAC_PRECOMP,
8199 + .cra_name = "rfc7539(chacha20,poly1305)",
8200 + .cra_driver_name = "rfc7539-chacha20-poly1305-"
8202 + .cra_blocksize = 1,
8204 + .setkey = chachapoly_setkey,
8205 + .setauthsize = chachapoly_setauthsize,
8206 + .encrypt = aead_encrypt,
8207 + .decrypt = aead_decrypt,
8208 + .ivsize = CHACHAPOLY_IV_SIZE,
8209 + .maxauthsize = POLY1305_DIGEST_SIZE,
8212 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
8214 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
8221 + .cra_name = "rfc7539esp(chacha20,poly1305)",
8222 + .cra_driver_name = "rfc7539esp-chacha20-"
8223 + "poly1305-caam-qi2",
8224 + .cra_blocksize = 1,
8226 + .setkey = chachapoly_setkey,
8227 + .setauthsize = chachapoly_setauthsize,
8228 + .encrypt = aead_encrypt,
8229 + .decrypt = aead_decrypt,
8231 + .maxauthsize = POLY1305_DIGEST_SIZE,
8234 + .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
8236 + .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
8243 + .cra_name = "authenc(hmac(sha512),"
8244 + "rfc3686(ctr(aes)))",
8245 + .cra_driver_name = "authenc-hmac-sha512-"
8246 + "rfc3686-ctr-aes-caam-qi2",
8247 + .cra_blocksize = 1,
8249 + .setkey = aead_setkey,
8250 + .setauthsize = aead_setauthsize,
8251 + .encrypt = aead_encrypt,
8252 + .decrypt = aead_decrypt,
8253 + .ivsize = CTR_RFC3686_IV_SIZE,
8254 + .maxauthsize = SHA512_DIGEST_SIZE,
8257 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8258 + OP_ALG_AAI_CTR_MOD128,
8259 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
8260 + OP_ALG_AAI_HMAC_PRECOMP,
8267 + .cra_name = "seqiv(authenc(hmac(sha512),"
8268 + "rfc3686(ctr(aes))))",
8269 + .cra_driver_name = "seqiv-authenc-hmac-sha512-"
8270 + "rfc3686-ctr-aes-caam-qi2",
8271 + .cra_blocksize = 1,
8273 + .setkey = aead_setkey,
8274 + .setauthsize = aead_setauthsize,
8275 + .encrypt = aead_encrypt,
8276 + .decrypt = aead_decrypt,
8277 + .ivsize = CTR_RFC3686_IV_SIZE,
8278 + .maxauthsize = SHA512_DIGEST_SIZE,
8281 + .class1_alg_type = OP_ALG_ALGSEL_AES |
8282 + OP_ALG_AAI_CTR_MOD128,
8283 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
8284 + OP_ALG_AAI_HMAC_PRECOMP,
8292 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
8293 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
8294 + .cra_blocksize = AES_BLOCK_SIZE,
8296 + .setkey = tls_setkey,
8297 + .setauthsize = tls_setauthsize,
8298 + .encrypt = tls_encrypt,
8299 + .decrypt = tls_decrypt,
8300 + .ivsize = AES_BLOCK_SIZE,
8301 + .maxauthsize = SHA1_DIGEST_SIZE,
8304 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
8305 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
8306 + OP_ALG_AAI_HMAC_PRECOMP,
8311 +static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
8313 + struct skcipher_alg *alg = &t_alg->skcipher;
8315 + alg->base.cra_module = THIS_MODULE;
8316 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
8317 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
8318 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
8320 + alg->init = caam_cra_init_skcipher;
8321 + alg->exit = caam_cra_exit;
8324 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
8326 + struct aead_alg *alg = &t_alg->aead;
8328 + alg->base.cra_module = THIS_MODULE;
8329 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
8330 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
8331 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
8333 + alg->init = caam_cra_init_aead;
8334 + alg->exit = caam_cra_exit_aead;
8337 +/* max hash key is max split key size */
8338 +#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
8340 +#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
8341 +#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
8343 +#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
8344 + CAAM_MAX_HASH_KEY_SIZE)
8345 +#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
8347 +/* caam context sizes for hashes: running digest + 8 */
8348 +#define HASH_MSG_LEN 8
8349 +#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
8360 + * caam_hash_ctx - ahash per-session context
8361 + * @flc: Flow Contexts array
8362 + * @flc_dma: I/O virtual addresses of the Flow Contexts
8363 + * @key: virtual address of the authentication key
8364 + * @dev: dpseci device
8365 + * @ctx_len: size of Context Register
8366 + * @adata: hashing algorithm details
8368 +struct caam_hash_ctx {
8369 + struct caam_flc flc[HASH_NUM_OP];
8370 + dma_addr_t flc_dma[HASH_NUM_OP];
8371 + u8 key[CAAM_MAX_HASH_KEY_SIZE];
8372 + struct device *dev;
8374 + struct alginfo adata;
8378 +struct caam_hash_state {
8379 + struct caam_request caam_req;
8380 + dma_addr_t buf_dma;
8381 + dma_addr_t ctx_dma;
8382 + u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
8384 + u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
8386 + u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
8387 + int (*update)(struct ahash_request *req);
8388 + int (*final)(struct ahash_request *req);
8389 + int (*finup)(struct ahash_request *req);
8393 +struct caam_export_state {
8394 + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
8395 + u8 caam_ctx[MAX_CTX_LEN];
8397 + int (*update)(struct ahash_request *req);
8398 + int (*final)(struct ahash_request *req);
8399 + int (*finup)(struct ahash_request *req);
8402 +static inline void switch_buf(struct caam_hash_state *state)
8404 + state->current_buf ^= 1;
8407 +static inline u8 *current_buf(struct caam_hash_state *state)
8409 + return state->current_buf ? state->buf_1 : state->buf_0;
8412 +static inline u8 *alt_buf(struct caam_hash_state *state)
8414 + return state->current_buf ? state->buf_0 : state->buf_1;
8417 +static inline int *current_buflen(struct caam_hash_state *state)
8419 + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
8422 +static inline int *alt_buflen(struct caam_hash_state *state)
8424 + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
8427 +/* Map current buffer in state (if length > 0) and put it in link table */
8428 +static inline int buf_map_to_qm_sg(struct device *dev,
8429 + struct dpaa2_sg_entry *qm_sg,
8430 + struct caam_hash_state *state)
8432 + int buflen = *current_buflen(state);
8437 + state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
8439 + if (dma_mapping_error(dev, state->buf_dma)) {
8440 + dev_err(dev, "unable to map buf\n");
8441 + state->buf_dma = 0;
8445 + dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
8450 +/* Map state->caam_ctx, and add it to link table */
8451 +static inline int ctx_map_to_qm_sg(struct device *dev,
8452 + struct caam_hash_state *state, int ctx_len,
8453 + struct dpaa2_sg_entry *qm_sg, u32 flag)
8455 + state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
8456 + if (dma_mapping_error(dev, state->ctx_dma)) {
8457 + dev_err(dev, "unable to map ctx\n");
8458 + state->ctx_dma = 0;
8462 + dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
8467 +static int ahash_set_sh_desc(struct crypto_ahash *ahash)
8469 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8470 + int digestsize = crypto_ahash_digestsize(ahash);
8471 + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
8472 + struct caam_flc *flc;
8475 + ctx->adata.key_virt = ctx->key;
8476 + ctx->adata.key_inline = true;
8478 + /* ahash_update shared descriptor */
8479 + flc = &ctx->flc[UPDATE];
8480 + desc = flc->sh_desc;
8481 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
8482 + ctx->ctx_len, true, priv->sec_attr.era);
8483 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8484 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
8485 + desc_bytes(desc), DMA_BIDIRECTIONAL);
8487 + print_hex_dump(KERN_ERR,
8488 + "ahash update shdesc@" __stringify(__LINE__)": ",
8489 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8492 + /* ahash_update_first shared descriptor */
8493 + flc = &ctx->flc[UPDATE_FIRST];
8494 + desc = flc->sh_desc;
8495 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
8496 + ctx->ctx_len, false, priv->sec_attr.era);
8497 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8498 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
8499 + desc_bytes(desc), DMA_BIDIRECTIONAL);
8501 + print_hex_dump(KERN_ERR,
8502 + "ahash update first shdesc@" __stringify(__LINE__)": ",
8503 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8506 + /* ahash_final shared descriptor */
8507 + flc = &ctx->flc[FINALIZE];
8508 + desc = flc->sh_desc;
8509 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
8510 + ctx->ctx_len, true, priv->sec_attr.era);
8511 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8512 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
8513 + desc_bytes(desc), DMA_BIDIRECTIONAL);
8515 + print_hex_dump(KERN_ERR,
8516 + "ahash final shdesc@" __stringify(__LINE__)": ",
8517 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8520 + /* ahash_digest shared descriptor */
8521 + flc = &ctx->flc[DIGEST];
8522 + desc = flc->sh_desc;
8523 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
8524 + ctx->ctx_len, false, priv->sec_attr.era);
8525 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8526 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
8527 + desc_bytes(desc), DMA_BIDIRECTIONAL);
8529 + print_hex_dump(KERN_ERR,
8530 + "ahash digest shdesc@" __stringify(__LINE__)": ",
8531 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8537 +/* Digest hash size if it is too large */
8538 +static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
8539 + u32 *keylen, u8 *key_out, u32 digestsize)
8541 + struct caam_request *req_ctx;
8543 + struct split_key_sh_result result;
8544 + dma_addr_t src_dma, dst_dma;
8545 + struct caam_flc *flc;
8546 + dma_addr_t flc_dma;
8547 + int ret = -ENOMEM;
8548 + struct dpaa2_fl_entry *in_fle, *out_fle;
8550 + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
8554 + in_fle = &req_ctx->fd_flt[1];
8555 + out_fle = &req_ctx->fd_flt[0];
8557 + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
8561 + src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
8563 + if (dma_mapping_error(ctx->dev, src_dma)) {
8564 + dev_err(ctx->dev, "unable to map key input memory\n");
8567 + dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
8569 + if (dma_mapping_error(ctx->dev, dst_dma)) {
8570 + dev_err(ctx->dev, "unable to map key output memory\n");
8574 + desc = flc->sh_desc;
8576 + init_sh_desc(desc, 0);
8578 + /* descriptor to perform unkeyed hash on key_in */
8579 + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
8580 + OP_ALG_AS_INITFINAL);
8581 + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
8582 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
8583 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
8584 + LDST_SRCDST_BYTE_CONTEXT);
8586 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
8587 + flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
8588 + desc_bytes(desc), DMA_TO_DEVICE);
8589 + if (dma_mapping_error(ctx->dev, flc_dma)) {
8590 + dev_err(ctx->dev, "unable to map shared descriptor\n");
8594 + dpaa2_fl_set_final(in_fle, true);
8595 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
8596 + dpaa2_fl_set_addr(in_fle, src_dma);
8597 + dpaa2_fl_set_len(in_fle, *keylen);
8598 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8599 + dpaa2_fl_set_addr(out_fle, dst_dma);
8600 + dpaa2_fl_set_len(out_fle, digestsize);
8603 + print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
8604 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
8605 + print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
8606 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8610 + init_completion(&result.completion);
8611 + result.dev = ctx->dev;
8613 + req_ctx->flc = flc;
8614 + req_ctx->flc_dma = flc_dma;
8615 + req_ctx->cbk = split_key_sh_done;
8616 + req_ctx->ctx = &result;
8618 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8619 + if (ret == -EINPROGRESS) {
8621 + wait_for_completion(&result.completion);
8624 + print_hex_dump(KERN_ERR,
8625 + "digested key@" __stringify(__LINE__)": ",
8626 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize,
8631 + dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
8634 + dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
8636 + dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
8642 + *keylen = digestsize;
8647 +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
8648 + unsigned int keylen)
8650 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8651 + unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
8652 + unsigned int digestsize = crypto_ahash_digestsize(ahash);
8654 + u8 *hashed_key = NULL;
8657 + dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
8660 + if (keylen > blocksize) {
8661 + hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
8662 + GFP_KERNEL | GFP_DMA);
8665 + ret = hash_digest_key(ctx, key, &keylen, hashed_key,
8668 + goto bad_free_key;
8672 + ctx->adata.keylen = keylen;
8673 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
8674 + OP_ALG_ALGSEL_MASK);
8675 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
8676 + goto bad_free_key;
8678 + memcpy(ctx->key, key, keylen);
8680 + kfree(hashed_key);
8681 + return ahash_set_sh_desc(ahash);
8683 + kfree(hashed_key);
8684 + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
8688 +static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
8689 + struct ahash_request *req, int dst_len)
8691 + struct caam_hash_state *state = ahash_request_ctx(req);
8693 + if (edesc->src_nents)
8694 + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
8695 + if (edesc->dst_dma)
8696 + dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
8698 + if (edesc->qm_sg_bytes)
8699 + dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
8702 + if (state->buf_dma) {
8703 + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
8705 + state->buf_dma = 0;
8709 +static inline void ahash_unmap_ctx(struct device *dev,
8710 + struct ahash_edesc *edesc,
8711 + struct ahash_request *req, int dst_len,
8714 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8715 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8716 + struct caam_hash_state *state = ahash_request_ctx(req);
8718 + if (state->ctx_dma) {
8719 + dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
8720 + state->ctx_dma = 0;
8722 + ahash_unmap(dev, edesc, req, dst_len);
8725 +static void ahash_done(void *cbk_ctx, u32 status)
8727 + struct crypto_async_request *areq = cbk_ctx;
8728 + struct ahash_request *req = ahash_request_cast(areq);
8729 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8730 + struct caam_hash_state *state = ahash_request_ctx(req);
8731 + struct ahash_edesc *edesc = state->caam_req.edesc;
8732 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8733 + int digestsize = crypto_ahash_digestsize(ahash);
8737 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8740 + if (unlikely(status)) {
8741 + caam_qi2_strstatus(ctx->dev, status);
8745 + ahash_unmap(ctx->dev, edesc, req, digestsize);
8746 + qi_cache_free(edesc);
8749 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8750 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8753 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8754 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8758 + req->base.complete(&req->base, ecode);
8761 +static void ahash_done_bi(void *cbk_ctx, u32 status)
8763 + struct crypto_async_request *areq = cbk_ctx;
8764 + struct ahash_request *req = ahash_request_cast(areq);
8765 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8766 + struct caam_hash_state *state = ahash_request_ctx(req);
8767 + struct ahash_edesc *edesc = state->caam_req.edesc;
8768 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8771 + int digestsize = crypto_ahash_digestsize(ahash);
8773 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8776 + if (unlikely(status)) {
8777 + caam_qi2_strstatus(ctx->dev, status);
8781 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
8782 + switch_buf(state);
8783 + qi_cache_free(edesc);
8786 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8787 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8790 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8791 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8795 + req->base.complete(&req->base, ecode);
8798 +static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
8800 + struct crypto_async_request *areq = cbk_ctx;
8801 + struct ahash_request *req = ahash_request_cast(areq);
8802 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8803 + struct caam_hash_state *state = ahash_request_ctx(req);
8804 + struct ahash_edesc *edesc = state->caam_req.edesc;
8805 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8806 + int digestsize = crypto_ahash_digestsize(ahash);
8810 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8813 + if (unlikely(status)) {
8814 + caam_qi2_strstatus(ctx->dev, status);
8818 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
8819 + qi_cache_free(edesc);
8822 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8823 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8826 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8827 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8831 + req->base.complete(&req->base, ecode);
8834 +static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
8836 + struct crypto_async_request *areq = cbk_ctx;
8837 + struct ahash_request *req = ahash_request_cast(areq);
8838 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8839 + struct caam_hash_state *state = ahash_request_ctx(req);
8840 + struct ahash_edesc *edesc = state->caam_req.edesc;
8841 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8844 + int digestsize = crypto_ahash_digestsize(ahash);
8846 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
8849 + if (unlikely(status)) {
8850 + caam_qi2_strstatus(ctx->dev, status);
8854 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
8855 + switch_buf(state);
8856 + qi_cache_free(edesc);
8859 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
8860 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
8863 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
8864 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
8868 + req->base.complete(&req->base, ecode);
8871 +static int ahash_update_ctx(struct ahash_request *req)
8873 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
8874 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
8875 + struct caam_hash_state *state = ahash_request_ctx(req);
8876 + struct caam_request *req_ctx = &state->caam_req;
8877 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
8878 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
8879 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
8880 + GFP_KERNEL : GFP_ATOMIC;
8881 + u8 *buf = current_buf(state);
8882 + int *buflen = current_buflen(state);
8883 + u8 *next_buf = alt_buf(state);
8884 + int *next_buflen = alt_buflen(state), last_buflen;
8885 + int in_len = *buflen + req->nbytes, to_hash;
8886 + int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
8887 + struct ahash_edesc *edesc;
8890 + last_buflen = *next_buflen;
8891 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
8892 + to_hash = in_len - *next_buflen;
8895 + struct dpaa2_sg_entry *sg_table;
8897 + src_nents = sg_nents_for_len(req->src,
8898 + req->nbytes - (*next_buflen));
8899 + if (src_nents < 0) {
8900 + dev_err(ctx->dev, "Invalid number of src SG.\n");
8905 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
8907 + if (!mapped_nents) {
8908 + dev_err(ctx->dev, "unable to DMA map source\n");
8915 + /* allocate space for base edesc and link tables */
8916 + edesc = qi_cache_zalloc(GFP_DMA | flags);
8918 + dma_unmap_sg(ctx->dev, req->src, src_nents,
8923 + edesc->src_nents = src_nents;
8924 + qm_sg_src_index = 1 + (*buflen ? 1 : 0);
8925 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
8926 + sizeof(*sg_table);
8927 + sg_table = &edesc->sgt[0];
8929 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
8930 + DMA_BIDIRECTIONAL);
8934 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
8938 + if (mapped_nents) {
8939 + sg_to_qm_sg_last(req->src, mapped_nents,
8940 + sg_table + qm_sg_src_index, 0);
8942 + scatterwalk_map_and_copy(next_buf, req->src,
8943 + to_hash - *buflen,
8946 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
8950 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
8951 + qm_sg_bytes, DMA_TO_DEVICE);
8952 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
8953 + dev_err(ctx->dev, "unable to map S/G table\n");
8957 + edesc->qm_sg_bytes = qm_sg_bytes;
8959 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
8960 + dpaa2_fl_set_final(in_fle, true);
8961 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
8962 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
8963 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
8964 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
8965 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
8966 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
8968 + req_ctx->flc = &ctx->flc[UPDATE];
8969 + req_ctx->flc_dma = ctx->flc_dma[UPDATE];
8970 + req_ctx->cbk = ahash_done_bi;
8971 + req_ctx->ctx = &req->base;
8972 + req_ctx->edesc = edesc;
8974 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
8975 + if (ret != -EINPROGRESS &&
8976 + !(ret == -EBUSY &&
8977 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
8979 + } else if (*next_buflen) {
8980 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
8982 + *buflen = *next_buflen;
8983 + *next_buflen = last_buflen;
8986 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
8987 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
8988 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
8989 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
8995 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
8996 + qi_cache_free(edesc);
9000 +static int ahash_final_ctx(struct ahash_request *req)
9002 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9003 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9004 + struct caam_hash_state *state = ahash_request_ctx(req);
9005 + struct caam_request *req_ctx = &state->caam_req;
9006 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9007 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9008 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9009 + GFP_KERNEL : GFP_ATOMIC;
9010 + int buflen = *current_buflen(state);
9011 + int qm_sg_bytes, qm_sg_src_index;
9012 + int digestsize = crypto_ahash_digestsize(ahash);
9013 + struct ahash_edesc *edesc;
9014 + struct dpaa2_sg_entry *sg_table;
9017 + /* allocate space for base edesc and link tables */
9018 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9022 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
9023 + qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
9024 + sg_table = &edesc->sgt[0];
9026 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
9031 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
9035 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
9037 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
9039 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9040 + dev_err(ctx->dev, "unable to map S/G table\n");
9044 + edesc->qm_sg_bytes = qm_sg_bytes;
9046 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9048 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9049 + dev_err(ctx->dev, "unable to map dst\n");
9050 + edesc->dst_dma = 0;
9055 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9056 + dpaa2_fl_set_final(in_fle, true);
9057 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9058 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9059 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
9060 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9061 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9062 + dpaa2_fl_set_len(out_fle, digestsize);
9064 + req_ctx->flc = &ctx->flc[FINALIZE];
9065 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
9066 + req_ctx->cbk = ahash_done_ctx_src;
9067 + req_ctx->ctx = &req->base;
9068 + req_ctx->edesc = edesc;
9070 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9071 + if (ret == -EINPROGRESS ||
9072 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9076 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
9077 + qi_cache_free(edesc);
9081 +static int ahash_finup_ctx(struct ahash_request *req)
9083 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9084 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9085 + struct caam_hash_state *state = ahash_request_ctx(req);
9086 + struct caam_request *req_ctx = &state->caam_req;
9087 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9088 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9089 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9090 + GFP_KERNEL : GFP_ATOMIC;
9091 + int buflen = *current_buflen(state);
9092 + int qm_sg_bytes, qm_sg_src_index;
9093 + int src_nents, mapped_nents;
9094 + int digestsize = crypto_ahash_digestsize(ahash);
9095 + struct ahash_edesc *edesc;
9096 + struct dpaa2_sg_entry *sg_table;
9099 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9100 + if (src_nents < 0) {
9101 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9106 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9108 + if (!mapped_nents) {
9109 + dev_err(ctx->dev, "unable to DMA map source\n");
9116 + /* allocate space for base edesc and link tables */
9117 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9119 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
9123 + edesc->src_nents = src_nents;
9124 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
9125 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
9126 + sg_table = &edesc->sgt[0];
9128 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
9133 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
9137 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
9139 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
9141 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9142 + dev_err(ctx->dev, "unable to map S/G table\n");
9146 + edesc->qm_sg_bytes = qm_sg_bytes;
9148 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9150 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9151 + dev_err(ctx->dev, "unable to map dst\n");
9152 + edesc->dst_dma = 0;
9157 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9158 + dpaa2_fl_set_final(in_fle, true);
9159 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9160 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9161 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
9162 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9163 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9164 + dpaa2_fl_set_len(out_fle, digestsize);
9166 + req_ctx->flc = &ctx->flc[FINALIZE];
9167 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
9168 + req_ctx->cbk = ahash_done_ctx_src;
9169 + req_ctx->ctx = &req->base;
9170 + req_ctx->edesc = edesc;
9172 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9173 + if (ret == -EINPROGRESS ||
9174 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9178 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
9179 + qi_cache_free(edesc);
9183 +static int ahash_digest(struct ahash_request *req)
9185 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9186 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9187 + struct caam_hash_state *state = ahash_request_ctx(req);
9188 + struct caam_request *req_ctx = &state->caam_req;
9189 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9190 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9191 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9192 + GFP_KERNEL : GFP_ATOMIC;
9193 + int digestsize = crypto_ahash_digestsize(ahash);
9194 + int src_nents, mapped_nents;
9195 + struct ahash_edesc *edesc;
9196 + int ret = -ENOMEM;
9198 + state->buf_dma = 0;
9200 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9201 + if (src_nents < 0) {
9202 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9207 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9209 + if (!mapped_nents) {
9210 + dev_err(ctx->dev, "unable to map source for DMA\n");
9217 + /* allocate space for base edesc and link tables */
9218 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9220 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
9224 + edesc->src_nents = src_nents;
9225 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9227 + if (mapped_nents > 1) {
9229 + struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
9231 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
9232 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
9233 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
9234 + qm_sg_bytes, DMA_TO_DEVICE);
9235 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9236 + dev_err(ctx->dev, "unable to map S/G table\n");
9239 + edesc->qm_sg_bytes = qm_sg_bytes;
9240 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9241 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9243 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
9244 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
9247 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9249 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9250 + dev_err(ctx->dev, "unable to map dst\n");
9251 + edesc->dst_dma = 0;
9255 + dpaa2_fl_set_final(in_fle, true);
9256 + dpaa2_fl_set_len(in_fle, req->nbytes);
9257 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9258 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9259 + dpaa2_fl_set_len(out_fle, digestsize);
9261 + req_ctx->flc = &ctx->flc[DIGEST];
9262 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
9263 + req_ctx->cbk = ahash_done;
9264 + req_ctx->ctx = &req->base;
9265 + req_ctx->edesc = edesc;
9266 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9267 + if (ret == -EINPROGRESS ||
9268 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9272 + ahash_unmap(ctx->dev, edesc, req, digestsize);
9273 + qi_cache_free(edesc);
9277 +static int ahash_final_no_ctx(struct ahash_request *req)
9279 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9280 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9281 + struct caam_hash_state *state = ahash_request_ctx(req);
9282 + struct caam_request *req_ctx = &state->caam_req;
9283 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9284 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9285 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9286 + GFP_KERNEL : GFP_ATOMIC;
9287 + u8 *buf = current_buf(state);
9288 + int buflen = *current_buflen(state);
9289 + int digestsize = crypto_ahash_digestsize(ahash);
9290 + struct ahash_edesc *edesc;
9291 + int ret = -ENOMEM;
9293 + /* allocate space for base edesc and link tables */
9294 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9298 + state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
9299 + if (dma_mapping_error(ctx->dev, state->buf_dma)) {
9300 + dev_err(ctx->dev, "unable to map src\n");
9304 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9306 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9307 + dev_err(ctx->dev, "unable to map dst\n");
9308 + edesc->dst_dma = 0;
9312 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9313 + dpaa2_fl_set_final(in_fle, true);
9314 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
9315 + dpaa2_fl_set_addr(in_fle, state->buf_dma);
9316 + dpaa2_fl_set_len(in_fle, buflen);
9317 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9318 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9319 + dpaa2_fl_set_len(out_fle, digestsize);
9321 + req_ctx->flc = &ctx->flc[DIGEST];
9322 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
9323 + req_ctx->cbk = ahash_done;
9324 + req_ctx->ctx = &req->base;
9325 + req_ctx->edesc = edesc;
9327 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9328 + if (ret == -EINPROGRESS ||
9329 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9333 + ahash_unmap(ctx->dev, edesc, req, digestsize);
9334 + qi_cache_free(edesc);
9338 +static int ahash_update_no_ctx(struct ahash_request *req)
9340 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9341 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9342 + struct caam_hash_state *state = ahash_request_ctx(req);
9343 + struct caam_request *req_ctx = &state->caam_req;
9344 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9345 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9346 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9347 + GFP_KERNEL : GFP_ATOMIC;
9348 + u8 *buf = current_buf(state);
9349 + int *buflen = current_buflen(state);
9350 + u8 *next_buf = alt_buf(state);
9351 + int *next_buflen = alt_buflen(state);
9352 + int in_len = *buflen + req->nbytes, to_hash;
9353 + int qm_sg_bytes, src_nents, mapped_nents;
9354 + struct ahash_edesc *edesc;
9357 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
9358 + to_hash = in_len - *next_buflen;
9361 + struct dpaa2_sg_entry *sg_table;
9363 + src_nents = sg_nents_for_len(req->src,
9364 + req->nbytes - *next_buflen);
9365 + if (src_nents < 0) {
9366 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9371 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9373 + if (!mapped_nents) {
9374 + dev_err(ctx->dev, "unable to DMA map source\n");
9381 + /* allocate space for base edesc and link tables */
9382 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9384 + dma_unmap_sg(ctx->dev, req->src, src_nents,
9389 + edesc->src_nents = src_nents;
9390 + qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
9391 + sg_table = &edesc->sgt[0];
9393 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
9397 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
9400 + scatterwalk_map_and_copy(next_buf, req->src,
9401 + to_hash - *buflen,
9404 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
9405 + qm_sg_bytes, DMA_TO_DEVICE);
9406 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9407 + dev_err(ctx->dev, "unable to map S/G table\n");
9411 + edesc->qm_sg_bytes = qm_sg_bytes;
9413 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
9414 + ctx->ctx_len, DMA_FROM_DEVICE);
9415 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
9416 + dev_err(ctx->dev, "unable to map ctx\n");
9417 + state->ctx_dma = 0;
9422 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9423 + dpaa2_fl_set_final(in_fle, true);
9424 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9425 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9426 + dpaa2_fl_set_len(in_fle, to_hash);
9427 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9428 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
9429 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
9431 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
9432 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
9433 + req_ctx->cbk = ahash_done_ctx_dst;
9434 + req_ctx->ctx = &req->base;
9435 + req_ctx->edesc = edesc;
9437 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9438 + if (ret != -EINPROGRESS &&
9439 + !(ret == -EBUSY &&
9440 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9443 + state->update = ahash_update_ctx;
9444 + state->finup = ahash_finup_ctx;
9445 + state->final = ahash_final_ctx;
9446 + } else if (*next_buflen) {
9447 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
9449 + *buflen = *next_buflen;
9453 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
9454 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
9455 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
9456 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
9462 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
9463 + qi_cache_free(edesc);
9467 +static int ahash_finup_no_ctx(struct ahash_request *req)
9469 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9470 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9471 + struct caam_hash_state *state = ahash_request_ctx(req);
9472 + struct caam_request *req_ctx = &state->caam_req;
9473 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9474 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9475 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9476 + GFP_KERNEL : GFP_ATOMIC;
9477 + int buflen = *current_buflen(state);
9478 + int qm_sg_bytes, src_nents, mapped_nents;
9479 + int digestsize = crypto_ahash_digestsize(ahash);
9480 + struct ahash_edesc *edesc;
9481 + struct dpaa2_sg_entry *sg_table;
9484 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9485 + if (src_nents < 0) {
9486 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9491 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9493 + if (!mapped_nents) {
9494 + dev_err(ctx->dev, "unable to DMA map source\n");
9501 + /* allocate space for base edesc and link tables */
9502 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9504 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
9508 + edesc->src_nents = src_nents;
9509 + qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
9510 + sg_table = &edesc->sgt[0];
9512 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
9516 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
9518 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
9520 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9521 + dev_err(ctx->dev, "unable to map S/G table\n");
9525 + edesc->qm_sg_bytes = qm_sg_bytes;
9527 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
9529 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
9530 + dev_err(ctx->dev, "unable to map dst\n");
9531 + edesc->dst_dma = 0;
9536 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9537 + dpaa2_fl_set_final(in_fle, true);
9538 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9539 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9540 + dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
9541 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9542 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
9543 + dpaa2_fl_set_len(out_fle, digestsize);
9545 + req_ctx->flc = &ctx->flc[DIGEST];
9546 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
9547 + req_ctx->cbk = ahash_done;
9548 + req_ctx->ctx = &req->base;
9549 + req_ctx->edesc = edesc;
9550 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9551 + if (ret != -EINPROGRESS &&
9552 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
9557 + ahash_unmap(ctx->dev, edesc, req, digestsize);
9558 + qi_cache_free(edesc);
9562 +static int ahash_update_first(struct ahash_request *req)
9564 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
9565 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
9566 + struct caam_hash_state *state = ahash_request_ctx(req);
9567 + struct caam_request *req_ctx = &state->caam_req;
9568 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
9569 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
9570 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9571 + GFP_KERNEL : GFP_ATOMIC;
9572 + u8 *next_buf = alt_buf(state);
9573 + int *next_buflen = alt_buflen(state);
9575 + int src_nents, mapped_nents;
9576 + struct ahash_edesc *edesc;
9579 + *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
9581 + to_hash = req->nbytes - *next_buflen;
9584 + struct dpaa2_sg_entry *sg_table;
9586 + src_nents = sg_nents_for_len(req->src,
9587 + req->nbytes - (*next_buflen));
9588 + if (src_nents < 0) {
9589 + dev_err(ctx->dev, "Invalid number of src SG.\n");
9594 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
9596 + if (!mapped_nents) {
9597 + dev_err(ctx->dev, "unable to map source for DMA\n");
9604 + /* allocate space for base edesc and link tables */
9605 + edesc = qi_cache_zalloc(GFP_DMA | flags);
9607 + dma_unmap_sg(ctx->dev, req->src, src_nents,
9612 + edesc->src_nents = src_nents;
9613 + sg_table = &edesc->sgt[0];
9615 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
9616 + dpaa2_fl_set_final(in_fle, true);
9617 + dpaa2_fl_set_len(in_fle, to_hash);
9619 + if (mapped_nents > 1) {
9622 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
9623 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
9624 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
9627 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
9628 + dev_err(ctx->dev, "unable to map S/G table\n");
9632 + edesc->qm_sg_bytes = qm_sg_bytes;
9633 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
9634 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
9636 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
9637 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
9641 + scatterwalk_map_and_copy(next_buf, req->src, to_hash,
9644 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
9645 + ctx->ctx_len, DMA_FROM_DEVICE);
9646 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
9647 + dev_err(ctx->dev, "unable to map ctx\n");
9648 + state->ctx_dma = 0;
9653 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
9654 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
9655 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
9657 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
9658 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
9659 + req_ctx->cbk = ahash_done_ctx_dst;
9660 + req_ctx->ctx = &req->base;
9661 + req_ctx->edesc = edesc;
9663 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
9664 + if (ret != -EINPROGRESS &&
9665 + !(ret == -EBUSY && req->base.flags &
9666 + CRYPTO_TFM_REQ_MAY_BACKLOG))
9669 + state->update = ahash_update_ctx;
9670 + state->finup = ahash_finup_ctx;
9671 + state->final = ahash_final_ctx;
9672 + } else if (*next_buflen) {
9673 + state->update = ahash_update_no_ctx;
9674 + state->finup = ahash_finup_no_ctx;
9675 + state->final = ahash_final_no_ctx;
9676 + scatterwalk_map_and_copy(next_buf, req->src, 0,
9678 + switch_buf(state);
9681 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
9682 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
9687 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
9688 + qi_cache_free(edesc);
9692 +static int ahash_finup_first(struct ahash_request *req)
9694 + return ahash_digest(req);
9697 +static int ahash_init(struct ahash_request *req)
9699 + struct caam_hash_state *state = ahash_request_ctx(req);
9701 + state->update = ahash_update_first;
9702 + state->finup = ahash_finup_first;
9703 + state->final = ahash_final_no_ctx;
9705 + state->ctx_dma = 0;
9706 + state->current_buf = 0;
9707 + state->buf_dma = 0;
9708 + state->buflen_0 = 0;
9709 + state->buflen_1 = 0;
9714 +static int ahash_update(struct ahash_request *req)
9716 + struct caam_hash_state *state = ahash_request_ctx(req);
9718 + return state->update(req);
9721 +static int ahash_finup(struct ahash_request *req)
9723 + struct caam_hash_state *state = ahash_request_ctx(req);
9725 + return state->finup(req);
9728 +static int ahash_final(struct ahash_request *req)
9730 + struct caam_hash_state *state = ahash_request_ctx(req);
9732 + return state->final(req);
9735 +static int ahash_export(struct ahash_request *req, void *out)
9737 + struct caam_hash_state *state = ahash_request_ctx(req);
9738 + struct caam_export_state *export = out;
9742 + if (state->current_buf) {
9743 + buf = state->buf_1;
9744 + len = state->buflen_1;
9746 + buf = state->buf_0;
9747 + len = state->buflen_0;
9750 + memcpy(export->buf, buf, len);
9751 + memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
9752 + export->buflen = len;
9753 + export->update = state->update;
9754 + export->final = state->final;
9755 + export->finup = state->finup;
9760 +static int ahash_import(struct ahash_request *req, const void *in)
9762 + struct caam_hash_state *state = ahash_request_ctx(req);
9763 + const struct caam_export_state *export = in;
9765 + memset(state, 0, sizeof(*state));
9766 + memcpy(state->buf_0, export->buf, export->buflen);
9767 + memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
9768 + state->buflen_0 = export->buflen;
9769 + state->update = export->update;
9770 + state->final = export->final;
9771 + state->finup = export->finup;
9776 +struct caam_hash_template {
9777 + char name[CRYPTO_MAX_ALG_NAME];
9778 + char driver_name[CRYPTO_MAX_ALG_NAME];
9779 + char hmac_name[CRYPTO_MAX_ALG_NAME];
9780 + char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
9781 + unsigned int blocksize;
9782 + struct ahash_alg template_ahash;
9786 +/* ahash descriptors */
9787 +static struct caam_hash_template driver_hash[] = {
9790 + .driver_name = "sha1-caam-qi2",
9791 + .hmac_name = "hmac(sha1)",
9792 + .hmac_driver_name = "hmac-sha1-caam-qi2",
9793 + .blocksize = SHA1_BLOCK_SIZE,
9794 + .template_ahash = {
9795 + .init = ahash_init,
9796 + .update = ahash_update,
9797 + .final = ahash_final,
9798 + .finup = ahash_finup,
9799 + .digest = ahash_digest,
9800 + .export = ahash_export,
9801 + .import = ahash_import,
9802 + .setkey = ahash_setkey,
9804 + .digestsize = SHA1_DIGEST_SIZE,
9805 + .statesize = sizeof(struct caam_export_state),
9808 + .alg_type = OP_ALG_ALGSEL_SHA1,
9811 + .driver_name = "sha224-caam-qi2",
9812 + .hmac_name = "hmac(sha224)",
9813 + .hmac_driver_name = "hmac-sha224-caam-qi2",
9814 + .blocksize = SHA224_BLOCK_SIZE,
9815 + .template_ahash = {
9816 + .init = ahash_init,
9817 + .update = ahash_update,
9818 + .final = ahash_final,
9819 + .finup = ahash_finup,
9820 + .digest = ahash_digest,
9821 + .export = ahash_export,
9822 + .import = ahash_import,
9823 + .setkey = ahash_setkey,
9825 + .digestsize = SHA224_DIGEST_SIZE,
9826 + .statesize = sizeof(struct caam_export_state),
9829 + .alg_type = OP_ALG_ALGSEL_SHA224,
9832 + .driver_name = "sha256-caam-qi2",
9833 + .hmac_name = "hmac(sha256)",
9834 + .hmac_driver_name = "hmac-sha256-caam-qi2",
9835 + .blocksize = SHA256_BLOCK_SIZE,
9836 + .template_ahash = {
9837 + .init = ahash_init,
9838 + .update = ahash_update,
9839 + .final = ahash_final,
9840 + .finup = ahash_finup,
9841 + .digest = ahash_digest,
9842 + .export = ahash_export,
9843 + .import = ahash_import,
9844 + .setkey = ahash_setkey,
9846 + .digestsize = SHA256_DIGEST_SIZE,
9847 + .statesize = sizeof(struct caam_export_state),
9850 + .alg_type = OP_ALG_ALGSEL_SHA256,
9853 + .driver_name = "sha384-caam-qi2",
9854 + .hmac_name = "hmac(sha384)",
9855 + .hmac_driver_name = "hmac-sha384-caam-qi2",
9856 + .blocksize = SHA384_BLOCK_SIZE,
9857 + .template_ahash = {
9858 + .init = ahash_init,
9859 + .update = ahash_update,
9860 + .final = ahash_final,
9861 + .finup = ahash_finup,
9862 + .digest = ahash_digest,
9863 + .export = ahash_export,
9864 + .import = ahash_import,
9865 + .setkey = ahash_setkey,
9867 + .digestsize = SHA384_DIGEST_SIZE,
9868 + .statesize = sizeof(struct caam_export_state),
9871 + .alg_type = OP_ALG_ALGSEL_SHA384,
9874 + .driver_name = "sha512-caam-qi2",
9875 + .hmac_name = "hmac(sha512)",
9876 + .hmac_driver_name = "hmac-sha512-caam-qi2",
9877 + .blocksize = SHA512_BLOCK_SIZE,
9878 + .template_ahash = {
9879 + .init = ahash_init,
9880 + .update = ahash_update,
9881 + .final = ahash_final,
9882 + .finup = ahash_finup,
9883 + .digest = ahash_digest,
9884 + .export = ahash_export,
9885 + .import = ahash_import,
9886 + .setkey = ahash_setkey,
9888 + .digestsize = SHA512_DIGEST_SIZE,
9889 + .statesize = sizeof(struct caam_export_state),
9892 + .alg_type = OP_ALG_ALGSEL_SHA512,
9895 + .driver_name = "md5-caam-qi2",
9896 + .hmac_name = "hmac(md5)",
9897 + .hmac_driver_name = "hmac-md5-caam-qi2",
9898 + .blocksize = MD5_BLOCK_WORDS * 4,
9899 + .template_ahash = {
9900 + .init = ahash_init,
9901 + .update = ahash_update,
9902 + .final = ahash_final,
9903 + .finup = ahash_finup,
9904 + .digest = ahash_digest,
9905 + .export = ahash_export,
9906 + .import = ahash_import,
9907 + .setkey = ahash_setkey,
9909 + .digestsize = MD5_DIGEST_SIZE,
9910 + .statesize = sizeof(struct caam_export_state),
9913 + .alg_type = OP_ALG_ALGSEL_MD5,
9917 +struct caam_hash_alg {
9918 + struct list_head entry;
9919 + struct device *dev;
9921 + struct ahash_alg ahash_alg;
9924 +static int caam_hash_cra_init(struct crypto_tfm *tfm)
9926 + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
9927 + struct crypto_alg *base = tfm->__crt_alg;
9928 + struct hash_alg_common *halg =
9929 + container_of(base, struct hash_alg_common, base);
9930 + struct ahash_alg *alg =
9931 + container_of(halg, struct ahash_alg, halg);
9932 + struct caam_hash_alg *caam_hash =
9933 + container_of(alg, struct caam_hash_alg, ahash_alg);
9934 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
9935 + /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
9936 + static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
9937 + HASH_MSG_LEN + SHA1_DIGEST_SIZE,
9938 + HASH_MSG_LEN + 32,
9939 + HASH_MSG_LEN + SHA256_DIGEST_SIZE,
9940 + HASH_MSG_LEN + 64,
9941 + HASH_MSG_LEN + SHA512_DIGEST_SIZE };
9942 + dma_addr_t dma_addr;
9945 + ctx->dev = caam_hash->dev;
9947 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
9948 + DMA_BIDIRECTIONAL,
9949 + DMA_ATTR_SKIP_CPU_SYNC);
9950 + if (dma_mapping_error(ctx->dev, dma_addr)) {
9951 + dev_err(ctx->dev, "unable to map shared descriptors\n");
9955 + for (i = 0; i < HASH_NUM_OP; i++)
9956 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
9958 + /* copy descriptor header template value */
9959 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
9961 + ctx->ctx_len = runninglen[(ctx->adata.algtype &
9962 + OP_ALG_ALGSEL_SUBMASK) >>
9963 + OP_ALG_ALGSEL_SHIFT];
9965 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
9966 + sizeof(struct caam_hash_state));
9968 + return ahash_set_sh_desc(ahash);
9971 +static void caam_hash_cra_exit(struct crypto_tfm *tfm)
9973 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
9975 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
9976 + DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
9979 +static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
9980 + struct caam_hash_template *template, bool keyed)
9982 + struct caam_hash_alg *t_alg;
9983 + struct ahash_alg *halg;
9984 + struct crypto_alg *alg;
9986 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
9988 + return ERR_PTR(-ENOMEM);
9990 + t_alg->ahash_alg = template->template_ahash;
9991 + halg = &t_alg->ahash_alg;
9992 + alg = &halg->halg.base;
9995 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
9996 + template->hmac_name);
9997 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
9998 + template->hmac_driver_name);
10000 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
10002 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
10003 + template->driver_name);
10004 + t_alg->ahash_alg.setkey = NULL;
10006 + alg->cra_module = THIS_MODULE;
10007 + alg->cra_init = caam_hash_cra_init;
10008 + alg->cra_exit = caam_hash_cra_exit;
10009 + alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
10010 + alg->cra_priority = CAAM_CRA_PRIORITY;
10011 + alg->cra_blocksize = template->blocksize;
10012 + alg->cra_alignmask = 0;
10013 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
10014 + alg->cra_type = &crypto_ahash_type;
10016 + t_alg->alg_type = template->alg_type;
10017 + t_alg->dev = dev;
10022 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
10024 + struct dpaa2_caam_priv_per_cpu *ppriv;
10026 + ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
10027 + napi_schedule_irqoff(&ppriv->napi);
10030 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
10032 + struct device *dev = priv->dev;
10033 + struct dpaa2_io_notification_ctx *nctx;
10034 + struct dpaa2_caam_priv_per_cpu *ppriv;
10035 + int err, i = 0, cpu;
10037 + for_each_online_cpu(cpu) {
10038 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10039 + ppriv->priv = priv;
10040 + nctx = &ppriv->nctx;
10041 + nctx->is_cdan = 0;
10042 + nctx->id = ppriv->rsp_fqid;
10043 + nctx->desired_cpu = cpu;
10044 + nctx->cb = dpaa2_caam_fqdan_cb;
10046 + /* Register notification callbacks */
10047 + ppriv->dpio = dpaa2_io_service_select(cpu);
10048 + err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
10049 + if (unlikely(err)) {
10050 + dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
10053 + * If no affine DPIO for this core, there's probably
10054 + * none available for next cores either. Signal we want
10055 + * to retry later, in case the DPIO devices weren't
10058 + err = -EPROBE_DEFER;
10062 + ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
10064 + if (unlikely(!ppriv->store)) {
10065 + dev_err(dev, "dpaa2_io_store_create() failed\n");
10070 + if (++i == priv->num_pairs)
10077 + for_each_online_cpu(cpu) {
10078 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10079 + if (!ppriv->nctx.cb)
10081 + dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
10084 + for_each_online_cpu(cpu) {
10085 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10086 + if (!ppriv->store)
10088 + dpaa2_io_store_destroy(ppriv->store);
10094 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
10096 + struct dpaa2_caam_priv_per_cpu *ppriv;
10097 + struct device *dev = priv->dev;
10100 + for_each_online_cpu(cpu) {
10101 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10102 + dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
10103 + dpaa2_io_store_destroy(ppriv->store);
10105 + if (++i == priv->num_pairs)
10110 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
10112 + struct dpseci_rx_queue_cfg rx_queue_cfg;
10113 + struct device *dev = priv->dev;
10114 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10115 + struct dpaa2_caam_priv_per_cpu *ppriv;
10116 + int err = 0, i = 0, cpu;
10118 + /* Configure Rx queues */
10119 + for_each_online_cpu(cpu) {
10120 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10122 + rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
10123 + DPSECI_QUEUE_OPT_USER_CTX;
10124 + rx_queue_cfg.order_preservation_en = 0;
10125 + rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
10126 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
10128 + * Rx priority (WQ) doesn't really matter, since we use
10129 + * pull mode, i.e. volatile dequeues from specific FQs
10131 + rx_queue_cfg.dest_cfg.priority = 0;
10132 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
10134 + err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
10137 + dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
10142 + if (++i == priv->num_pairs)
10149 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
10151 + struct device *dev = priv->dev;
10153 + if (!priv->cscn_mem)
10156 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
10157 + kfree(priv->cscn_mem);
10160 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
10162 + struct device *dev = priv->dev;
10163 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10165 + dpaa2_dpseci_congestion_free(priv);
10166 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
10169 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
10170 + const struct dpaa2_fd *fd)
10172 + struct caam_request *req;
10175 + if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
10176 + dev_err(priv->dev, "Only Frame List FD format is supported!\n");
10180 + fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
10181 + if (unlikely(fd_err))
10182 + dev_err(priv->dev, "FD error: %08x\n", fd_err);
10185 + * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
10186 + * in FD[ERR] or FD[FRC].
10188 + req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
10189 + dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
10190 + DMA_BIDIRECTIONAL);
10191 + req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
10194 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
10198 + /* Retry while portal is busy */
10200 + err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
10202 + } while (err == -EBUSY);
10204 + if (unlikely(err))
10205 + dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
10210 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
10212 + struct dpaa2_dq *dq;
10213 + int cleaned = 0, is_last;
10216 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
10217 + if (unlikely(!dq)) {
10218 + if (unlikely(!is_last)) {
10219 + dev_dbg(ppriv->priv->dev,
10220 + "FQ %d returned no valid frames\n",
10221 + ppriv->rsp_fqid);
10223 + * MUST retry until we get some sort of
10224 + * valid response token (be it "empty dequeue"
10225 + * or a valid frame).
10233 + dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
10235 + } while (!is_last);
10240 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
10242 + struct dpaa2_caam_priv_per_cpu *ppriv;
10243 + struct dpaa2_caam_priv *priv;
10244 + int err, cleaned = 0, store_cleaned;
10246 + ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
10247 + priv = ppriv->priv;
10249 + if (unlikely(dpaa2_caam_pull_fq(ppriv)))
10253 + store_cleaned = dpaa2_caam_store_consume(ppriv);
10254 + cleaned += store_cleaned;
10256 + if (store_cleaned == 0 ||
10257 + cleaned > budget - DPAA2_CAAM_STORE_SIZE)
10260 + /* Try to dequeue some more */
10261 + err = dpaa2_caam_pull_fq(ppriv);
10262 + if (unlikely(err))
10266 + if (cleaned < budget) {
10267 + napi_complete_done(napi, cleaned);
10268 + err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
10269 + if (unlikely(err))
10270 + dev_err(priv->dev, "Notification rearm failed: %d\n",
10277 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
10280 + struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
10281 + struct device *dev = priv->dev;
10285 + * Congestion group feature supported starting with DPSECI API v5.1
10286 + * and only when object has been created with this capability.
10288 + if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
10289 + !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
10292 + priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
10293 + GFP_KERNEL | GFP_DMA);
10294 + if (!priv->cscn_mem)
10297 + priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
10298 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
10299 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
10300 + if (dma_mapping_error(dev, priv->cscn_dma)) {
10301 + dev_err(dev, "Error mapping CSCN memory area\n");
10303 + goto err_dma_map;
10306 + cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
10307 + cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
10308 + cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
10309 + cong_notif_cfg.message_ctx = (u64)priv;
10310 + cong_notif_cfg.message_iova = priv->cscn_dma;
10311 + cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
10312 + DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
10313 + DPSECI_CGN_MODE_COHERENT_WRITE;
10315 + err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
10316 + &cong_notif_cfg);
10318 + dev_err(dev, "dpseci_set_congestion_notification failed\n");
10319 + goto err_set_cong;
10325 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
10327 + kfree(priv->cscn_mem);
10332 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
10334 + struct device *dev = &ls_dev->dev;
10335 + struct dpaa2_caam_priv *priv;
10336 + struct dpaa2_caam_priv_per_cpu *ppriv;
10340 + priv = dev_get_drvdata(dev);
10343 + priv->dpsec_id = ls_dev->obj_desc.id;
10345 +	/* Get a handle for the DPSECI this interface is associated with */
10346 + err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
10348 + dev_err(dev, "dpsec_open() failed: %d\n", err);
10352 + dev_info(dev, "Opened dpseci object successfully\n");
10354 + err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
10355 + &priv->minor_ver);
10357 + dev_err(dev, "dpseci_get_api_version() failed\n");
10358 + goto err_get_vers;
10361 + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
10362 + &priv->dpseci_attr);
10364 + dev_err(dev, "dpseci_get_attributes() failed\n");
10365 + goto err_get_vers;
10368 + err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
10369 + &priv->sec_attr);
10371 + dev_err(dev, "dpseci_get_sec_attr() failed\n");
10372 + goto err_get_vers;
10375 + err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
10377 + dev_err(dev, "setup_congestion() failed\n");
10378 + goto err_get_vers;
10381 + priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
10382 + priv->dpseci_attr.num_tx_queues);
10383 + if (priv->num_pairs > num_online_cpus()) {
10384 + dev_warn(dev, "%d queues won't be used\n",
10385 + priv->num_pairs - num_online_cpus());
10386 + priv->num_pairs = num_online_cpus();
10389 + for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
10390 + err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
10391 + &priv->rx_queue_attr[i]);
10393 + dev_err(dev, "dpseci_get_rx_queue() failed\n");
10394 + goto err_get_rx_queue;
10398 + for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
10399 + err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
10400 + &priv->tx_queue_attr[i]);
10402 + dev_err(dev, "dpseci_get_tx_queue() failed\n");
10403 + goto err_get_rx_queue;
10408 + for_each_online_cpu(cpu) {
10411 + j = i % priv->num_pairs;
10413 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
10414 + ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
10417 + * Allow all cores to enqueue, while only some of them
10418 + * will take part in dequeuing.
10420 + if (++i > priv->num_pairs)
10423 + ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
10426 + dev_info(dev, "pair %d: rx queue %d, tx queue %d\n", j,
10427 + priv->rx_queue_attr[j].fqid,
10428 + priv->tx_queue_attr[j].fqid);
10430 + ppriv->net_dev.dev = *dev;
10431 + INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
10432 + netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
10433 + DPAA2_CAAM_NAPI_WEIGHT);
10439 + dpaa2_dpseci_congestion_free(priv);
10441 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
10446 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
10448 + struct device *dev = priv->dev;
10449 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10450 + struct dpaa2_caam_priv_per_cpu *ppriv;
10453 + for (i = 0; i < priv->num_pairs; i++) {
10454 + ppriv = per_cpu_ptr(priv->ppriv, i);
10455 + napi_enable(&ppriv->napi);
10458 + err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
10460 + dev_err(dev, "dpseci_enable() failed\n");
10464 + dev_info(dev, "DPSECI version %d.%d\n",
10466 + priv->minor_ver);
10471 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
10473 + struct device *dev = priv->dev;
10474 + struct dpaa2_caam_priv_per_cpu *ppriv;
10475 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
10476 + int i, err = 0, enabled;
10478 + err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
10480 + dev_err(dev, "dpseci_disable() failed\n");
10484 + err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
10486 + dev_err(dev, "dpseci_is_enabled() failed\n");
10490 + dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
10492 + for (i = 0; i < priv->num_pairs; i++) {
10493 + ppriv = per_cpu_ptr(priv->ppriv, i);
10494 + napi_disable(&ppriv->napi);
10495 + netif_napi_del(&ppriv->napi);
10501 +static struct list_head hash_list;
10503 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
10505 + struct device *dev;
10506 + struct dpaa2_caam_priv *priv;
10508 + bool registered = false;
10511 + * There is no way to get CAAM endianness - there is no direct register
10512 + * space access and MC f/w does not provide this attribute.
10513 + * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
10516 + caam_little_end = true;
10518 + caam_imx = false;
10520 + dev = &dpseci_dev->dev;
10522 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
10526 + dev_set_drvdata(dev, priv);
10528 + priv->domain = iommu_get_domain_for_dev(dev);
10530 + qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
10531 + 0, SLAB_CACHE_DMA, NULL);
10533 + dev_err(dev, "Can't allocate SEC cache\n");
10535 + goto err_qicache;
10538 + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
10540 + dev_err(dev, "dma_set_mask_and_coherent() failed\n");
10541 + goto err_dma_mask;
10544 + /* Obtain a MC portal */
10545 + err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
10547 + if (err == -ENXIO)
10548 + err = -EPROBE_DEFER;
10550 + dev_err(dev, "MC portal allocation failed\n");
10552 + goto err_dma_mask;
10555 + priv->ppriv = alloc_percpu(*priv->ppriv);
10556 + if (!priv->ppriv) {
10557 + dev_err(dev, "alloc_percpu() failed\n");
10559 + goto err_alloc_ppriv;
10562 + /* DPSECI initialization */
10563 + err = dpaa2_dpseci_setup(dpseci_dev);
10565 + dev_err(dev, "dpaa2_dpseci_setup() failed\n");
10566 + goto err_dpseci_setup;
10570 + err = dpaa2_dpseci_dpio_setup(priv);
10572 + if (err != -EPROBE_DEFER)
10573 + dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
10574 + goto err_dpio_setup;
10577 + /* DPSECI binding to DPIO */
10578 + err = dpaa2_dpseci_bind(priv);
10580 + dev_err(dev, "dpaa2_dpseci_bind() failed\n");
10584 + /* DPSECI enable */
10585 + err = dpaa2_dpseci_enable(priv);
10587 + dev_err(dev, "dpaa2_dpseci_enable() failed");
10591 + /* register crypto algorithms the device supports */
10592 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
10593 + struct caam_skcipher_alg *t_alg = driver_algs + i;
10594 + u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
10596 + /* Skip DES algorithms if not supported by device */
10597 + if (!priv->sec_attr.des_acc_num &&
10598 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
10599 + (alg_sel == OP_ALG_ALGSEL_DES)))
10602 + /* Skip AES algorithms if not supported by device */
10603 + if (!priv->sec_attr.aes_acc_num &&
10604 + (alg_sel == OP_ALG_ALGSEL_AES))
10607 + /* Skip CHACHA20 algorithms if not supported by device */
10608 + if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
10609 + !priv->sec_attr.ccha_acc_num)
10612 + t_alg->caam.dev = dev;
10613 + caam_skcipher_alg_init(t_alg);
10615 + err = crypto_register_skcipher(&t_alg->skcipher);
10617 + dev_warn(dev, "%s alg registration failed: %d\n",
10618 + t_alg->skcipher.base.cra_driver_name, err);
10622 + t_alg->registered = true;
10623 + registered = true;
10626 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
10627 + struct caam_aead_alg *t_alg = driver_aeads + i;
10628 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
10629 + OP_ALG_ALGSEL_MASK;
10630 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
10631 + OP_ALG_ALGSEL_MASK;
10633 + /* Skip DES algorithms if not supported by device */
10634 + if (!priv->sec_attr.des_acc_num &&
10635 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
10636 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
10639 + /* Skip AES algorithms if not supported by device */
10640 + if (!priv->sec_attr.aes_acc_num &&
10641 + (c1_alg_sel == OP_ALG_ALGSEL_AES))
10644 + /* Skip CHACHA20 algorithms if not supported by device */
10645 + if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
10646 + !priv->sec_attr.ccha_acc_num)
10649 + /* Skip POLY1305 algorithms if not supported by device */
10650 + if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
10651 + !priv->sec_attr.ptha_acc_num)
10655 + * Skip algorithms requiring message digests
10656 + * if MD not supported by device.
10658 + if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
10659 + !priv->sec_attr.md_acc_num)
10662 + t_alg->caam.dev = dev;
10663 + caam_aead_alg_init(t_alg);
10665 + err = crypto_register_aead(&t_alg->aead);
10667 + dev_warn(dev, "%s alg registration failed: %d\n",
10668 + t_alg->aead.base.cra_driver_name, err);
10672 + t_alg->registered = true;
10673 + registered = true;
10676 + dev_info(dev, "algorithms registered in /proc/crypto\n");
10678 + /* register hash algorithms the device supports */
10679 + INIT_LIST_HEAD(&hash_list);
10682 + * Skip registration of any hashing algorithms if MD block
10683 + * is not present.
10685 + if (!priv->sec_attr.md_acc_num)
10688 + for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
10689 + struct caam_hash_alg *t_alg;
10690 + struct caam_hash_template *alg = driver_hash + i;
10692 + /* register hmac version */
10693 + t_alg = caam_hash_alloc(dev, alg, true);
10694 + if (IS_ERR(t_alg)) {
10695 + err = PTR_ERR(t_alg);
10696 + dev_warn(dev, "%s hash alg allocation failed: %d\n",
10697 + alg->driver_name, err);
10701 + err = crypto_register_ahash(&t_alg->ahash_alg);
10703 + dev_warn(dev, "%s alg registration failed: %d\n",
10704 + t_alg->ahash_alg.halg.base.cra_driver_name,
10708 + list_add_tail(&t_alg->entry, &hash_list);
10711 + /* register unkeyed version */
10712 + t_alg = caam_hash_alloc(dev, alg, false);
10713 + if (IS_ERR(t_alg)) {
10714 + err = PTR_ERR(t_alg);
10715 + dev_warn(dev, "%s alg allocation failed: %d\n",
10716 + alg->driver_name, err);
10720 + err = crypto_register_ahash(&t_alg->ahash_alg);
10722 + dev_warn(dev, "%s alg registration failed: %d\n",
10723 + t_alg->ahash_alg.halg.base.cra_driver_name,
10727 + list_add_tail(&t_alg->entry, &hash_list);
10730 + if (!list_empty(&hash_list))
10731 + dev_info(dev, "hash algorithms registered in /proc/crypto\n");
10736 + dpaa2_dpseci_dpio_free(priv);
10738 + dpaa2_dpseci_free(priv);
10740 + free_percpu(priv->ppriv);
10742 + fsl_mc_portal_free(priv->mc_io);
10744 + kmem_cache_destroy(qi_cache);
10746 + dev_set_drvdata(dev, NULL);
10751 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
10753 + struct device *dev;
10754 + struct dpaa2_caam_priv *priv;
10757 + dev = &ls_dev->dev;
10758 + priv = dev_get_drvdata(dev);
10760 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
10761 + struct caam_aead_alg *t_alg = driver_aeads + i;
10763 + if (t_alg->registered)
10764 + crypto_unregister_aead(&t_alg->aead);
10767 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
10768 + struct caam_skcipher_alg *t_alg = driver_algs + i;
10770 + if (t_alg->registered)
10771 + crypto_unregister_skcipher(&t_alg->skcipher);
10774 + if (hash_list.next) {
10775 + struct caam_hash_alg *t_hash_alg, *p;
10777 + list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
10778 + crypto_unregister_ahash(&t_hash_alg->ahash_alg);
10779 + list_del(&t_hash_alg->entry);
10780 + kfree(t_hash_alg);
10784 + dpaa2_dpseci_disable(priv);
10785 + dpaa2_dpseci_dpio_free(priv);
10786 + dpaa2_dpseci_free(priv);
10787 + free_percpu(priv->ppriv);
10788 + fsl_mc_portal_free(priv->mc_io);
10789 + dev_set_drvdata(dev, NULL);
10790 + kmem_cache_destroy(qi_cache);
10795 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
10797 + struct dpaa2_fd fd;
10798 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
10799 + struct dpaa2_caam_priv_per_cpu *ppriv;
10803 + return PTR_ERR(req);
10805 + if (priv->cscn_mem) {
10806 + dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
10808 + DMA_FROM_DEVICE);
10809 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
10810 + dev_dbg_ratelimited(dev, "Dropping request\n");
10815 + dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
10817 + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
10818 + DMA_BIDIRECTIONAL);
10819 + if (dma_mapping_error(dev, req->fd_flt_dma)) {
10820 + dev_err(dev, "DMA mapping error for QI enqueue request\n");
10824 + memset(&fd, 0, sizeof(fd));
10825 + dpaa2_fd_set_format(&fd, dpaa2_fd_list);
10826 + dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
10827 + dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
10828 + dpaa2_fd_set_flc(&fd, req->flc_dma);
10830 + ppriv = this_cpu_ptr(priv->ppriv);
10831 + for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
10832 + err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
10834 + if (err != -EBUSY)
10840 + if (unlikely(err)) {
10841 + dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
10845 + return -EINPROGRESS;
10848 + dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
10849 + DMA_BIDIRECTIONAL);
10852 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
10854 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
10856 + .vendor = FSL_MC_VENDOR_FREESCALE,
10857 + .obj_type = "dpseci",
10859 + { .vendor = 0x0 }
10862 +static struct fsl_mc_driver dpaa2_caam_driver = {
10864 + .name = KBUILD_MODNAME,
10865 + .owner = THIS_MODULE,
10867 + .probe = dpaa2_caam_probe,
10868 + .remove = dpaa2_caam_remove,
10869 + .match_id_table = dpaa2_caam_match_id_table
10872 +MODULE_LICENSE("Dual BSD/GPL");
10873 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
10874 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
10876 +module_fsl_mc_driver(dpaa2_caam_driver);
10878 +++ b/drivers/crypto/caam/caamalg_qi2.h
10881 + * Copyright 2015-2016 Freescale Semiconductor Inc.
10882 + * Copyright 2017 NXP
10884 + * Redistribution and use in source and binary forms, with or without
10885 + * modification, are permitted provided that the following conditions are met:
10886 + * * Redistributions of source code must retain the above copyright
10887 + * notice, this list of conditions and the following disclaimer.
10888 + * * Redistributions in binary form must reproduce the above copyright
10889 + * notice, this list of conditions and the following disclaimer in the
10890 + * documentation and/or other materials provided with the distribution.
10891 + * * Neither the names of the above-listed copyright holders nor the
10892 + * names of any contributors may be used to endorse or promote products
10893 + * derived from this software without specific prior written permission.
10896 + * ALTERNATIVELY, this software may be distributed under the terms of the
10897 + * GNU General Public License ("GPL") as published by the Free Software
10898 + * Foundation, either version 2 of that License or (at your option) any
10901 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
10902 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
10903 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
10904 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
10905 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
10906 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
10907 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
10908 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
10909 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
10910 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
10911 + * POSSIBILITY OF SUCH DAMAGE.
10914 +#ifndef _CAAMALG_QI2_H_
10915 +#define _CAAMALG_QI2_H_
10917 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
10918 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
10919 +#include <linux/threads.h>
10920 +#include "dpseci.h"
10921 +#include "desc_constr.h"
10923 +#define DPAA2_CAAM_STORE_SIZE 16
10924 +/* NAPI weight *must* be a multiple of the store size. */
10925 +#define DPAA2_CAAM_NAPI_WEIGHT 64
10927 +/* The congestion entrance threshold was chosen so that on LS2088
10928 + * we support the maximum throughput for the available memory
10930 +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
10931 +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
10934 + * dpaa2_caam_priv - driver private data
10935 + * @dpseci_id: DPSECI object unique ID
10936 + * @major_ver: DPSECI major version
10937 + * @minor_ver: DPSECI minor version
10938 + * @dpseci_attr: DPSECI attributes
10939 + * @sec_attr: SEC engine attributes
10940 + * @rx_queue_attr: array of Rx queue attributes
10941 + * @tx_queue_attr: array of Tx queue attributes
10942 + * @cscn_mem: pointer to memory region containing the
10943 + *			dpaa2_cscn struct; its size is larger than
10944 + * sizeof(struct dpaa2_cscn) to accommodate alignment
10945 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
10946 + * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
10947 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
10948 + * @dev: device associated with the DPSECI object
10949 + * @mc_io: pointer to MC portal's I/O object
10950 + * @domain: IOMMU domain
10951 + * @ppriv:		per CPU pointers to private data
10953 +struct dpaa2_caam_priv {
10959 + struct dpseci_attr dpseci_attr;
10960 + struct dpseci_sec_attr sec_attr;
10961 + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
10962 + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
10967 + void *cscn_mem_aligned;
10968 + dma_addr_t cscn_dma;
10970 + struct device *dev;
10971 + struct fsl_mc_io *mc_io;
10972 + struct iommu_domain *domain;
10974 + struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
10978 + * dpaa2_caam_priv_per_cpu - per CPU private data
10979 + * @napi: napi structure
10980 + * @net_dev: netdev used by napi
10981 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
10982 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
10983 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
10984 + * @nctx: notification context of response FQ
10985 + * @store: where dequeued frames are stored
10986 + * @priv: backpointer to dpaa2_caam_priv
10987 + * @dpio: portal used for data path operations
10989 +struct dpaa2_caam_priv_per_cpu {
10990 + struct napi_struct napi;
10991 + struct net_device net_dev;
10995 + struct dpaa2_io_notification_ctx nctx;
10996 + struct dpaa2_io_store *store;
10997 + struct dpaa2_caam_priv *priv;
10998 + struct dpaa2_io *dpio;
11002 + * The CAAM QI hardware constructs a job descriptor which points
11003 + * to shared descriptor (as pointed by context_a of FQ to CAAM).
11004 + * When the job descriptor is executed by deco, the whole job
11005 + * descriptor together with shared descriptor gets loaded in
11006 + * deco buffer which is 64 words long (each 32-bit).
11008 + * The job descriptor constructed by QI hardware has layout:
11010 + * HEADER (1 word)
11011 + * Shdesc ptr (1 or 2 words)
11012 + * SEQ_OUT_PTR (1 word)
11013 + * Out ptr (1 or 2 words)
11014 + * Out length (1 word)
11015 + * SEQ_IN_PTR (1 word)
11016 + * In ptr (1 or 2 words)
11017 + * In length (1 word)
11019 + * The shdesc ptr is used to fetch shared descriptor contents
11020 + * into deco buffer.
11022 + * Apart from shdesc contents, the total number of words that
11023 + * get loaded in deco buffer are '8' or '11'. The remaining words
11024 + * in deco buffer can be used for storing shared descriptor.
11026 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
11028 +/* Length of a single buffer in the QI driver memory cache */
11029 +#define CAAM_QI_MEMCACHE_SIZE 512
11032 + * aead_edesc - s/w-extended aead descriptor
11033 + * @src_nents: number of segments in input scatterlist
11034 + * @dst_nents: number of segments in output scatterlist
11035 + * @iv_dma: dma address of iv for checking continuity and link table
11036 + * @qm_sg_bytes: length of dma mapped h/w link table
11037 + * @qm_sg_dma: bus physical mapped address of h/w link table
11038 + * @assoclen: associated data length, in CAAM endianness
11039 + * @assoclen_dma: bus physical mapped address of req->assoclen
11040 + * @sgt: the h/w link table, followed by IV
11042 +struct aead_edesc {
11045 + dma_addr_t iv_dma;
11047 + dma_addr_t qm_sg_dma;
11048 + unsigned int assoclen;
11049 + dma_addr_t assoclen_dma;
11050 + struct dpaa2_sg_entry sgt[0];
11054 + * tls_edesc - s/w-extended tls descriptor
11055 + * @src_nents: number of segments in input scatterlist
11056 + * @dst_nents: number of segments in output scatterlist
11057 + * @iv_dma: dma address of iv for checking continuity and link table
11058 + * @qm_sg_bytes: length of dma mapped h/w link table
11059 + * @qm_sg_dma: bus physical mapped address of h/w link table
11060 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
11061 + * @dst: pointer to output scatterlist, useful for unmapping
11062 + * @sgt: the h/w link table, followed by IV
11064 +struct tls_edesc {
11067 + dma_addr_t iv_dma;
11069 + dma_addr_t qm_sg_dma;
11070 + struct scatterlist tmp[2];
11071 + struct scatterlist *dst;
11072 + struct dpaa2_sg_entry sgt[0];
11076 + * skcipher_edesc - s/w-extended skcipher descriptor
11077 + * @src_nents: number of segments in input scatterlist
11078 + * @dst_nents: number of segments in output scatterlist
11079 + * @iv_dma: dma address of iv for checking continuity and link table
11080 + * @qm_sg_bytes: length of dma mapped qm_sg space
11081 + * @qm_sg_dma: I/O virtual address of h/w link table
11082 + * @sgt: the h/w link table, followed by IV
11084 +struct skcipher_edesc {
11087 + dma_addr_t iv_dma;
11089 + dma_addr_t qm_sg_dma;
11090 + struct dpaa2_sg_entry sgt[0];
11094 + * ahash_edesc - s/w-extended ahash descriptor
11095 + * @dst_dma: I/O virtual address of req->result
11096 + * @qm_sg_dma: I/O virtual address of h/w link table
11097 + * @src_nents: number of segments in input scatterlist
11098 + * @qm_sg_bytes: length of dma mapped qm_sg space
11099 + * @sgt: pointer to h/w link table
11101 +struct ahash_edesc {
11102 + dma_addr_t dst_dma;
11103 + dma_addr_t qm_sg_dma;
11106 + struct dpaa2_sg_entry sgt[0];
11110 + * caam_flc - Flow Context (FLC)
11111 + * @flc: Flow Context options
11112 + * @sh_desc: Shared Descriptor
11116 + u32 sh_desc[MAX_SDLEN];
11117 +} ____cacheline_aligned;
11126 + * caam_request - the request structure the driver application should fill while
11127 + * submitting a job to driver.
11128 + * @fd_flt: Frame list table defining input and output
11129 + * fd_flt[0] - FLE pointing to output buffer
11130 + * fd_flt[1] - FLE pointing to input buffer
11131 + * @fd_flt_dma: DMA address for the frame list table
11132 + * @flc: Flow Context
11133 + * @flc_dma: I/O virtual address of Flow Context
11134 + * @cbk: Callback function to invoke when job is completed
11135 + * @ctx: arbitrary context attached to the request by the application
11136 + * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
11138 +struct caam_request {
11139 + struct dpaa2_fl_entry fd_flt[2];
11140 + dma_addr_t fd_flt_dma;
11141 + struct caam_flc *flc;
11142 + dma_addr_t flc_dma;
11143 + void (*cbk)(void *ctx, u32 err);
11149 + * dpaa2_caam_enqueue() - enqueue a crypto request
11150 + * @dev: device associated with the DPSECI object
11151 + * @req: pointer to caam_request
11153 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
11155 +#endif /* _CAAMALG_QI2_H_ */
11156 --- a/drivers/crypto/caam/caamhash.c
11157 +++ b/drivers/crypto/caam/caamhash.c
11159 * caam - Freescale FSL CAAM support for ahash functions of crypto API
11161 * Copyright 2011 Freescale Semiconductor, Inc.
11162 + * Copyright 2018 NXP
11164 * Based on caamalg.c crypto API driver.
11168 #include "sg_sw_sec4.h"
11169 #include "key_gen.h"
11170 +#include "caamhash_desc.h"
11172 #define CAAM_CRA_PRIORITY 3000
11175 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
11176 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
11178 -/* length of descriptors text */
11179 -#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
11180 -#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
11181 -#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11182 -#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
11183 -#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
11184 -#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11186 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
11187 CAAM_MAX_HASH_KEY_SIZE)
11188 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
11189 @@ -107,6 +101,7 @@ struct caam_hash_ctx {
11190 dma_addr_t sh_desc_update_first_dma;
11191 dma_addr_t sh_desc_fin_dma;
11192 dma_addr_t sh_desc_digest_dma;
11193 + enum dma_data_direction dir;
11194 struct device *jrdev;
11195 u8 key[CAAM_MAX_HASH_KEY_SIZE];
11197 @@ -218,7 +213,7 @@ static inline int buf_map_to_sec4_sg(str
11200 /* Map state->caam_ctx, and add it to link table */
11201 -static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
11202 +static inline int ctx_map_to_sec4_sg(struct device *jrdev,
11203 struct caam_hash_state *state, int ctx_len,
11204 struct sec4_sg_entry *sec4_sg, u32 flag)
11206 @@ -234,68 +229,22 @@ static inline int ctx_map_to_sec4_sg(u32
11211 - * For ahash update, final and finup (import_ctx = true)
11212 - * import context, read and write to seqout
11213 - * For ahash firsts and digest (import_ctx = false)
11214 - * read and write to seqout
11216 -static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
11217 - struct caam_hash_ctx *ctx, bool import_ctx)
11219 - u32 op = ctx->adata.algtype;
11220 - u32 *skip_key_load;
11222 - init_sh_desc(desc, HDR_SHARE_SERIAL);
11224 - /* Append key if it has been set; ahash update excluded */
11225 - if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
11226 - /* Skip key loading if already shared */
11227 - skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11230 - append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
11231 - ctx->adata.keylen, CLASS_2 |
11232 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
11234 - set_jump_tgt_here(desc, skip_key_load);
11236 - op |= OP_ALG_AAI_HMAC_PRECOMP;
11239 - /* If needed, import context from software */
11241 - append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
11242 - LDST_SRCDST_BYTE_CONTEXT);
11244 - /* Class 2 operation */
11245 - append_operation(desc, op | state | OP_ALG_ENCRYPT);
11248 - * Load from buf and/or src and write to req->result or state->context
11249 - * Calculate remaining bytes to read
11251 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11252 - /* Read remaining bytes */
11253 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
11254 - FIFOLD_TYPE_MSG | KEY_VLF);
11255 - /* Store class2 context bytes */
11256 - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
11257 - LDST_SRCDST_BYTE_CONTEXT);
11260 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
11262 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
11263 int digestsize = crypto_ahash_digestsize(ahash);
11264 struct device *jrdev = ctx->jrdev;
11265 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
11268 + ctx->adata.key_virt = ctx->key;
11270 /* ahash_update shared descriptor */
11271 desc = ctx->sh_desc_update;
11272 - ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
11273 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
11274 + ctx->ctx_len, true, ctrlpriv->era);
11275 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
11276 - desc_bytes(desc), DMA_TO_DEVICE);
11277 + desc_bytes(desc), ctx->dir);
11279 print_hex_dump(KERN_ERR,
11280 "ahash update shdesc@"__stringify(__LINE__)": ",
11281 @@ -304,9 +253,10 @@ static int ahash_set_sh_desc(struct cryp
11283 /* ahash_update_first shared descriptor */
11284 desc = ctx->sh_desc_update_first;
11285 - ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
11286 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
11287 + ctx->ctx_len, false, ctrlpriv->era);
11288 dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
11289 - desc_bytes(desc), DMA_TO_DEVICE);
11290 + desc_bytes(desc), ctx->dir);
11292 print_hex_dump(KERN_ERR,
11293 "ahash update first shdesc@"__stringify(__LINE__)": ",
11294 @@ -315,9 +265,10 @@ static int ahash_set_sh_desc(struct cryp
11296 /* ahash_final shared descriptor */
11297 desc = ctx->sh_desc_fin;
11298 - ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
11299 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
11300 + ctx->ctx_len, true, ctrlpriv->era);
11301 dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
11302 - desc_bytes(desc), DMA_TO_DEVICE);
11303 + desc_bytes(desc), ctx->dir);
11305 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
11306 DUMP_PREFIX_ADDRESS, 16, 4, desc,
11307 @@ -326,9 +277,10 @@ static int ahash_set_sh_desc(struct cryp
11309 /* ahash_digest shared descriptor */
11310 desc = ctx->sh_desc_digest;
11311 - ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
11312 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
11313 + ctx->ctx_len, false, ctrlpriv->era);
11314 dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
11315 - desc_bytes(desc), DMA_TO_DEVICE);
11316 + desc_bytes(desc), ctx->dir);
11318 print_hex_dump(KERN_ERR,
11319 "ahash digest shdesc@"__stringify(__LINE__)": ",
11320 @@ -421,6 +373,7 @@ static int ahash_setkey(struct crypto_ah
11321 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
11322 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
11323 int digestsize = crypto_ahash_digestsize(ahash);
11324 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
11326 u8 *hashed_key = NULL;
11328 @@ -441,16 +394,26 @@ static int ahash_setkey(struct crypto_ah
11332 - ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
11333 - CAAM_MAX_HASH_KEY_SIZE);
11335 - goto bad_free_key;
11337 + * If DKP is supported, use it in the shared descriptor to generate
11340 + if (ctrlpriv->era >= 6) {
11341 + ctx->adata.key_inline = true;
11342 + ctx->adata.keylen = keylen;
11343 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
11344 + OP_ALG_ALGSEL_MASK);
11347 - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
11348 - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
11349 - ctx->adata.keylen_pad, 1);
11351 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
11352 + goto bad_free_key;
11354 + memcpy(ctx->key, key, keylen);
11356 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
11357 + keylen, CAAM_MAX_HASH_KEY_SIZE);
11359 + goto bad_free_key;
11363 return ahash_set_sh_desc(ahash);
11364 @@ -773,7 +736,7 @@ static int ahash_update_ctx(struct ahash
11365 edesc->src_nents = src_nents;
11366 edesc->sec4_sg_bytes = sec4_sg_bytes;
11368 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
11369 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
11370 edesc->sec4_sg, DMA_BIDIRECTIONAL);
11373 @@ -871,9 +834,8 @@ static int ahash_final_ctx(struct ahash_
11374 desc = edesc->hw_desc;
11376 edesc->sec4_sg_bytes = sec4_sg_bytes;
11377 - edesc->src_nents = 0;
11379 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
11380 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
11381 edesc->sec4_sg, DMA_TO_DEVICE);
11384 @@ -967,7 +929,7 @@ static int ahash_finup_ctx(struct ahash_
11386 edesc->src_nents = src_nents;
11388 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
11389 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
11390 edesc->sec4_sg, DMA_TO_DEVICE);
11393 @@ -1126,7 +1088,6 @@ static int ahash_final_no_ctx(struct aha
11394 dev_err(jrdev, "unable to map dst\n");
11397 - edesc->src_nents = 0;
11400 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
11401 @@ -1208,7 +1169,6 @@ static int ahash_update_no_ctx(struct ah
11403 edesc->src_nents = src_nents;
11404 edesc->sec4_sg_bytes = sec4_sg_bytes;
11405 - edesc->dst_dma = 0;
11407 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
11409 @@ -1420,7 +1380,6 @@ static int ahash_update_first(struct aha
11412 edesc->src_nents = src_nents;
11413 - edesc->dst_dma = 0;
11415 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
11417 @@ -1722,6 +1681,7 @@ static int caam_hash_cra_init(struct cry
11419 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
11420 dma_addr_t dma_addr;
11421 + struct caam_drv_private *priv;
11424 * Get a Job ring from Job Ring driver to ensure in-order
11425 @@ -1733,10 +1693,13 @@ static int caam_hash_cra_init(struct cry
11426 return PTR_ERR(ctx->jrdev);
11429 + priv = dev_get_drvdata(ctx->jrdev->parent);
11430 + ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
11432 dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
11433 offsetof(struct caam_hash_ctx,
11434 sh_desc_update_dma),
11435 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
11436 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
11437 if (dma_mapping_error(ctx->jrdev, dma_addr)) {
11438 dev_err(ctx->jrdev, "unable to map shared descriptors\n");
11439 caam_jr_free(ctx->jrdev);
11440 @@ -1771,11 +1734,11 @@ static void caam_hash_cra_exit(struct cr
11441 dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
11442 offsetof(struct caam_hash_ctx,
11443 sh_desc_update_dma),
11444 - DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
11445 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
11446 caam_jr_free(ctx->jrdev);
11449 -static void __exit caam_algapi_hash_exit(void)
11450 +void caam_algapi_hash_exit(void)
11452 struct caam_hash_alg *t_alg, *n;
11454 @@ -1834,56 +1797,38 @@ caam_hash_alloc(struct caam_hash_templat
11458 -static int __init caam_algapi_hash_init(void)
11459 +int caam_algapi_hash_init(struct device *ctrldev)
11461 - struct device_node *dev_node;
11462 - struct platform_device *pdev;
11463 - struct device *ctrldev;
11464 int i = 0, err = 0;
11465 - struct caam_drv_private *priv;
11466 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
11467 unsigned int md_limit = SHA512_DIGEST_SIZE;
11468 - u32 cha_inst, cha_vid;
11470 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
11472 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
11477 - pdev = of_find_device_by_node(dev_node);
11479 - of_node_put(dev_node);
11483 - ctrldev = &pdev->dev;
11484 - priv = dev_get_drvdata(ctrldev);
11485 - of_node_put(dev_node);
11488 - * If priv is NULL, it's probably because the caam driver wasn't
11489 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
11493 + u32 md_inst, md_vid;
11496 * Register crypto algorithms the device supports. First, identify
11497 * presence and attributes of MD block.
11499 - cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
11500 - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
11501 + if (priv->era < 10) {
11502 + md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
11503 + CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
11504 + md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
11505 + CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
11507 + u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
11509 + md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
11510 + md_inst = mdha & CHA_VER_NUM_MASK;
11514 * Skip registration of any hashing algorithms if MD block
11517 - if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
11521 /* Limit digest size based on LP256 */
11522 - if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
11523 + if (md_vid == CHA_VER_VID_MD_LP256)
11524 md_limit = SHA256_DIGEST_SIZE;
11526 INIT_LIST_HEAD(&hash_list);
11527 @@ -1934,10 +1879,3 @@ static int __init caam_algapi_hash_init(
11532 -module_init(caam_algapi_hash_init);
11533 -module_exit(caam_algapi_hash_exit);
11535 -MODULE_LICENSE("GPL");
11536 -MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
11537 -MODULE_AUTHOR("Freescale Semiconductor - NMG");
11539 +++ b/drivers/crypto/caam/caamhash_desc.c
11542 + * Shared descriptors for ahash algorithms
11544 + * Copyright 2017 NXP
11546 + * Redistribution and use in source and binary forms, with or without
11547 + * modification, are permitted provided that the following conditions are met:
11548 + * * Redistributions of source code must retain the above copyright
11549 + * notice, this list of conditions and the following disclaimer.
11550 + * * Redistributions in binary form must reproduce the above copyright
11551 + * notice, this list of conditions and the following disclaimer in the
11552 + * documentation and/or other materials provided with the distribution.
11553 + * * Neither the names of the above-listed copyright holders nor the
11554 + * names of any contributors may be used to endorse or promote products
11555 + * derived from this software without specific prior written permission.
11558 + * ALTERNATIVELY, this software may be distributed under the terms of the
11559 + * GNU General Public License ("GPL") as published by the Free Software
11560 + * Foundation, either version 2 of that License or (at your option) any
11563 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
11564 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
11565 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
11566 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
11567 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
11568 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
11569 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
11570 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
11571 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
11572 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
11573 + * POSSIBILITY OF SUCH DAMAGE.
11576 +#include "compat.h"
11577 +#include "desc_constr.h"
11578 +#include "caamhash_desc.h"
11581 + * cnstr_shdsc_ahash - ahash shared descriptor
11582 + * @desc: pointer to buffer used for descriptor construction
11583 + * @adata: pointer to authentication transform definitions.
11584 + * A split key is required for SEC Era < 6; the size of the split key
11585 + * is specified in this case.
11586 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
11587 + * SHA256, SHA384, SHA512}.
11588 + * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
11589 + * @digestsize: algorithm's digest size
11590 + * @ctx_len: size of Context Register
11591 + * @import_ctx: true if previous Context Register needs to be restored
11592 + * must be true for ahash update and final
11593 + * must be false for ahash first and digest
11596 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
11597 + int digestsize, int ctx_len, bool import_ctx, int era)
11599 + u32 op = adata->algtype;
11601 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11603 + /* Append key if it has been set; ahash update excluded */
11604 + if (state != OP_ALG_AS_UPDATE && adata->keylen) {
11605 + u32 *skip_key_load;
11607 + /* Skip key loading if already shared */
11608 + skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11612 + append_key_as_imm(desc, adata->key_virt,
11613 + adata->keylen_pad,
11614 + adata->keylen, CLASS_2 |
11615 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
11617 + append_proto_dkp(desc, adata);
11619 + set_jump_tgt_here(desc, skip_key_load);
11621 + op |= OP_ALG_AAI_HMAC_PRECOMP;
11624 + /* If needed, import context from software */
11626 + append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
11627 + LDST_SRCDST_BYTE_CONTEXT);
11629 + /* Class 2 operation */
11630 + append_operation(desc, op | state | OP_ALG_ENCRYPT);
11633 + * Load from buf and/or src and write to req->result or state->context
11634 + * Calculate remaining bytes to read
11636 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11637 + /* Read remaining bytes */
11638 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
11639 + FIFOLD_TYPE_MSG | KEY_VLF);
11640 + /* Store class2 context bytes */
11641 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
11642 + LDST_SRCDST_BYTE_CONTEXT);
11644 +EXPORT_SYMBOL(cnstr_shdsc_ahash);
11646 +MODULE_LICENSE("Dual BSD/GPL");
11647 +MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
11648 +MODULE_AUTHOR("NXP Semiconductors");
11650 +++ b/drivers/crypto/caam/caamhash_desc.h
11653 + * Shared descriptors for ahash algorithms
11655 + * Copyright 2017 NXP
11657 + * Redistribution and use in source and binary forms, with or without
11658 + * modification, are permitted provided that the following conditions are met:
11659 + * * Redistributions of source code must retain the above copyright
11660 + * notice, this list of conditions and the following disclaimer.
11661 + * * Redistributions in binary form must reproduce the above copyright
11662 + * notice, this list of conditions and the following disclaimer in the
11663 + * documentation and/or other materials provided with the distribution.
11664 + * * Neither the names of the above-listed copyright holders nor the
11665 + * names of any contributors may be used to endorse or promote products
11666 + * derived from this software without specific prior written permission.
11669 + * ALTERNATIVELY, this software may be distributed under the terms of the
11670 + * GNU General Public License ("GPL") as published by the Free Software
11671 + * Foundation, either version 2 of that License or (at your option) any
11674 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
11675 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
11676 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
11677 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
11678 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
11679 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
11680 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
11681 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
11682 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
11683 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
11684 + * POSSIBILITY OF SUCH DAMAGE.
11687 +#ifndef _CAAMHASH_DESC_H_
11688 +#define _CAAMHASH_DESC_H_
11690 +/* length of descriptors text */
11691 +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
11692 +#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
11693 +#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11694 +#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
11695 +#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
11697 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
11698 + int digestsize, int ctx_len, bool import_ctx, int era);
11700 +#endif /* _CAAMHASH_DESC_H_ */
11701 --- a/drivers/crypto/caam/caampkc.c
11702 +++ b/drivers/crypto/caam/caampkc.c
11704 * caam - Freescale FSL CAAM support for Public Key Cryptography
11706 * Copyright 2016 Freescale Semiconductor, Inc.
11707 + * Copyright 2018 NXP
11709 * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
11710 * all the desired key parameters, input and output pointers.
11711 @@ -1017,46 +1018,22 @@ static struct akcipher_alg caam_rsa = {
11714 /* Public Key Cryptography module initialization handler */
11715 -static int __init caam_pkc_init(void)
11716 +int caam_pkc_init(struct device *ctrldev)
11718 - struct device_node *dev_node;
11719 - struct platform_device *pdev;
11720 - struct device *ctrldev;
11721 - struct caam_drv_private *priv;
11722 - u32 cha_inst, pk_inst;
11723 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
11727 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
11729 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
11734 - pdev = of_find_device_by_node(dev_node);
11736 - of_node_put(dev_node);
11740 - ctrldev = &pdev->dev;
11741 - priv = dev_get_drvdata(ctrldev);
11742 - of_node_put(dev_node);
11745 - * If priv is NULL, it's probably because the caam driver wasn't
11746 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
11751 /* Determine public key hardware accelerator presence. */
11752 - cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
11753 - pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
11754 + if (priv->era < 10)
11755 + pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
11756 + CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
11758 + pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
11760 /* Do not register algorithms if PKHA is not present. */
11765 err = crypto_register_akcipher(&caam_rsa);
11767 @@ -1068,14 +1045,7 @@ static int __init caam_pkc_init(void)
11771 -static void __exit caam_pkc_exit(void)
11772 +void caam_pkc_exit(void)
11774 crypto_unregister_akcipher(&caam_rsa);
11777 -module_init(caam_pkc_init);
11778 -module_exit(caam_pkc_exit);
11780 -MODULE_LICENSE("Dual BSD/GPL");
11781 -MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
11782 -MODULE_AUTHOR("Freescale Semiconductor");
11783 --- a/drivers/crypto/caam/caamrng.c
11784 +++ b/drivers/crypto/caam/caamrng.c
11786 * caam - Freescale FSL CAAM support for hw_random
11788 * Copyright 2011 Freescale Semiconductor, Inc.
11789 + * Copyright 2018 NXP
11791 * Based on caamalg.c crypto API driver.
11793 @@ -294,49 +295,29 @@ static struct hwrng caam_rng = {
11797 -static void __exit caam_rng_exit(void)
11798 +void caam_rng_exit(void)
11800 caam_jr_free(rng_ctx->jrdev);
11801 hwrng_unregister(&caam_rng);
11805 -static int __init caam_rng_init(void)
11806 +int caam_rng_init(struct device *ctrldev)
11808 struct device *dev;
11809 - struct device_node *dev_node;
11810 - struct platform_device *pdev;
11811 - struct device *ctrldev;
11812 - struct caam_drv_private *priv;
11814 + struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
11817 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
11819 - dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
11824 - pdev = of_find_device_by_node(dev_node);
11826 - of_node_put(dev_node);
11830 - ctrldev = &pdev->dev;
11831 - priv = dev_get_drvdata(ctrldev);
11832 - of_node_put(dev_node);
11835 - * If priv is NULL, it's probably because the caam driver wasn't
11836 - * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
11841 /* Check for an instantiated RNG before registration */
11842 - if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
11844 + if (priv->era < 10)
11845 + rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
11846 + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
11848 + rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
11853 dev = caam_jr_alloc();
11855 @@ -364,10 +345,3 @@ free_caam_alloc:
11860 -module_init(caam_rng_init);
11861 -module_exit(caam_rng_exit);
11863 -MODULE_LICENSE("GPL");
11864 -MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
11865 -MODULE_AUTHOR("Freescale Semiconductor - NMG");
11866 --- a/drivers/crypto/caam/compat.h
11867 +++ b/drivers/crypto/caam/compat.h
11869 #include <linux/of_platform.h>
11870 #include <linux/dma-mapping.h>
11871 #include <linux/io.h>
11872 +#include <linux/iommu.h>
11873 #include <linux/spinlock.h>
11874 #include <linux/rtnetlink.h>
11875 #include <linux/in.h>
11876 @@ -34,10 +35,13 @@
11877 #include <crypto/des.h>
11878 #include <crypto/sha.h>
11879 #include <crypto/md5.h>
11880 +#include <crypto/chacha20.h>
11881 +#include <crypto/poly1305.h>
11882 #include <crypto/internal/aead.h>
11883 #include <crypto/authenc.h>
11884 #include <crypto/akcipher.h>
11885 #include <crypto/scatterwalk.h>
11886 +#include <crypto/skcipher.h>
11887 #include <crypto/internal/skcipher.h>
11888 #include <crypto/internal/hash.h>
11889 #include <crypto/internal/rsa.h>
11890 --- a/drivers/crypto/caam/ctrl.c
11891 +++ b/drivers/crypto/caam/ctrl.c
11893 * Controller-level driver, kernel property detection, initialization
11895 * Copyright 2008-2012 Freescale Semiconductor, Inc.
11896 + * Copyright 2018 NXP
11899 #include <linux/device.h>
11900 @@ -16,17 +17,15 @@
11901 #include "desc_constr.h"
11904 -bool caam_little_end;
11905 -EXPORT_SYMBOL(caam_little_end);
11907 EXPORT_SYMBOL(caam_dpaa2);
11909 -EXPORT_SYMBOL(caam_imx);
11911 #ifdef CONFIG_CAAM_QI
11915 +static struct platform_device *caam_dma_dev;
11918 * i.MX targets tend to have clock control subsystems that can
11919 * enable/disable clocking to our device.
11920 @@ -105,7 +104,7 @@ static inline int run_descriptor_deco0(s
11921 struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
11922 struct caam_deco __iomem *deco = ctrlpriv->deco;
11923 unsigned int timeout = 100000;
11924 - u32 deco_dbg_reg, flags;
11925 + u32 deco_dbg_reg, deco_state, flags;
11929 @@ -148,13 +147,22 @@ static inline int run_descriptor_deco0(s
11930 timeout = 10000000;
11932 deco_dbg_reg = rd_reg32(&deco->desc_dbg);
11934 + if (ctrlpriv->era < 10)
11935 + deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
11936 + DESC_DBG_DECO_STAT_SHIFT;
11938 + deco_state = (rd_reg32(&deco->dbg_exec) &
11939 + DESC_DER_DECO_STAT_MASK) >>
11940 + DESC_DER_DECO_STAT_SHIFT;
11943 * If an error occured in the descriptor, then
11944 * the DECO status field will be set to 0x0D
11946 - if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
11947 - DESC_DBG_DECO_STAT_HOST_ERR)
11948 + if (deco_state == DECO_STAT_HOST_ERR)
11952 } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
11954 @@ -316,15 +324,15 @@ static int caam_remove(struct platform_d
11955 of_platform_depopulate(ctrldev);
11957 #ifdef CONFIG_CAAM_QI
11958 - if (ctrlpriv->qidev)
11959 - caam_qi_shutdown(ctrlpriv->qidev);
11960 + if (ctrlpriv->qi_init)
11961 + caam_qi_shutdown(ctrldev);
11965 * De-initialize RNG state handles initialized by this driver.
11966 - * In case of DPAA 2.x, RNG is managed by MC firmware.
11967 + * In case of SoCs with Management Complex, RNG is managed by MC f/w.
11969 - if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
11970 + if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
11971 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
11973 /* Shut down debug views */
11974 @@ -332,6 +340,9 @@ static int caam_remove(struct platform_d
11975 debugfs_remove_recursive(ctrlpriv->dfs_root);
11978 + if (caam_dma_dev)
11979 + platform_device_unregister(caam_dma_dev);
11981 /* Unmap controller region */
11984 @@ -433,6 +444,10 @@ static int caam_probe(struct platform_de
11985 {.family = "Freescale i.MX"},
11988 + static struct platform_device_info caam_dma_pdev_info = {
11989 + .name = "caam-dma",
11990 + .id = PLATFORM_DEVID_NONE
11992 struct device *dev;
11993 struct device_node *nprop, *np;
11994 struct caam_ctrl __iomem *ctrl;
11995 @@ -442,7 +457,7 @@ static int caam_probe(struct platform_de
11996 struct caam_perfmon *perfmon;
11998 u32 scfgr, comp_params;
12002 int BLOCK_OFFSET = 0;
12004 @@ -454,15 +469,54 @@ static int caam_probe(struct platform_de
12005 dev_set_drvdata(dev, ctrlpriv);
12006 nprop = pdev->dev.of_node;
12008 + /* Get configuration properties from device tree */
12009 + /* First, get register page */
12010 + ctrl = of_iomap(nprop, 0);
12012 + dev_err(dev, "caam: of_iomap() failed\n");
12016 + caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
12017 + (CSTA_PLEND | CSTA_ALT_PLEND));
12018 caam_imx = (bool)soc_device_match(imx_soc);
12020 + comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
12021 + caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
12022 + ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
12024 +#ifdef CONFIG_CAAM_QI
12025 + /* If (DPAA 1.x) QI present, check whether dependencies are available */
12026 + if (ctrlpriv->qi_present && !caam_dpaa2) {
12027 + ret = qman_is_probed();
12029 + ret = -EPROBE_DEFER;
12030 + goto iounmap_ctrl;
12031 + } else if (ret < 0) {
12032 + dev_err(dev, "failing probe due to qman probe error\n");
12034 + goto iounmap_ctrl;
12037 + ret = qman_portals_probed();
12039 + ret = -EPROBE_DEFER;
12040 + goto iounmap_ctrl;
12041 + } else if (ret < 0) {
12042 + dev_err(dev, "failing probe due to qman portals probe error\n");
12044 + goto iounmap_ctrl;
12049 /* Enable clocking */
12050 clk = caam_drv_identify_clk(&pdev->dev, "ipg");
12052 ret = PTR_ERR(clk);
12053 dev_err(&pdev->dev,
12054 "can't identify CAAM ipg clk: %d\n", ret);
12056 + goto iounmap_ctrl;
12058 ctrlpriv->caam_ipg = clk;
12060 @@ -471,7 +525,7 @@ static int caam_probe(struct platform_de
12061 ret = PTR_ERR(clk);
12062 dev_err(&pdev->dev,
12063 "can't identify CAAM mem clk: %d\n", ret);
12065 + goto iounmap_ctrl;
12067 ctrlpriv->caam_mem = clk;
12069 @@ -480,7 +534,7 @@ static int caam_probe(struct platform_de
12070 ret = PTR_ERR(clk);
12071 dev_err(&pdev->dev,
12072 "can't identify CAAM aclk clk: %d\n", ret);
12074 + goto iounmap_ctrl;
12076 ctrlpriv->caam_aclk = clk;
12078 @@ -490,7 +544,7 @@ static int caam_probe(struct platform_de
12079 ret = PTR_ERR(clk);
12080 dev_err(&pdev->dev,
12081 "can't identify CAAM emi_slow clk: %d\n", ret);
12083 + goto iounmap_ctrl;
12085 ctrlpriv->caam_emi_slow = clk;
12087 @@ -498,7 +552,7 @@ static int caam_probe(struct platform_de
12088 ret = clk_prepare_enable(ctrlpriv->caam_ipg);
12090 dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
12092 + goto iounmap_ctrl;
12095 ret = clk_prepare_enable(ctrlpriv->caam_mem);
12096 @@ -523,25 +577,10 @@ static int caam_probe(struct platform_de
12100 - /* Get configuration properties from device tree */
12101 - /* First, get register page */
12102 - ctrl = of_iomap(nprop, 0);
12103 - if (ctrl == NULL) {
12104 - dev_err(dev, "caam: of_iomap() failed\n");
12106 - goto disable_caam_emi_slow;
12109 - caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
12110 - (CSTA_PLEND | CSTA_ALT_PLEND));
12112 - /* Finding the page size for using the CTPR_MS register */
12113 - comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
12114 - pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
12116 /* Allocating the BLOCK_OFFSET based on the supported page size on
12119 + pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
12121 BLOCK_OFFSET = PG_SIZE_4K;
12123 @@ -563,11 +602,14 @@ static int caam_probe(struct platform_de
12125 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
12126 * long pointers in master configuration register.
12127 - * In case of DPAA 2.x, Management Complex firmware performs
12128 + * In case of SoCs with Management Complex, MC f/w performs
12129 * the configuration.
12131 - caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
12133 + np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
12134 + ctrlpriv->mc_en = !!np;
12137 + if (!ctrlpriv->mc_en)
12138 clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
12139 MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
12140 MCFGR_WDENABLE | MCFGR_LARGE_BURST |
12141 @@ -612,14 +654,11 @@ static int caam_probe(struct platform_de
12144 dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
12145 - goto iounmap_ctrl;
12146 + goto disable_caam_emi_slow;
12149 - ret = of_platform_populate(nprop, caam_match, NULL, dev);
12151 - dev_err(dev, "JR platform devices creation error\n");
12152 - goto iounmap_ctrl;
12154 + ctrlpriv->era = caam_get_era();
12155 + ctrlpriv->domain = iommu_get_domain_for_dev(dev);
12157 #ifdef CONFIG_DEBUG_FS
12159 @@ -633,21 +672,7 @@ static int caam_probe(struct platform_de
12160 ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
12164 - for_each_available_child_of_node(nprop, np)
12165 - if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
12166 - of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
12167 - ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
12168 - ((__force uint8_t *)ctrl +
12169 - (ring + JR_BLOCK_NUMBER) *
12172 - ctrlpriv->total_jobrs++;
12176 /* Check to see if (DPAA 1.x) QI present. If so, enable */
12177 - ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
12178 if (ctrlpriv->qi_present && !caam_dpaa2) {
12179 ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
12180 ((__force uint8_t *)ctrl +
12181 @@ -664,6 +689,25 @@ static int caam_probe(struct platform_de
12185 + ret = of_platform_populate(nprop, caam_match, NULL, dev);
12187 + dev_err(dev, "JR platform devices creation error\n");
12188 + goto shutdown_qi;
12192 + for_each_available_child_of_node(nprop, np)
12193 + if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
12194 + of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
12195 + ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
12196 + ((__force uint8_t *)ctrl +
12197 + (ring + JR_BLOCK_NUMBER) *
12200 + ctrlpriv->total_jobrs++;
12204 /* If no QI and no rings specified, quit and go home */
12205 if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
12206 dev_err(dev, "no queues configured, terminating\n");
12207 @@ -671,15 +715,29 @@ static int caam_probe(struct platform_de
12211 - cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
12212 + caam_dma_pdev_info.parent = dev;
12213 + caam_dma_pdev_info.dma_mask = dma_get_mask(dev);
12214 + caam_dma_dev = platform_device_register_full(&caam_dma_pdev_info);
12215 + if (IS_ERR(caam_dma_dev)) {
12216 + dev_err(dev, "Unable to create and register caam-dma dev\n");
12217 + caam_dma_dev = 0;
12219 + set_dma_ops(&caam_dma_dev->dev, get_dma_ops(dev));
12222 + if (ctrlpriv->era < 10)
12223 + rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
12224 + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
12226 + rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
12227 + CHA_VER_VID_SHIFT;
12230 * If SEC has RNG version >= 4 and RNG state handle has not been
12231 * already instantiated, do RNG instantiation
12232 - * In case of DPAA 2.x, RNG is managed by MC firmware.
12233 + * In case of SoCs with Management Complex, RNG is managed by MC f/w.
12235 - if (!caam_dpaa2 &&
12236 - (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
12237 + if (!ctrlpriv->mc_en && rng_vid >= 4) {
12238 ctrlpriv->rng4_sh_init =
12239 rd_reg32(&ctrl->r4tst[0].rdsta);
12241 @@ -746,10 +804,9 @@ static int caam_probe(struct platform_de
12243 /* Report "alive" for developer to see */
12244 dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
12246 - dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
12247 - ctrlpriv->total_jobrs, ctrlpriv->qi_present,
12248 - caam_dpaa2 ? "yes" : "no");
12250 + dev_info(dev, "job rings = %d, qi = %d\n",
12251 + ctrlpriv->total_jobrs, ctrlpriv->qi_present);
12253 #ifdef CONFIG_DEBUG_FS
12254 debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
12255 @@ -816,8 +873,11 @@ caam_remove:
12262 +#ifdef CONFIG_CAAM_QI
12263 + if (ctrlpriv->qi_init)
12264 + caam_qi_shutdown(dev);
12266 disable_caam_emi_slow:
12267 if (ctrlpriv->caam_emi_slow)
12268 clk_disable_unprepare(ctrlpriv->caam_emi_slow);
12269 @@ -827,6 +887,8 @@ disable_caam_mem:
12270 clk_disable_unprepare(ctrlpriv->caam_mem);
12272 clk_disable_unprepare(ctrlpriv->caam_ipg);
12278 --- a/drivers/crypto/caam/desc.h
12279 +++ b/drivers/crypto/caam/desc.h
12281 * Definitions to support CAAM descriptor instruction generation
12283 * Copyright 2008-2011 Freescale Semiconductor, Inc.
12284 + * Copyright 2018 NXP
12289 #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
12290 #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
12291 #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
12292 +#define CMD_MOVEB (0x07 << CMD_SHIFT)
12293 #define CMD_STORE (0x0a << CMD_SHIFT)
12294 #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
12295 #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
12296 @@ -242,6 +244,7 @@
12297 #define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
12298 #define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
12299 #define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
12300 +#define LDST_SRCDST_WORD_INFO_FIFO_SM (0x71 << LDST_SRCDST_SHIFT)
12301 #define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
12303 /* Offset in source/destination */
12304 @@ -284,6 +287,12 @@
12305 #define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
12306 #define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
12308 +/* Special Length definitions when dst=sm, nfifo-{sm,m} */
12309 +#define LDLEN_MATH0 0
12310 +#define LDLEN_MATH1 1
12311 +#define LDLEN_MATH2 2
12312 +#define LDLEN_MATH3 3
12315 * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
12316 * Command Constructs
12317 @@ -355,6 +364,7 @@
12318 #define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
12319 #define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
12320 #define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
12321 +#define FIFOLD_TYPE_IFIFO (0x0f << FIFOLD_TYPE_SHIFT)
12323 /* Other types. Need to OR in last/flush bits as desired */
12324 #define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
12325 @@ -408,6 +418,7 @@
12326 #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
12327 #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
12328 #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
12329 +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
12330 #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
12333 @@ -444,6 +455,18 @@
12334 #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
12335 #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
12336 #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
12337 +#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
12338 +#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
12339 +#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
12340 +#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
12341 +#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
12342 +#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
12343 +#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
12344 +#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
12345 +#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
12346 +#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
12347 +#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
12348 +#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
12350 /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
12351 #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
12352 @@ -1093,6 +1116,22 @@
12353 /* MacSec protinfos */
12354 #define OP_PCL_MACSEC 0x0001
12356 +/* Derived Key Protocol (DKP) Protinfo */
12357 +#define OP_PCL_DKP_SRC_SHIFT 14
12358 +#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
12359 +#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
12360 +#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
12361 +#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
12362 +#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
12363 +#define OP_PCL_DKP_DST_SHIFT 12
12364 +#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
12365 +#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
12366 +#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
12367 +#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
12368 +#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
12369 +#define OP_PCL_DKP_KEY_SHIFT 0
12370 +#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
12372 /* PKI unidirectional protocol protinfo bits */
12373 #define OP_PCL_PKPROT_TEST 0x0008
12374 #define OP_PCL_PKPROT_DECRYPT 0x0004
12375 @@ -1105,6 +1144,12 @@
12376 #define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
12377 #define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
12379 +/* version register fields */
12380 +#define OP_VER_CCHA_NUM 0x000000ff /* Number CCHAs instantiated */
12381 +#define OP_VER_CCHA_MISC 0x0000ff00 /* CCHA Miscellaneous Information */
12382 +#define OP_VER_CCHA_REV 0x00ff0000 /* CCHA Revision Number */
12383 +#define OP_VER_CCHA_VID 0xff000000 /* CCHA Version ID */
12385 #define OP_ALG_ALGSEL_SHIFT 16
12386 #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
12387 #define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
12388 @@ -1124,6 +1169,8 @@
12389 #define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
12390 #define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
12391 #define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
12392 +#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT)
12393 +#define OP_ALG_ALGSEL_POLY1305 (0xE0 << OP_ALG_ALGSEL_SHIFT)
12395 #define OP_ALG_AAI_SHIFT 4
12396 #define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
12397 @@ -1171,6 +1218,11 @@
12398 #define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
12399 #define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
12401 +/* Chacha20 AAI set */
12402 +#define OP_ALG_AAI_AEAD (0x002 << OP_ALG_AAI_SHIFT)
12403 +#define OP_ALG_AAI_KEYSTREAM (0x001 << OP_ALG_AAI_SHIFT)
12404 +#define OP_ALG_AAI_BC8 (0x008 << OP_ALG_AAI_SHIFT)
12406 /* hmac/smac AAI set */
12407 #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
12408 #define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
12409 @@ -1359,6 +1411,7 @@
12410 #define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
12411 #define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
12412 #define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
12413 +#define MOVE_SRC_AUX_ABLK (0x0a << MOVE_SRC_SHIFT)
12415 #define MOVE_DEST_SHIFT 16
12416 #define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
12417 @@ -1385,6 +1438,10 @@
12419 #define MOVELEN_MRSEL_SHIFT 0
12420 #define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
12421 +#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
12422 +#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
12423 +#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
12424 +#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
12427 * MATH Command Constructs
12428 @@ -1440,10 +1497,11 @@
12429 #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
12430 #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
12431 #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
12432 -#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
12433 +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
12434 #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
12435 #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
12436 #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
12437 +#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
12439 /* Destination selectors */
12440 #define MATH_DEST_SHIFT 8
12441 @@ -1452,6 +1510,7 @@
12442 #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
12443 #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
12444 #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
12445 +#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
12446 #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
12447 #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
12448 #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
12449 @@ -1560,6 +1619,7 @@
12450 #define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
12451 #define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
12452 #define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
12453 +#define NFIFOENTRY_DTYPE_POLY (0xB << NFIFOENTRY_DTYPE_SHIFT)
12454 #define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
12455 #define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
12457 @@ -1624,4 +1684,31 @@
12458 /* Frame Descriptor Command for Replacement Job Descriptor */
12459 #define FD_CMD_REPLACE_JOB_DESC 0x20000000
12461 +/* CHA Control Register bits */
12462 +#define CCTRL_RESET_CHA_ALL 0x1
12463 +#define CCTRL_RESET_CHA_AESA 0x2
12464 +#define CCTRL_RESET_CHA_DESA 0x4
12465 +#define CCTRL_RESET_CHA_AFHA 0x8
12466 +#define CCTRL_RESET_CHA_KFHA 0x10
12467 +#define CCTRL_RESET_CHA_SF8A 0x20
12468 +#define CCTRL_RESET_CHA_PKHA 0x40
12469 +#define CCTRL_RESET_CHA_MDHA 0x80
12470 +#define CCTRL_RESET_CHA_CRCA 0x100
12471 +#define CCTRL_RESET_CHA_RNG 0x200
12472 +#define CCTRL_RESET_CHA_SF9A 0x400
12473 +#define CCTRL_RESET_CHA_ZUCE 0x800
12474 +#define CCTRL_RESET_CHA_ZUCA 0x1000
12475 +#define CCTRL_UNLOAD_PK_A0 0x10000
12476 +#define CCTRL_UNLOAD_PK_A1 0x20000
12477 +#define CCTRL_UNLOAD_PK_A2 0x40000
12478 +#define CCTRL_UNLOAD_PK_A3 0x80000
12479 +#define CCTRL_UNLOAD_PK_B0 0x100000
12480 +#define CCTRL_UNLOAD_PK_B1 0x200000
12481 +#define CCTRL_UNLOAD_PK_B2 0x400000
12482 +#define CCTRL_UNLOAD_PK_B3 0x800000
12483 +#define CCTRL_UNLOAD_PK_N 0x1000000
12484 +#define CCTRL_UNLOAD_PK_A 0x4000000
12485 +#define CCTRL_UNLOAD_PK_B 0x8000000
12486 +#define CCTRL_UNLOAD_SBOX 0x10000000
12488 #endif /* DESC_H */
12489 --- a/drivers/crypto/caam/desc_constr.h
12490 +++ b/drivers/crypto/caam/desc_constr.h
12491 @@ -109,7 +109,7 @@ static inline void init_job_desc_shared(
12492 append_ptr(desc, ptr);
12495 -static inline void append_data(u32 * const desc, void *data, int len)
12496 +static inline void append_data(u32 * const desc, const void *data, int len)
12498 u32 *offset = desc_end(desc);
12500 @@ -172,7 +172,7 @@ static inline void append_cmd_ptr_extlen
12501 append_cmd(desc, len);
12504 -static inline void append_cmd_data(u32 * const desc, void *data, int len,
12505 +static inline void append_cmd_data(u32 * const desc, const void *data, int len,
12508 append_cmd(desc, command | IMMEDIATE | len);
12509 @@ -189,6 +189,8 @@ static inline u32 *append_##cmd(u32 * co
12511 APPEND_CMD_RET(jump, JUMP)
12512 APPEND_CMD_RET(move, MOVE)
12513 +APPEND_CMD_RET(moveb, MOVEB)
12514 +APPEND_CMD_RET(move_len, MOVE_LEN)
12516 static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
12518 @@ -271,7 +273,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
12519 APPEND_SEQ_PTR_INTLEN(out, OUT)
12521 #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
12522 -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
12523 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
12524 unsigned int len, u32 options) \
12527 @@ -312,7 +314,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
12528 * from length of immediate data provided, e.g., split keys
12530 #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
12531 -static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
12532 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
12533 unsigned int data_len, \
12534 unsigned int len, u32 options) \
12536 @@ -327,7 +329,11 @@ static inline void append_##cmd##_imm_##
12540 - append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
12541 + if (options & LDST_LEN_MASK) \
12542 + append_cmd(desc, CMD_##op | IMMEDIATE | options); \
12544 + append_cmd(desc, CMD_##op | IMMEDIATE | options | \
12546 append_cmd(desc, immediate); \
12548 APPEND_CMD_RAW_IMM(load, LOAD, u32);
12549 @@ -452,7 +458,7 @@ struct alginfo {
12550 unsigned int keylen_pad;
12552 dma_addr_t key_dma;
12554 + const void *key_virt;
12558 @@ -496,4 +502,45 @@ static inline int desc_inline_query(unsi
12559 return (rem_bytes >= 0) ? 0 : -1;
12563 + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
12564 + * @desc: pointer to buffer used for descriptor construction
12565 + * @adata: pointer to authentication transform definitions.
12566 + * keylen should be the length of initial key, while keylen_pad
12567 + * the length of the derived (split) key.
12568 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
12569 + * SHA256, SHA384, SHA512}.
12571 +static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
12576 + * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
12577 + * to OP_PCLID_DKP_{MD5, SHA*}
12579 + protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
12580 + (0x20 << OP_ALG_ALGSEL_SHIFT);
12582 + if (adata->key_inline) {
12585 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
12586 + OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
12588 + append_data(desc, adata->key_virt, adata->keylen);
12590 + /* Reserve space in descriptor buffer for the derived key */
12591 + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
12592 + ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
12594 + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
12596 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
12597 + OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
12599 + append_ptr(desc, adata->key_dma);
12603 #endif /* DESC_CONSTR_H */
12605 +++ b/drivers/crypto/caam/dpseci.c
12608 + * Copyright 2013-2016 Freescale Semiconductor Inc.
12609 + * Copyright 2017 NXP
12611 + * Redistribution and use in source and binary forms, with or without
12612 + * modification, are permitted provided that the following conditions are met:
12613 + * * Redistributions of source code must retain the above copyright
12614 + * notice, this list of conditions and the following disclaimer.
12615 + * * Redistributions in binary form must reproduce the above copyright
12616 + * notice, this list of conditions and the following disclaimer in the
12617 + * documentation and/or other materials provided with the distribution.
12618 + * * Neither the names of the above-listed copyright holders nor the
12619 + * names of any contributors may be used to endorse or promote products
12620 + * derived from this software without specific prior written permission.
12623 + * ALTERNATIVELY, this software may be distributed under the terms of the
12624 + * GNU General Public License ("GPL") as published by the Free Software
12625 + * Foundation, either version 2 of that License or (at your option) any
12628 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
12629 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
12630 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
12631 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
12632 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
12633 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
12634 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
12635 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
12636 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
12637 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
12638 + * POSSIBILITY OF SUCH DAMAGE.
12641 +#include <linux/fsl/mc.h>
12642 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
12643 +#include "dpseci.h"
12644 +#include "dpseci_cmd.h"
12647 + * dpseci_open() - Open a control session for the specified object
12648 + * @mc_io: Pointer to MC portal's I/O object
12649 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12650 + * @dpseci_id: DPSECI unique ID
12651 + * @token: Returned token; use in subsequent API calls
12653 + * This function can be used to open a control session for an already created
12654 + * object; an object may have been declared in the DPL or by calling the
12655 + * dpseci_create() function.
12656 + * This function returns a unique authentication token, associated with the
12657 + * specific object ID and the specific MC portal; this token must be used in all
12658 + * subsequent commands for this specific object.
12660 + * Return: '0' on success, error code otherwise
12662 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
12665 + struct fsl_mc_command cmd = { 0 };
12666 + struct dpseci_cmd_open *cmd_params;
12669 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
12672 + cmd_params = (struct dpseci_cmd_open *)cmd.params;
12673 + cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
12674 + err = mc_send_command(mc_io, &cmd);
12678 + *token = mc_cmd_hdr_read_token(&cmd);
12684 + * dpseci_close() - Close the control session of the object
12685 + * @mc_io: Pointer to MC portal's I/O object
12686 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12687 + * @token: Token of DPSECI object
12689 + * After this function is called, no further operations are allowed on the
12690 + * object without opening a new control session.
12692 + * Return: '0' on success, error code otherwise
12694 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12696 + struct fsl_mc_command cmd = { 0 };
12698 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
12701 + return mc_send_command(mc_io, &cmd);
12705 + * dpseci_create() - Create the DPSECI object
12706 + * @mc_io: Pointer to MC portal's I/O object
12707 + * @dprc_token: Parent container token; '0' for default container
12708 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12709 + * @cfg: Configuration structure
12710 + * @obj_id: returned object id
12712 + * Create the DPSECI object, allocate required resources and perform required
12713 + * initialization.
12715 + * The object can be created either by declaring it in the DPL file, or by
12716 + * calling this function.
12718 + * The function accepts an authentication token of a parent container that this
12719 + * object should be assigned to. The token can be '0' so the object will be
12720 + * assigned to the default container.
12721 + * The newly created object can be opened with the returned object id and using
12722 + * the container's associated tokens and MC portals.
12724 + * Return: '0' on success, error code otherwise
12726 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
12727 + const struct dpseci_cfg *cfg, u32 *obj_id)
12729 + struct fsl_mc_command cmd = { 0 };
12730 + struct dpseci_cmd_create *cmd_params;
12733 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
12736 + cmd_params = (struct dpseci_cmd_create *)cmd.params;
12737 + for (i = 0; i < 8; i++)
12738 + cmd_params->priorities[i] = cfg->priorities[i];
12739 + for (i = 0; i < 8; i++)
12740 + cmd_params->priorities2[i] = cfg->priorities[8 + i];
12741 + cmd_params->num_tx_queues = cfg->num_tx_queues;
12742 + cmd_params->num_rx_queues = cfg->num_rx_queues;
12743 + cmd_params->options = cpu_to_le32(cfg->options);
12744 + err = mc_send_command(mc_io, &cmd);
12748 + *obj_id = mc_cmd_read_object_id(&cmd);
12754 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
12755 + * @mc_io: Pointer to MC portal's I/O object
12756 + * @dprc_token: Parent container token; '0' for default container
12757 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12758 + * @object_id: The object id; it must be a valid id within the container that
12759 + * created this object
12761 + * The function accepts the authentication token of the parent container that
12762 + * created the object (not the one that currently owns the object). The object
12763 + * is searched within parent using the provided 'object_id'.
12764 + * All tokens to the object must be closed before calling destroy.
12766 + * Return: '0' on success, error code otherwise
12768 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
12771 + struct fsl_mc_command cmd = { 0 };
12772 + struct dpseci_cmd_destroy *cmd_params;
12774 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
12777 + cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
12778 + cmd_params->object_id = cpu_to_le32(object_id);
12780 + return mc_send_command(mc_io, &cmd);
12784 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
12785 + * @mc_io: Pointer to MC portal's I/O object
12786 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12787 + * @token: Token of DPSECI object
12789 + * Return: '0' on success, error code otherwise
12791 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12793 + struct fsl_mc_command cmd = { 0 };
12795 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
12798 + return mc_send_command(mc_io, &cmd);
12802 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
12803 + * @mc_io: Pointer to MC portal's I/O object
12804 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12805 + * @token: Token of DPSECI object
12807 + * Return: '0' on success, error code otherwise
12809 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12811 + struct fsl_mc_command cmd = { 0 };
12813 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
12817 + return mc_send_command(mc_io, &cmd);
12821 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
12822 + * @mc_io: Pointer to MC portal's I/O object
12823 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12824 + * @token: Token of DPSECI object
12825 + * @en: Returns '1' if object is enabled; '0' otherwise
12827 + * Return: '0' on success, error code otherwise
12829 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12832 + struct fsl_mc_command cmd = { 0 };
12833 + struct dpseci_rsp_is_enabled *rsp_params;
12836 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
12839 + err = mc_send_command(mc_io, &cmd);
12843 + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
12844 + *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
12850 + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
12851 + * @mc_io: Pointer to MC portal's I/O object
12852 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12853 + * @token: Token of DPSECI object
12855 + * Return: '0' on success, error code otherwise
12857 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
12859 + struct fsl_mc_command cmd = { 0 };
12861 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
12865 + return mc_send_command(mc_io, &cmd);
12869 + * dpseci_get_irq_enable() - Get overall interrupt state
12870 + * @mc_io: Pointer to MC portal's I/O object
12871 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12872 + * @token: Token of DPSECI object
12873 + * @irq_index: The interrupt index to configure
12874 + * @en: Returned Interrupt state - enable = 1, disable = 0
12876 + * Return: '0' on success, error code otherwise
12878 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12879 + u8 irq_index, u8 *en)
12881 + struct fsl_mc_command cmd = { 0 };
12882 + struct dpseci_cmd_irq_enable *cmd_params;
12883 + struct dpseci_rsp_get_irq_enable *rsp_params;
12886 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
12889 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
12890 + cmd_params->irq_index = irq_index;
12891 + err = mc_send_command(mc_io, &cmd);
12895 + rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
12896 + *en = rsp_params->enable_state;
12902 + * dpseci_set_irq_enable() - Set overall interrupt state.
12903 + * @mc_io: Pointer to MC portal's I/O object
12904 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12905 + * @token: Token of DPSECI object
12906 + * @irq_index: The interrupt index to configure
12907 + * @en: Interrupt state - enable = 1, disable = 0
12909 + * Allows GPP software to control when interrupts are generated.
12910 + * Each interrupt can have up to 32 causes. The enable/disable controls the
12911 + * overall interrupt state. If the interrupt is disabled no causes will cause
12914 + * Return: '0' on success, error code otherwise
12916 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12917 + u8 irq_index, u8 en)
12919 + struct fsl_mc_command cmd = { 0 };
12920 + struct dpseci_cmd_irq_enable *cmd_params;
12922 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
12925 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
12926 + cmd_params->irq_index = irq_index;
12927 + cmd_params->enable_state = en;
12929 + return mc_send_command(mc_io, &cmd);
12933 + * dpseci_get_irq_mask() - Get interrupt mask.
12934 + * @mc_io: Pointer to MC portal's I/O object
12935 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12936 + * @token: Token of DPSECI object
12937 + * @irq_index: The interrupt index to configure
12938 + * @mask: Returned event mask to trigger interrupt
12940 + * Every interrupt can have up to 32 causes and the interrupt model supports
12941 + * masking/unmasking each cause independently.
12943 + * Return: '0' on success, error code otherwise
12945 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12946 + u8 irq_index, u32 *mask)
12948 + struct fsl_mc_command cmd = { 0 };
12949 + struct dpseci_cmd_irq_mask *cmd_params;
12952 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
12955 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
12956 + cmd_params->irq_index = irq_index;
12957 + err = mc_send_command(mc_io, &cmd);
12961 + *mask = le32_to_cpu(cmd_params->mask);
12967 + * dpseci_set_irq_mask() - Set interrupt mask.
12968 + * @mc_io: Pointer to MC portal's I/O object
12969 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
12970 + * @token: Token of DPSECI object
12971 + * @irq_index: The interrupt index to configure
12972 + * @mask: event mask to trigger interrupt;
12974 + * 0 = ignore event
12975 + * 1 = consider event for asserting IRQ
12977 + * Every interrupt can have up to 32 causes and the interrupt model supports
12978 + * masking/unmasking each cause independently
12980 + * Return: '0' on success, error code otherwise
12982 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
12983 + u8 irq_index, u32 mask)
12985 + struct fsl_mc_command cmd = { 0 };
12986 + struct dpseci_cmd_irq_mask *cmd_params;
12988 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
12991 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
12992 + cmd_params->mask = cpu_to_le32(mask);
12993 + cmd_params->irq_index = irq_index;
12995 + return mc_send_command(mc_io, &cmd);
12999 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
13000 + * @mc_io: Pointer to MC portal's I/O object
13001 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13002 + * @token: Token of DPSECI object
13003 + * @irq_index: The interrupt index to configure
13004 + * @status: Returned interrupts status - one bit per cause:
13005 + * 0 = no interrupt pending
13006 + * 1 = interrupt pending
13008 + * Return: '0' on success, error code otherwise
13010 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13011 + u8 irq_index, u32 *status)
13013 + struct fsl_mc_command cmd = { 0 };
13014 + struct dpseci_cmd_irq_status *cmd_params;
13017 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
13020 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
13021 + cmd_params->status = cpu_to_le32(*status);
13022 + cmd_params->irq_index = irq_index;
13023 + err = mc_send_command(mc_io, &cmd);
13027 + *status = le32_to_cpu(cmd_params->status);
13033 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
13034 + * @mc_io: Pointer to MC portal's I/O object
13035 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13036 + * @token: Token of DPSECI object
13037 + * @irq_index: The interrupt index to configure
13038 + * @status: bits to clear (W1C) - one bit per cause:
13039 + * 0 = don't change
13040 + * 1 = clear status bit
13042 + * Return: '0' on success, error code otherwise
13044 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13045 + u8 irq_index, u32 status)
13047 + struct fsl_mc_command cmd = { 0 };
13048 + struct dpseci_cmd_irq_status *cmd_params;
13050 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
13053 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
13054 + cmd_params->status = cpu_to_le32(status);
13055 + cmd_params->irq_index = irq_index;
13057 + return mc_send_command(mc_io, &cmd);
13061 + * dpseci_get_attributes() - Retrieve DPSECI attributes
13062 + * @mc_io: Pointer to MC portal's I/O object
13063 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13064 + * @token: Token of DPSECI object
13065 + * @attr: Returned object's attributes
13067 + * Return: '0' on success, error code otherwise
13069 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13070 + struct dpseci_attr *attr)
13072 + struct fsl_mc_command cmd = { 0 };
13073 + struct dpseci_rsp_get_attributes *rsp_params;
13076 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
13079 + err = mc_send_command(mc_io, &cmd);
13083 + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
13084 + attr->id = le32_to_cpu(rsp_params->id);
13085 + attr->num_tx_queues = rsp_params->num_tx_queues;
13086 + attr->num_rx_queues = rsp_params->num_rx_queues;
13087 + attr->options = le32_to_cpu(rsp_params->options);
13093 + * dpseci_set_rx_queue() - Set Rx queue configuration
13094 + * @mc_io: Pointer to MC portal's I/O object
13095 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13096 + * @token: Token of DPSECI object
13097 + * @queue: Select the queue relative to number of priorities configured at
13098 + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
13099 + * Rx queues identically.
13100 + * @cfg: Rx queue configuration
13102 + * Return: '0' on success, error code otherwise
13104 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13105 + u8 queue, const struct dpseci_rx_queue_cfg *cfg)
13107 + struct fsl_mc_command cmd = { 0 };
13108 + struct dpseci_cmd_queue *cmd_params;
13110 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
13113 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
13114 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
13115 + cmd_params->priority = cfg->dest_cfg.priority;
13116 + cmd_params->queue = queue;
13117 + dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
13118 + cfg->dest_cfg.dest_type);
13119 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
13120 + cmd_params->options = cpu_to_le32(cfg->options);
13121 + dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
13122 + cfg->order_preservation_en);
13124 + return mc_send_command(mc_io, &cmd);
13128 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
13129 + * @mc_io: Pointer to MC portal's I/O object
13130 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13131 + * @token: Token of DPSECI object
13132 + * @queue: Select the queue relative to number of priorities configured at
13133 + * DPSECI creation
13134 + * @attr: Returned Rx queue attributes
13136 + * Return: '0' on success, error code otherwise
13138 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13139 + u8 queue, struct dpseci_rx_queue_attr *attr)
13141 + struct fsl_mc_command cmd = { 0 };
13142 + struct dpseci_cmd_queue *cmd_params;
13145 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
13148 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
13149 + cmd_params->queue = queue;
13150 + err = mc_send_command(mc_io, &cmd);
13154 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
13155 + attr->dest_cfg.priority = cmd_params->priority;
13156 + attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
13158 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
13159 + attr->fqid = le32_to_cpu(cmd_params->fqid);
13160 + attr->order_preservation_en =
13161 + dpseci_get_field(cmd_params->order_preservation_en,
13162 + ORDER_PRESERVATION);
13168 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
13169 + * @mc_io: Pointer to MC portal's I/O object
13170 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13171 + * @token: Token of DPSECI object
13172 + * @queue: Select the queue relative to number of priorities configured at
13173 + * DPSECI creation
13174 + * @attr: Returned Tx queue attributes
13176 + * Return: '0' on success, error code otherwise
13178 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13179 + u8 queue, struct dpseci_tx_queue_attr *attr)
13181 + struct fsl_mc_command cmd = { 0 };
13182 + struct dpseci_cmd_queue *cmd_params;
13183 + struct dpseci_rsp_get_tx_queue *rsp_params;
13186 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
13189 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
13190 + cmd_params->queue = queue;
13191 + err = mc_send_command(mc_io, &cmd);
13195 + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
13196 + attr->fqid = le32_to_cpu(rsp_params->fqid);
13197 + attr->priority = rsp_params->priority;
13203 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
13204 + * @mc_io: Pointer to MC portal's I/O object
13205 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13206 + * @token: Token of DPSECI object
13207 + * @attr: Returned SEC attributes
13209 + * Return: '0' on success, error code otherwise
13211 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13212 + struct dpseci_sec_attr *attr)
13214 + struct fsl_mc_command cmd = { 0 };
13215 + struct dpseci_rsp_get_sec_attr *rsp_params;
13218 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
13221 + err = mc_send_command(mc_io, &cmd);
13225 + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
13226 + attr->ip_id = le16_to_cpu(rsp_params->ip_id);
13227 + attr->major_rev = rsp_params->major_rev;
13228 + attr->minor_rev = rsp_params->minor_rev;
13229 + attr->era = rsp_params->era;
13230 + attr->deco_num = rsp_params->deco_num;
13231 + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
13232 + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
13233 + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
13234 + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
13235 + attr->crc_acc_num = rsp_params->crc_acc_num;
13236 + attr->pk_acc_num = rsp_params->pk_acc_num;
13237 + attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
13238 + attr->rng_acc_num = rsp_params->rng_acc_num;
13239 + attr->md_acc_num = rsp_params->md_acc_num;
13240 + attr->arc4_acc_num = rsp_params->arc4_acc_num;
13241 + attr->des_acc_num = rsp_params->des_acc_num;
13242 + attr->aes_acc_num = rsp_params->aes_acc_num;
13243 + attr->ccha_acc_num = rsp_params->ccha_acc_num;
13244 + attr->ptha_acc_num = rsp_params->ptha_acc_num;
13250 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
13251 + * @mc_io: Pointer to MC portal's I/O object
13252 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13253 + * @token: Token of DPSECI object
13254 + * @counters: Returned SEC counters
13256 + * Return: '0' on success, error code otherwise
13258 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13259 + struct dpseci_sec_counters *counters)
13261 + struct fsl_mc_command cmd = { 0 };
13262 + struct dpseci_rsp_get_sec_counters *rsp_params;
13265 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
13268 + err = mc_send_command(mc_io, &cmd);
13272 + rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
13273 + counters->dequeued_requests =
13274 + le64_to_cpu(rsp_params->dequeued_requests);
13275 + counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
13276 + counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
13277 + counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
13278 + counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
13279 + counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
13280 + counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
13286 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
13287 + * @mc_io: Pointer to MC portal's I/O object
13288 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13289 + * @major_ver: Major version of data path sec API
13290 + * @minor_ver: Minor version of data path sec API
13292 + * Return: '0' on success, error code otherwise
13294 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
13295 + u16 *major_ver, u16 *minor_ver)
13297 + struct fsl_mc_command cmd = { 0 };
13298 + struct dpseci_rsp_get_api_version *rsp_params;
13301 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
13303 + err = mc_send_command(mc_io, &cmd);
13307 + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
13308 + *major_ver = le16_to_cpu(rsp_params->major);
13309 + *minor_ver = le16_to_cpu(rsp_params->minor);
13315 + * dpseci_set_opr() - Set Order Restoration configuration
13316 + * @mc_io: Pointer to MC portal's I/O object
13317 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13318 + * @token: Token of DPSECI object
13319 + * @index: The queue index
13320 + * @options: Configuration mode options; can be OPR_OPT_CREATE or
13322 + * @cfg: Configuration options for the OPR
13324 + * Return: '0' on success, error code otherwise
13326 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13327 + u8 options, struct opr_cfg *cfg)
13329 + struct fsl_mc_command cmd = { 0 };
13330 + struct dpseci_cmd_opr *cmd_params;
13332 + cmd.header = mc_encode_cmd_header(
13333 + DPSECI_CMDID_SET_OPR,
13336 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
13337 + cmd_params->index = index;
13338 + cmd_params->options = options;
13339 + cmd_params->oloe = cfg->oloe;
13340 + cmd_params->oeane = cfg->oeane;
13341 + cmd_params->olws = cfg->olws;
13342 + cmd_params->oa = cfg->oa;
13343 + cmd_params->oprrws = cfg->oprrws;
13345 + return mc_send_command(mc_io, &cmd);
13349 + * dpseci_get_opr() - Retrieve Order Restoration config and query
13350 + * @mc_io: Pointer to MC portal's I/O object
13351 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13352 + * @token: Token of DPSECI object
13353 + * @index: The queue index
13354 + * @cfg: Returned OPR configuration
13355 + * @qry: Returned OPR query
13357 + * Return: '0' on success, error code otherwise
13359 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13360 + struct opr_cfg *cfg, struct opr_qry *qry)
13362 + struct fsl_mc_command cmd = { 0 };
13363 + struct dpseci_cmd_opr *cmd_params;
13364 + struct dpseci_rsp_get_opr *rsp_params;
13367 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
13370 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
13371 + cmd_params->index = index;
13372 + err = mc_send_command(mc_io, &cmd);
13376 + rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
13377 + qry->rip = dpseci_get_field(rsp_params->flags, OPR_RIP);
13378 + qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
13379 + cfg->oloe = rsp_params->oloe;
13380 + cfg->oeane = rsp_params->oeane;
13381 + cfg->olws = rsp_params->olws;
13382 + cfg->oa = rsp_params->oa;
13383 + cfg->oprrws = rsp_params->oprrws;
13384 + qry->nesn = le16_to_cpu(rsp_params->nesn);
13385 + qry->ndsn = le16_to_cpu(rsp_params->ndsn);
13386 + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
13387 + qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
13388 + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
13389 + qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, OPR_HSEQ_NLIS);
13390 + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
13391 + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
13392 + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
13393 + qry->opr_id = le16_to_cpu(rsp_params->opr_id);
13399 + * dpseci_set_congestion_notification() - Set congestion group
13400 + * notification configuration
13401 + * @mc_io: Pointer to MC portal's I/O object
13402 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13403 + * @token: Token of DPSECI object
13404 + * @cfg: congestion notification configuration
13406 + * Return: '0' on success, error code otherwise
13408 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13409 + u16 token, const struct dpseci_congestion_notification_cfg *cfg)
13411 + struct fsl_mc_command cmd = { 0 };
13412 + struct dpseci_cmd_congestion_notification *cmd_params;
13414 + cmd.header = mc_encode_cmd_header(
13415 + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
13418 + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
13419 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
13420 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
13421 + cmd_params->priority = cfg->dest_cfg.priority;
13422 + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
13423 + cfg->dest_cfg.dest_type);
13424 + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
13425 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
13426 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
13427 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
13428 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
13430 + return mc_send_command(mc_io, &cmd);
13434 + * dpseci_get_congestion_notification() - Get congestion group notification
13436 + * @mc_io: Pointer to MC portal's I/O object
13437 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
13438 + * @token: Token of DPSECI object
13439 + * @cfg: congestion notification configuration
13441 + * Return: '0' on success, error code otherwise
13443 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13444 + u16 token, struct dpseci_congestion_notification_cfg *cfg)
13446 + struct fsl_mc_command cmd = { 0 };
13447 + struct dpseci_cmd_congestion_notification *rsp_params;
13450 + cmd.header = mc_encode_cmd_header(
13451 + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
13454 + err = mc_send_command(mc_io, &cmd);
13458 + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
13459 + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
13460 + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
13461 + cfg->dest_cfg.priority = rsp_params->priority;
13462 + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
13464 + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
13465 + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
13466 + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
13467 + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
13468 + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
13473 +++ b/drivers/crypto/caam/dpseci.h
13476 + * Copyright 2013-2016 Freescale Semiconductor Inc.
13477 + * Copyright 2017 NXP
13479 + * Redistribution and use in source and binary forms, with or without
13480 + * modification, are permitted provided that the following conditions are met:
13481 + * * Redistributions of source code must retain the above copyright
13482 + * notice, this list of conditions and the following disclaimer.
13483 + * * Redistributions in binary form must reproduce the above copyright
13484 + * notice, this list of conditions and the following disclaimer in the
13485 + * documentation and/or other materials provided with the distribution.
13486 + * * Neither the names of the above-listed copyright holders nor the
13487 + * names of any contributors may be used to endorse or promote products
13488 + * derived from this software without specific prior written permission.
13491 + * ALTERNATIVELY, this software may be distributed under the terms of the
13492 + * GNU General Public License ("GPL") as published by the Free Software
13493 + * Foundation, either version 2 of that License or (at your option) any
13496 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13497 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
13498 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
13499 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
13500 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
13501 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
13502 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
13503 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
13504 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
13505 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
13506 + * POSSIBILITY OF SUCH DAMAGE.
13508 +#ifndef _DPSECI_H_
13509 +#define _DPSECI_H_
13512 + * Data Path SEC Interface API
13513 + * Contains initialization APIs and runtime control APIs for DPSECI
13521 + * General DPSECI macros
13525 + * Maximum number of Tx/Rx queues per DPSECI object
13527 +#define DPSECI_MAX_QUEUE_NUM 16
13530 + * All queues considered; see dpseci_set_rx_queue()
13532 +#define DPSECI_ALL_QUEUES (u8)(-1)
13534 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
13537 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13540 + * Enable the Congestion Group support
13542 +#define DPSECI_OPT_HAS_CG 0x000020
13545 + * Enable the Order Restoration support
13547 +#define DPSECI_OPT_HAS_OPR 0x000040
13550 + * Order Point Records are shared for the entire DPSECI
13552 +#define DPSECI_OPT_OPR_SHARED 0x000080
13555 + * struct dpseci_cfg - Structure representing DPSECI configuration
13556 + * @options: Any combination of the following options:
13557 + * DPSECI_OPT_HAS_CG
13558 + * DPSECI_OPT_HAS_OPR
13559 + * DPSECI_OPT_OPR_SHARED
13560 + * @num_tx_queues: num of queues towards the SEC
13561 + * @num_rx_queues: num of queues back from the SEC
13562 + * @priorities: Priorities for the SEC hardware processing;
13563 + * each place in the array is the priority of the tx queue
13564 + * towards the SEC;
13565 + * valid priorities are configured with values 1-8;
13567 +struct dpseci_cfg {
13569 + u8 num_tx_queues;
13570 + u8 num_rx_queues;
13571 + u8 priorities[DPSECI_MAX_QUEUE_NUM];
13574 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
13575 + const struct dpseci_cfg *cfg, u32 *obj_id);
13577 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
13580 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13582 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13584 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13587 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
13589 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13590 + u8 irq_index, u8 *en);
13592 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13593 + u8 irq_index, u8 en);
13595 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13596 + u8 irq_index, u32 *mask);
13598 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13599 + u8 irq_index, u32 mask);
13601 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13602 + u8 irq_index, u32 *status);
13604 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13605 + u8 irq_index, u32 status);
13608 + * struct dpseci_attr - Structure representing DPSECI attributes
13609 + * @id: DPSECI object ID
13610 + * @num_tx_queues: number of queues towards the SEC
13611 + * @num_rx_queues: number of queues back from the SEC
13612 + * @options: any combination of the following options:
13613 + * DPSECI_OPT_HAS_CG
13614 + * DPSECI_OPT_HAS_OPR
13615 + * DPSECI_OPT_OPR_SHARED
13617 +struct dpseci_attr {
13619 + u8 num_tx_queues;
13620 + u8 num_rx_queues;
13624 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13625 + struct dpseci_attr *attr);
13628 + * enum dpseci_dest - DPSECI destination types
13629 + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
13630 + * and does not generate FQDAN notifications; user is expected to dequeue
13631 + * from the queue based on polling or other user-defined method
13632 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
13633 + * notifications to the specified DPIO; user is expected to dequeue from
13634 + * the queue only after notification is received
13635 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
13636 + * FQDAN notifications, but is connected to the specified DPCON object;
13637 + * user is expected to dequeue from the DPCON channel
13639 +enum dpseci_dest {
13640 + DPSECI_DEST_NONE = 0,
13641 + DPSECI_DEST_DPIO,
13642 + DPSECI_DEST_DPCON
13646 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
13647 + * @dest_type: Destination type
13648 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
13649 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
13650 + * are 0-1 or 0-7, depending on the number of priorities in that channel;
13651 + * not relevant for 'DPSECI_DEST_NONE' option
13653 +struct dpseci_dest_cfg {
13654 + enum dpseci_dest dest_type;
13660 + * DPSECI queue modification options
13664 + * Select to modify the user's context associated with the queue
13666 +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
13669 + * Select to modify the queue's destination
13671 +#define DPSECI_QUEUE_OPT_DEST 0x00000002
13674 + * Select to modify the queue's order preservation
13676 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
13679 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
13680 + * @options: Flags representing the suggested modifications to the queue;
13681 + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
13682 + * @order_preservation_en: order preservation configuration for the rx queue
13683 + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
13684 + * @user_ctx: User context value provided in the frame descriptor of each
13685 + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
13687 + * @dest_cfg: Queue destination parameters; valid only if
13688 + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
13690 +struct dpseci_rx_queue_cfg {
13692 + int order_preservation_en;
13694 + struct dpseci_dest_cfg dest_cfg;
13697 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13698 + u8 queue, const struct dpseci_rx_queue_cfg *cfg);
13701 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
13702 + * @user_ctx: User context value provided in the frame descriptor of each
13704 + * @order_preservation_en: Status of the order preservation configuration on the
13706 + * @dest_cfg: Queue destination configuration
13707 + * @fqid: Virtual FQID value to be used for dequeue operations
13709 +struct dpseci_rx_queue_attr {
13711 + int order_preservation_en;
13712 + struct dpseci_dest_cfg dest_cfg;
13716 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13717 + u8 queue, struct dpseci_rx_queue_attr *attr);
13720 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
13721 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
13722 + * @priority: SEC hardware processing priority for the queue
13724 +struct dpseci_tx_queue_attr {
13729 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13730 + u8 queue, struct dpseci_tx_queue_attr *attr);
13733 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
13734 + * hardware accelerator
13735 + * @ip_id: ID for SEC
13736 + * @major_rev: Major revision number for SEC
13737 + * @minor_rev: Minor revision number for SEC
13739 + * @deco_num: The number of copies of the DECO that are implemented in this
13741 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
13743 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
13745 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
13746 + * implemented in this version of SEC
13747 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
13748 + * implemented in this version of SEC
13749 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
13750 + * this version of SEC
13751 + * @pk_acc_num: The number of copies of the Public Key module that are
13752 + * implemented in this version of SEC
13753 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
13754 + * implemented in this version of SEC
13755 + * @rng_acc_num: The number of copies of the Random Number Generator that are
13756 + * implemented in this version of SEC
13757 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
13758 + * implemented in this version of SEC
13759 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
13760 + * in this version of SEC
13761 + * @des_acc_num: The number of copies of the DES module that are implemented in
13762 + * this version of SEC
13763 + * @aes_acc_num: The number of copies of the AES module that are implemented in
13764 + * this version of SEC
13765 + * @ccha_acc_num: The number of copies of the ChaCha20 module that are
13766 + * implemented in this version of SEC.
13767 + * @ptha_acc_num: The number of copies of the Poly1305 module that are
13768 + * implemented in this version of SEC.
13770 +struct dpseci_sec_attr {
13776 + u8 zuc_auth_acc_num;
13777 + u8 zuc_enc_acc_num;
13778 + u8 snow_f8_acc_num;
13779 + u8 snow_f9_acc_num;
13782 + u8 kasumi_acc_num;
13792 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13793 + struct dpseci_sec_attr *attr);
13796 + * struct dpseci_sec_counters - Structure representing global SEC counters and
13797 + * not per dpseci counters
13798 + * @dequeued_requests: Number of Requests Dequeued
13799 + * @ob_enc_requests: Number of Outbound Encrypt Requests
13800 + * @ib_dec_requests: Number of Inbound Decrypt Requests
13801 + * @ob_enc_bytes: Number of Outbound Bytes Encrypted
13802 + * @ob_prot_bytes: Number of Outbound Bytes Protected
13803 + * @ib_dec_bytes: Number of Inbound Bytes Decrypted
13804 + * @ib_valid_bytes: Number of Inbound Bytes Validated
13806 +struct dpseci_sec_counters {
13807 + u64 dequeued_requests;
13808 + u64 ob_enc_requests;
13809 + u64 ib_dec_requests;
13810 + u64 ob_enc_bytes;
13811 + u64 ob_prot_bytes;
13812 + u64 ib_dec_bytes;
13813 + u64 ib_valid_bytes;
13816 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
13817 + struct dpseci_sec_counters *counters);
13819 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
13820 + u16 *major_ver, u16 *minor_ver);
13822 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13823 + u8 options, struct opr_cfg *cfg);
13825 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
13826 + struct opr_cfg *cfg, struct opr_qry *qry);
13829 + * enum dpseci_congestion_unit - DPSECI congestion units
13830 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
13831 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
13833 +enum dpseci_congestion_unit {
13834 + DPSECI_CONGESTION_UNIT_BYTES = 0,
13835 + DPSECI_CONGESTION_UNIT_FRAMES
13839 + * CSCN message is written to message_iova once entering a
13840 + * congestion state (see 'threshold_entry')
13842 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
13845 + * CSCN message is written to message_iova once exiting a
13846 + * congestion state (see 'threshold_exit')
13848 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
13851 + * CSCN write will attempt to allocate into a cache (coherent write);
13852 + * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
13854 +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
13857 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
13858 + * DPIO/DPCON's WQ channel once entering a congestion state
13859 + * (see 'threshold_entry')
13861 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
13864 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
13865 + * DPIO/DPCON's WQ channel once exiting a congestion state
13866 + * (see 'threshold_exit')
13868 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
13871 + * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
13872 + * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
13875 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
13878 + * struct dpseci_congestion_notification_cfg - congestion notification
13880 + * @units: units type
13881 + * @threshold_entry: above this threshold we enter a congestion state.
13882 + * set it to '0' to disable it
13883 + * @threshold_exit: below this threshold we exit the congestion state.
13884 + * @message_ctx: The context that will be part of the CSCN message
13885 + * @message_iova: I/O virtual address (must be in DMA-able memory),
13886 + * must be 16B aligned;
13887 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
13888 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
13891 +struct dpseci_congestion_notification_cfg {
13892 + enum dpseci_congestion_unit units;
13893 + u32 threshold_entry;
13894 + u32 threshold_exit;
13896 + u64 message_iova;
13897 + struct dpseci_dest_cfg dest_cfg;
13898 + u16 notification_mode;
13901 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13902 + u16 token, const struct dpseci_congestion_notification_cfg *cfg);
13904 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
13905 + u16 token, struct dpseci_congestion_notification_cfg *cfg);
13907 +#endif /* _DPSECI_H_ */
13909 +++ b/drivers/crypto/caam/dpseci_cmd.h
13912 + * Copyright 2013-2016 Freescale Semiconductor Inc.
13913 + * Copyright 2017 NXP
13915 + * Redistribution and use in source and binary forms, with or without
13916 + * modification, are permitted provided that the following conditions are met:
13917 + * * Redistributions of source code must retain the above copyright
13918 + * notice, this list of conditions and the following disclaimer.
13919 + * * Redistributions in binary form must reproduce the above copyright
13920 + * notice, this list of conditions and the following disclaimer in the
13921 + * documentation and/or other materials provided with the distribution.
13922 + * * Neither the names of the above-listed copyright holders nor the
13923 + * names of any contributors may be used to endorse or promote products
13924 + * derived from this software without specific prior written permission.
13927 + * ALTERNATIVELY, this software may be distributed under the terms of the
13928 + * GNU General Public License ("GPL") as published by the Free Software
13929 + * Foundation, either version 2 of that License or (at your option) any
13932 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
13933 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
13934 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
13935 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
13936 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
13937 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
13938 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
13939 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
13940 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
13941 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
13942 + * POSSIBILITY OF SUCH DAMAGE.
13945 +#ifndef _DPSECI_CMD_H_
13946 +#define _DPSECI_CMD_H_
13948 +/* DPSECI Version */
13949 +#define DPSECI_VER_MAJOR 5
13950 +#define DPSECI_VER_MINOR 3
13952 +#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
13953 +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
13955 +/* Command versioning */
13956 +#define DPSECI_CMD_BASE_VERSION 1
13957 +#define DPSECI_CMD_BASE_VERSION_V2 2
13958 +#define DPSECI_CMD_BASE_VERSION_V3 3
13959 +#define DPSECI_CMD_ID_OFFSET 4
13961 +#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
13962 + DPSECI_CMD_BASE_VERSION)
13964 +#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
13965 + DPSECI_CMD_BASE_VERSION_V2)
13967 +#define DPSECI_CMD_V3(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
13968 + DPSECI_CMD_BASE_VERSION_V3)
13971 +#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
13972 +#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
13973 +#define DPSECI_CMDID_CREATE DPSECI_CMD_V3(0x909)
13974 +#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
13975 +#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
13977 +#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
13978 +#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
13979 +#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
13980 +#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005)
13981 +#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
13983 +#define DPSECI_CMDID_SET_IRQ_ENABLE DPSECI_CMD_V1(0x012)
13984 +#define DPSECI_CMDID_GET_IRQ_ENABLE DPSECI_CMD_V1(0x013)
13985 +#define DPSECI_CMDID_SET_IRQ_MASK DPSECI_CMD_V1(0x014)
13986 +#define DPSECI_CMDID_GET_IRQ_MASK DPSECI_CMD_V1(0x015)
13987 +#define DPSECI_CMDID_GET_IRQ_STATUS DPSECI_CMD_V1(0x016)
13988 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS DPSECI_CMD_V1(0x017)
13990 +#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
13991 +#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
13992 +#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
13993 +#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
13994 +#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
13995 +#define DPSECI_CMDID_SET_OPR DPSECI_CMD_V1(0x19A)
13996 +#define DPSECI_CMDID_GET_OPR DPSECI_CMD_V1(0x19B)
13997 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
13998 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
14000 +/* Macros for accessing command fields smaller than 1 byte */
14001 +#define DPSECI_MASK(field) \
14002 + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
14003 + DPSECI_##field##_SHIFT)
14005 +#define dpseci_set_field(var, field, val) \
14006 + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
14008 +#define dpseci_get_field(var, field) \
14009 + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
14011 +struct dpseci_cmd_open {
14012 + __le32 dpseci_id;
14015 +struct dpseci_cmd_create {
14016 + u8 priorities[8];
14017 + u8 num_tx_queues;
14018 + u8 num_rx_queues;
14022 + u8 priorities2[8];
14025 +struct dpseci_cmd_destroy {
14026 + __le32 object_id;
14029 +#define DPSECI_ENABLE_SHIFT 0
14030 +#define DPSECI_ENABLE_SIZE 1
14032 +struct dpseci_rsp_is_enabled {
14036 +struct dpseci_cmd_irq_enable {
14042 +struct dpseci_rsp_get_irq_enable {
14046 +struct dpseci_cmd_irq_mask {
14051 +struct dpseci_cmd_irq_status {
14056 +struct dpseci_rsp_get_attributes {
14059 + u8 num_tx_queues;
14060 + u8 num_rx_queues;
14065 +#define DPSECI_DEST_TYPE_SHIFT 0
14066 +#define DPSECI_DEST_TYPE_SIZE 4
14068 +#define DPSECI_ORDER_PRESERVATION_SHIFT 0
14069 +#define DPSECI_ORDER_PRESERVATION_SIZE 1
14071 +struct dpseci_cmd_queue {
14082 + u8 order_preservation_en;
14085 +struct dpseci_rsp_get_tx_queue {
14091 +struct dpseci_rsp_get_sec_attr {
14098 + u8 zuc_auth_acc_num;
14099 + u8 zuc_enc_acc_num;
14101 + u8 snow_f8_acc_num;
14102 + u8 snow_f9_acc_num;
14106 + u8 kasumi_acc_num;
14117 +struct dpseci_rsp_get_sec_counters {
14118 + __le64 dequeued_requests;
14119 + __le64 ob_enc_requests;
14120 + __le64 ib_dec_requests;
14121 + __le64 ob_enc_bytes;
14122 + __le64 ob_prot_bytes;
14123 + __le64 ib_dec_bytes;
14124 + __le64 ib_valid_bytes;
14127 +struct dpseci_rsp_get_api_version {
14132 +struct dpseci_cmd_opr {
14144 +#define DPSECI_OPR_RIP_SHIFT 0
14145 +#define DPSECI_OPR_RIP_SIZE 1
14146 +#define DPSECI_OPR_ENABLE_SHIFT 1
14147 +#define DPSECI_OPR_ENABLE_SIZE 1
14148 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT 0
14149 +#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
14150 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT 0
14151 +#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
14153 +struct dpseci_rsp_get_opr {
14181 +#define DPSECI_CGN_DEST_TYPE_SHIFT 0
14182 +#define DPSECI_CGN_DEST_TYPE_SIZE 4
14183 +#define DPSECI_CGN_UNITS_SHIFT 4
14184 +#define DPSECI_CGN_UNITS_SIZE 2
14186 +struct dpseci_cmd_congestion_notification {
14188 + __le16 notification_mode;
14191 + __le64 message_iova;
14192 + __le64 message_ctx;
14193 + __le32 threshold_entry;
14194 + __le32 threshold_exit;
14197 +#endif /* _DPSECI_CMD_H_ */
14198 --- a/drivers/crypto/caam/error.c
14199 +++ b/drivers/crypto/caam/error.c
14200 @@ -50,6 +50,12 @@ void caam_dump_sg(const char *level, con
14202 EXPORT_SYMBOL(caam_dump_sg);
14204 +bool caam_little_end;
14205 +EXPORT_SYMBOL(caam_little_end);
14208 +EXPORT_SYMBOL(caam_imx);
14210 static const struct {
14212 const char *error_text;
14213 @@ -108,6 +114,54 @@ static const struct {
14214 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
14217 +static const struct {
14219 + const char *error_text;
14220 +} qi_error_list[] = {
14221 + { 0x1F, "Job terminated by FQ or ICID flush" },
14222 + { 0x20, "FD format error"},
14223 + { 0x21, "FD command format error"},
14224 + { 0x23, "FL format error"},
14225 + { 0x25, "CRJD specified in FD, but not enabled in FLC"},
14226 + { 0x30, "Max. buffer size too small"},
14227 + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
14228 + { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format"},
14229 + { 0x33, "Size over/underflow (allocate mode)"},
14230 + { 0x34, "Size over/underflow (reuse mode)"},
14231 + { 0x35, "Length exceeds max. short length (allocate mode, S/G/ format)"},
14232 + { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G/ format)"},
14233 + { 0x41, "SBC frame format not supported (allocate mode)"},
14234 + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
14235 + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
14236 + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
14237 + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
14238 + { 0x46, "Annotation length exceeds offset (reuse mode)"},
14239 + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
14240 + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
14241 + { 0x4B, "Annotation output enabled but ASA cannote be expanded (frame list)"},
14242 + { 0x51, "Unsupported IF reuse mode"},
14243 + { 0x52, "Unsupported FL use mode"},
14244 + { 0x53, "Unsupported RJD use mode"},
14245 + { 0x54, "Unsupported inline descriptor use mode"},
14246 + { 0xC0, "Table buffer pool 0 depletion"},
14247 + { 0xC1, "Table buffer pool 1 depletion"},
14248 + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
14249 + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
14250 + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
14251 + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
14252 + { 0xD0, "FLC read error"},
14253 + { 0xD1, "FL read error"},
14254 + { 0xD2, "FL write error"},
14255 + { 0xD3, "OF SGT write error"},
14256 + { 0xD4, "PTA read error"},
14257 + { 0xD5, "PTA write error"},
14258 + { 0xD6, "OF SGT F-bit write error"},
14259 + { 0xD7, "ASA write error"},
14260 + { 0xE1, "FLC[ICR]=0 ICID error"},
14261 + { 0xE2, "FLC[ICR]=1 ICID error"},
14262 + { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
14265 static const char * const cha_id_list[] = {
14268 @@ -236,6 +290,27 @@ static void report_deco_status(struct de
14269 status, error, idx_str, idx, err_str, err_err_code);
14272 +static void report_qi_status(struct device *qidev, const u32 status,
14273 + const char *error)
14275 + u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
14276 + const char *err_str = "unidentified error value 0x";
14277 + char err_err_code[3] = { 0 };
14280 + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
14281 + if (qi_error_list[i].value == err_id)
14284 + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
14285 + err_str = qi_error_list[i].error_text;
14287 + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
14289 + dev_err(qidev, "%08x: %s: %s%s\n",
14290 + status, error, err_str, err_err_code);
14293 static void report_jr_status(struct device *jrdev, const u32 status,
14296 @@ -250,7 +325,7 @@ static void report_cond_code_status(stru
14297 status, error, __func__);
14300 -void caam_jr_strstatus(struct device *jrdev, u32 status)
14301 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
14303 static const struct stat_src {
14304 void (*report_ssed)(struct device *jrdev, const u32 status,
14305 @@ -262,7 +337,7 @@ void caam_jr_strstatus(struct device *jr
14306 { report_ccb_status, "CCB" },
14307 { report_jump_status, "Jump" },
14308 { report_deco_status, "DECO" },
14309 - { NULL, "Queue Manager Interface" },
14310 + { report_qi_status, "Queue Manager Interface" },
14311 { report_jr_status, "Job Ring" },
14312 { report_cond_code_status, "Condition Code" },
14314 @@ -288,4 +363,4 @@ void caam_jr_strstatus(struct device *jr
14316 dev_err(jrdev, "%d: unknown error source\n", ssrc);
14318 -EXPORT_SYMBOL(caam_jr_strstatus);
14319 +EXPORT_SYMBOL(caam_strstatus);
14320 --- a/drivers/crypto/caam/error.h
14321 +++ b/drivers/crypto/caam/error.h
14323 #ifndef CAAM_ERROR_H
14324 #define CAAM_ERROR_H
14325 #define CAAM_ERROR_STR_MAX 302
14326 -void caam_jr_strstatus(struct device *jrdev, u32 status);
14328 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
14330 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
14331 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
14333 void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
14334 int rowsize, int groupsize, struct scatterlist *sg,
14335 --- a/drivers/crypto/caam/intern.h
14336 +++ b/drivers/crypto/caam/intern.h
14337 @@ -65,10 +65,6 @@ struct caam_drv_private_jr {
14338 * Driver-private storage for a single CAAM block instance
14340 struct caam_drv_private {
14341 -#ifdef CONFIG_CAAM_QI
14342 - struct device *qidev;
14345 /* Physical-presence section */
14346 struct caam_ctrl __iomem *ctrl; /* controller region */
14347 struct caam_deco __iomem *deco; /* DECO/CCB views */
14348 @@ -76,14 +72,21 @@ struct caam_drv_private {
14349 struct caam_queue_if __iomem *qi; /* QI control region */
14350 struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
14352 + struct iommu_domain *domain;
14355 * Detected geometry block. Filled in from device tree if powerpc,
14356 * or from register-based version detection code
14358 u8 total_jobrs; /* Total Job Rings in device */
14359 u8 qi_present; /* Nonzero if QI present in device */
14360 +#ifdef CONFIG_CAAM_QI
14361 + u8 qi_init; /* Nonzero if QI has been initialized */
14363 + u8 mc_en; /* Nonzero if MC f/w is active */
14364 int secvio_irq; /* Security violation interrupt number */
14365 int virt_en; /* Virtualization enabled in CAAM */
14366 + int era; /* CAAM Era (internal HW revision) */
14368 #define RNG4_MAX_HANDLES 2
14370 @@ -108,8 +111,95 @@ struct caam_drv_private {
14374 -void caam_jr_algapi_init(struct device *dev);
14375 -void caam_jr_algapi_remove(struct device *dev);
14376 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
14378 +int caam_algapi_init(struct device *dev);
14379 +void caam_algapi_exit(void);
14383 +static inline int caam_algapi_init(struct device *dev)
14388 +static inline void caam_algapi_exit(void)
14392 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */
14394 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
14396 +int caam_algapi_hash_init(struct device *dev);
14397 +void caam_algapi_hash_exit(void);
14401 +static inline int caam_algapi_hash_init(struct device *dev)
14406 +static inline void caam_algapi_hash_exit(void)
14410 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */
14412 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API
14414 +int caam_pkc_init(struct device *dev);
14415 +void caam_pkc_exit(void);
14419 +static inline int caam_pkc_init(struct device *dev)
14424 +static inline void caam_pkc_exit(void)
14428 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */
14430 +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
14432 +int caam_rng_init(struct device *dev);
14433 +void caam_rng_exit(void);
14437 +static inline int caam_rng_init(struct device *dev)
14442 +static inline void caam_rng_exit(void)
14446 +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
14448 +#ifdef CONFIG_CAAM_QI
14450 +int caam_qi_algapi_init(struct device *dev);
14451 +void caam_qi_algapi_exit(void);
14455 +static inline int caam_qi_algapi_init(struct device *dev)
14460 +static inline void caam_qi_algapi_exit(void)
14464 +#endif /* CONFIG_CAAM_QI */
14466 #ifdef CONFIG_DEBUG_FS
14467 static int caam_debugfs_u64_get(void *data, u64 *val)
14468 --- a/drivers/crypto/caam/jr.c
14469 +++ b/drivers/crypto/caam/jr.c
14470 @@ -23,6 +23,52 @@ struct jr_driver_data {
14472 static struct jr_driver_data driver_data;
14474 +static int jr_driver_probed;
14476 +int caam_jr_driver_probed(void)
14478 + return jr_driver_probed;
14480 +EXPORT_SYMBOL(caam_jr_driver_probed);
14482 +static DEFINE_MUTEX(algs_lock);
14483 +static unsigned int active_devs;
14485 +static void register_algs(struct device *dev)
14487 + mutex_lock(&algs_lock);
14489 + if (++active_devs != 1)
14490 + goto algs_unlock;
14492 + caam_algapi_init(dev);
14493 + caam_algapi_hash_init(dev);
14494 + caam_pkc_init(dev);
14495 + caam_rng_init(dev);
14496 + caam_qi_algapi_init(dev);
14499 + mutex_unlock(&algs_lock);
14502 +static void unregister_algs(void)
14504 + mutex_lock(&algs_lock);
14506 + if (--active_devs != 0)
14507 + goto algs_unlock;
14509 + caam_qi_algapi_exit();
14513 + caam_algapi_hash_exit();
14514 + caam_algapi_exit();
14517 + mutex_unlock(&algs_lock);
14520 static int caam_reset_hw_jr(struct device *dev)
14522 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
14523 @@ -108,6 +154,9 @@ static int caam_jr_remove(struct platfor
14527 + /* Unregister JR-based RNG & crypto algorithms */
14528 + unregister_algs();
14530 /* Remove the node from Physical JobR list maintained by driver */
14531 spin_lock(&driver_data.jr_alloc_lock);
14532 list_del(&jrpriv->list_node);
14533 @@ -119,6 +168,8 @@ static int caam_jr_remove(struct platfor
14534 dev_err(jrdev, "Failed to shut down job ring\n");
14535 irq_dispose_mapping(jrpriv->irq);
14537 + jr_driver_probed--;
14542 @@ -282,6 +333,36 @@ struct device *caam_jr_alloc(void)
14543 EXPORT_SYMBOL(caam_jr_alloc);
14546 + * caam_jridx_alloc() - Alloc a specific job ring based on its index.
14548 + * returns : pointer to the newly allocated physical
14549 + * JobR device, which can be written to if successful.
14551 +struct device *caam_jridx_alloc(int idx)
14553 + struct caam_drv_private_jr *jrpriv;
14554 + struct device *dev = ERR_PTR(-ENODEV);
14556 + spin_lock(&driver_data.jr_alloc_lock);
14558 + if (list_empty(&driver_data.jr_list))
14561 + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
14562 + if (jrpriv->ridx == idx) {
14563 + atomic_inc(&jrpriv->tfm_count);
14564 + dev = jrpriv->dev;
14570 + spin_unlock(&driver_data.jr_alloc_lock);
14573 +EXPORT_SYMBOL(caam_jridx_alloc);
14576 * caam_jr_free() - Free the Job Ring
14577 * @rdev - points to the dev that identifies the Job ring to
14579 @@ -539,6 +620,9 @@ static int caam_jr_probe(struct platform
14581 atomic_set(&jrpriv->tfm_count, 0);
14583 + register_algs(jrdev->parent);
14584 + jr_driver_probed++;
14589 --- a/drivers/crypto/caam/jr.h
14590 +++ b/drivers/crypto/caam/jr.h
14594 /* Prototypes for backend-level services exposed to APIs */
14595 +int caam_jr_driver_probed(void);
14596 struct device *caam_jr_alloc(void);
14597 +struct device *caam_jridx_alloc(int idx);
14598 void caam_jr_free(struct device *rdev);
14599 int caam_jr_enqueue(struct device *dev, u32 *desc,
14600 void (*cbk)(struct device *dev, u32 *desc, u32 status,
14601 --- a/drivers/crypto/caam/key_gen.c
14602 +++ b/drivers/crypto/caam/key_gen.c
14604 #include "desc_constr.h"
14605 #include "key_gen.h"
14608 - * split_key_len - Compute MDHA split key length for a given algorithm
14609 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14610 - * SHA224, SHA384, SHA512.
14612 - * Return: MDHA split key length
14614 -static inline u32 split_key_len(u32 hash)
14616 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
14617 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
14620 - idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
14622 - return (u32)(mdpadlen[idx] * 2);
14626 - * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
14627 - * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14628 - * SHA224, SHA384, SHA512.
14630 - * Return: MDHA split key pad length
14632 -static inline u32 split_key_pad_len(u32 hash)
14634 - return ALIGN(split_key_len(hash), 16);
14637 void split_key_done(struct device *dev, u32 *desc, u32 err,
14640 --- a/drivers/crypto/caam/key_gen.h
14641 +++ b/drivers/crypto/caam/key_gen.h
14647 + * split_key_len - Compute MDHA split key length for a given algorithm
14648 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14649 + * SHA224, SHA384, SHA512.
14651 + * Return: MDHA split key length
14653 +static inline u32 split_key_len(u32 hash)
14655 + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
14656 + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
14659 + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
14661 + return (u32)(mdpadlen[idx] * 2);
14665 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
14666 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
14667 + * SHA224, SHA384, SHA512.
14669 + * Return: MDHA split key pad length
14671 +static inline u32 split_key_pad_len(u32 hash)
14673 + return ALIGN(split_key_len(hash), 16);
14676 struct split_key_result {
14677 struct completion completion;
14679 --- a/drivers/crypto/caam/qi.c
14680 +++ b/drivers/crypto/caam/qi.c
14683 #include <linux/cpumask.h>
14684 #include <linux/kthread.h>
14685 -#include <soc/fsl/qman.h>
14686 +#include <linux/fsl_qman.h>
14690 @@ -58,11 +58,9 @@ static DEFINE_PER_CPU(int, last_cpu);
14692 * caam_qi_priv - CAAM QI backend private params
14693 * @cgr: QMan congestion group
14694 - * @qi_pdev: platform device for QI backend
14696 struct caam_qi_priv {
14697 struct qman_cgr cgr;
14698 - struct platform_device *qi_pdev;
14701 static struct caam_qi_priv qipriv ____cacheline_aligned;
14702 @@ -102,26 +100,34 @@ static int mod_init_cpu;
14704 static struct kmem_cache *qi_cache;
14706 +static void *caam_iova_to_virt(struct iommu_domain *domain,
14707 + dma_addr_t iova_addr)
14709 + phys_addr_t phys_addr;
14711 + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
14713 + return phys_to_virt(phys_addr);
14716 int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
14721 int num_retries = 0;
14723 - qm_fd_clear_fd(&fd);
14724 - qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
14726 - addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
14728 + fd.format = qm_fd_compound;
14729 + fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
14730 + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
14731 DMA_BIDIRECTIONAL);
14732 - if (dma_mapping_error(qidev, addr)) {
14733 + if (dma_mapping_error(qidev, fd.addr)) {
14734 dev_err(qidev, "DMA mapping error for QI enqueue request\n");
14737 - qm_fd_addr_set64(&fd, addr);
14740 - ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
14741 + ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
14745 @@ -137,20 +143,21 @@ int caam_qi_enqueue(struct device *qidev
14746 EXPORT_SYMBOL(caam_qi_enqueue);
14748 static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
14749 - const union qm_mr_entry *msg)
14750 + const struct qm_mr_entry *msg)
14752 const struct qm_fd *fd;
14753 struct caam_drv_req *drv_req;
14754 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
14755 + struct caam_drv_private *priv = dev_get_drvdata(qidev);
14759 - if (qm_fd_get_format(fd) != qm_fd_compound) {
14760 + if (fd->format != qm_fd_compound) {
14761 dev_err(qidev, "Non-compound FD from CAAM\n");
14765 - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
14766 + drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
14769 "Can't find original request for CAAM response\n");
14770 @@ -180,20 +187,22 @@ static struct qman_fq *create_caam_req_f
14771 req_fq->cb.fqs = NULL;
14773 ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
14774 - QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
14775 + QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
14778 dev_err(qidev, "Failed to create session req FQ\n");
14779 goto create_req_fq_fail;
14782 - memset(&opts, 0, sizeof(opts));
14783 - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14784 - QM_INITFQ_WE_CONTEXTB |
14785 - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
14786 - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
14787 - qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
14788 - opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
14789 - qm_fqd_context_a_set64(&opts.fqd, hwdesc);
14790 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14791 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
14792 + QM_INITFQ_WE_CGID;
14793 + opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
14794 + opts.fqd.dest.channel = qm_channel_caam;
14795 + opts.fqd.dest.wq = 2;
14796 + opts.fqd.context_b = qman_fq_fqid(rsp_fq);
14797 + opts.fqd.context_a.hi = upper_32_bits(hwdesc);
14798 + opts.fqd.context_a.lo = lower_32_bits(hwdesc);
14799 opts.fqd.cgid = qipriv.cgr.cgrid;
14801 ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
14802 @@ -207,7 +216,7 @@ static struct qman_fq *create_caam_req_f
14806 - qman_destroy_fq(req_fq);
14807 + qman_destroy_fq(req_fq, 0);
14808 create_req_fq_fail:
14810 return ERR_PTR(ret);
14811 @@ -275,7 +284,7 @@ empty_fq:
14813 dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
14815 - qman_destroy_fq(fq);
14816 + qman_destroy_fq(fq, 0);
14820 @@ -292,7 +301,7 @@ static int empty_caam_fq(struct qman_fq
14824 - if (!qm_mcr_np_get(&np, frm_cnt))
14829 @@ -495,7 +504,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel);
14830 int caam_qi_shutdown(struct device *qidev)
14833 - struct caam_qi_priv *priv = dev_get_drvdata(qidev);
14834 + struct caam_qi_priv *priv = &qipriv;
14835 const cpumask_t *cpus = qman_affine_cpus();
14836 struct cpumask old_cpumask = current->cpus_allowed;
14838 @@ -528,7 +537,6 @@ int caam_qi_shutdown(struct device *qide
14839 /* Now that we're done with the CGRs, restore the cpus allowed mask */
14840 set_cpus_allowed_ptr(current, &old_cpumask);
14842 - platform_device_unregister(priv->qi_pdev);
14846 @@ -572,22 +580,28 @@ static enum qman_cb_dqrr_result caam_rsp
14847 struct caam_drv_req *drv_req;
14848 const struct qm_fd *fd;
14849 struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
14851 + struct caam_drv_private *priv = dev_get_drvdata(qidev);
14853 if (caam_qi_napi_schedule(p, caam_napi))
14854 return qman_cb_dqrr_stop;
14857 - status = be32_to_cpu(fd->status);
14858 - if (unlikely(status))
14859 - dev_err(qidev, "Error: %#x in CAAM response FD\n", status);
14860 + if (unlikely(fd->status)) {
14861 + u32 ssrc = fd->status & JRSTA_SSRC_MASK;
14862 + u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
14864 - if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
14865 + if (ssrc != JRSTA_SSRC_CCB_ERROR ||
14866 + err_id != JRSTA_CCBERR_ERRID_ICVCHK)
14867 + dev_err(qidev, "Error: %#x in CAAM response FD\n",
14871 + if (unlikely(fd->format != qm_fd_compound)) {
14872 dev_err(qidev, "Non-compound FD from CAAM\n");
14873 return qman_cb_dqrr_consume;
14876 - drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
14877 + drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
14878 if (unlikely(!drv_req)) {
14880 "Can't find original request for caam response\n");
14881 @@ -597,7 +611,7 @@ static enum qman_cb_dqrr_result caam_rsp
14882 dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
14883 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
14885 - drv_req->cbk(drv_req, status);
14886 + drv_req->cbk(drv_req, fd->status);
14887 return qman_cb_dqrr_consume;
14890 @@ -621,17 +635,18 @@ static int alloc_rsp_fq_cpu(struct devic
14894 - memset(&opts, 0, sizeof(opts));
14895 - opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14896 - QM_INITFQ_WE_CONTEXTB |
14897 - QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
14898 - opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
14899 - QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
14900 - qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
14901 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
14902 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
14903 + QM_INITFQ_WE_CGID;
14904 + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
14906 + opts.fqd.dest.channel = qman_affine_channel(cpu);
14907 + opts.fqd.dest.wq = 3;
14908 opts.fqd.cgid = qipriv.cgr.cgrid;
14909 opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
14910 QM_STASHING_EXCL_DATA;
14911 - qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
14912 + opts.fqd.context_a.stashing.data_cl = 1;
14913 + opts.fqd.context_a.stashing.context_cl = 1;
14915 ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
14917 @@ -650,9 +665,8 @@ static int init_cgr(struct device *qidev
14920 struct qm_mcc_initcgr opts;
14921 - const u64 cpus = *(u64 *)qman_affine_cpus();
14922 - const int num_cpus = hweight64(cpus);
14923 - const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
14924 + const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
14925 + MAX_RSP_FQ_BACKLOG_PER_CPU;
14927 ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
14929 @@ -662,8 +676,7 @@ static int init_cgr(struct device *qidev
14931 qipriv.cgr.cb = cgr_cb;
14932 memset(&opts, 0, sizeof(opts));
14933 - opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
14935 + opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
14936 opts.cgr.cscn_en = QM_CGR_EN;
14937 opts.cgr.mode = QMAN_CGR_MODE_FRAME;
14938 qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
14939 @@ -708,15 +721,10 @@ static void free_rsp_fqs(void)
14940 int caam_qi_init(struct platform_device *caam_pdev)
14943 - struct platform_device *qi_pdev;
14944 struct device *ctrldev = &caam_pdev->dev, *qidev;
14945 struct caam_drv_private *ctrlpriv;
14946 const cpumask_t *cpus = qman_affine_cpus();
14947 struct cpumask old_cpumask = current->cpus_allowed;
14948 - static struct platform_device_info qi_pdev_info = {
14949 - .name = "caam_qi",
14950 - .id = PLATFORM_DEVID_NONE
14954 * QMAN requires CGRs to be removed from same CPU+portal from where it
14955 @@ -728,24 +736,13 @@ int caam_qi_init(struct platform_device
14956 mod_init_cpu = cpumask_first(cpus);
14957 set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
14959 - qi_pdev_info.parent = ctrldev;
14960 - qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
14961 - qi_pdev = platform_device_register_full(&qi_pdev_info);
14962 - if (IS_ERR(qi_pdev))
14963 - return PTR_ERR(qi_pdev);
14964 - set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
14966 ctrlpriv = dev_get_drvdata(ctrldev);
14967 - qidev = &qi_pdev->dev;
14969 - qipriv.qi_pdev = qi_pdev;
14970 - dev_set_drvdata(qidev, &qipriv);
14973 /* Initialize the congestion detection */
14974 err = init_cgr(qidev);
14976 dev_err(qidev, "CGR initialization failed: %d\n", err);
14977 - platform_device_unregister(qi_pdev);
14981 @@ -754,7 +751,6 @@ int caam_qi_init(struct platform_device
14983 dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
14985 - platform_device_unregister(qi_pdev);
14989 @@ -777,15 +773,11 @@ int caam_qi_init(struct platform_device
14990 napi_enable(irqtask);
14993 - /* Hook up QI device to parent controlling caam device */
14994 - ctrlpriv->qidev = qidev;
14996 qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
14997 SLAB_CACHE_DMA, NULL);
14999 dev_err(qidev, "Can't allocate CAAM cache\n");
15001 - platform_device_unregister(qi_pdev);
15005 @@ -795,6 +787,8 @@ int caam_qi_init(struct platform_device
15006 debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
15007 ×_congested, &caam_fops_u64_ro);
15010 + ctrlpriv->qi_init = 1;
15011 dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
15014 --- a/drivers/crypto/caam/qi.h
15015 +++ b/drivers/crypto/caam/qi.h
15020 -#include <soc/fsl/qman.h>
15021 +#include <linux/fsl_qman.h>
15022 #include "compat.h"
15024 #include "desc_constr.h"
15025 --- a/drivers/crypto/caam/regs.h
15026 +++ b/drivers/crypto/caam/regs.h
15028 * CAAM hardware register-level view
15030 * Copyright 2008-2011 Freescale Semiconductor, Inc.
15031 + * Copyright 2018 NXP
15035 @@ -211,6 +212,47 @@ struct jr_outentry {
15036 u32 jrstatus; /* Status for completed descriptor */
15039 +/* Version registers (Era 10+) e80-eff */
15040 +struct version_regs {
15041 + u32 crca; /* CRCA_VERSION */
15042 + u32 afha; /* AFHA_VERSION */
15043 + u32 kfha; /* KFHA_VERSION */
15044 + u32 pkha; /* PKHA_VERSION */
15045 + u32 aesa; /* AESA_VERSION */
15046 + u32 mdha; /* MDHA_VERSION */
15047 + u32 desa; /* DESA_VERSION */
15048 + u32 snw8a; /* SNW8A_VERSION */
15049 + u32 snw9a; /* SNW9A_VERSION */
15050 + u32 zuce; /* ZUCE_VERSION */
15051 + u32 zuca; /* ZUCA_VERSION */
15052 + u32 ccha; /* CCHA_VERSION */
15053 + u32 ptha; /* PTHA_VERSION */
15054 + u32 rng; /* RNG_VERSION */
15055 + u32 trng; /* TRNG_VERSION */
15056 + u32 aaha; /* AAHA_VERSION */
15058 + u32 sr; /* SR_VERSION */
15059 + u32 dma; /* DMA_VERSION */
15060 + u32 ai; /* AI_VERSION */
15061 + u32 qi; /* QI_VERSION */
15062 + u32 jr; /* JR_VERSION */
15063 + u32 deco; /* DECO_VERSION */
15066 +/* Version registers bitfields */
15068 +/* Number of CHAs instantiated */
15069 +#define CHA_VER_NUM_MASK 0xffull
15070 +/* CHA Miscellaneous Information */
15071 +#define CHA_VER_MISC_SHIFT 8
15072 +#define CHA_VER_MISC_MASK (0xffull << CHA_VER_MISC_SHIFT)
15073 +/* CHA Revision Number */
15074 +#define CHA_VER_REV_SHIFT 16
15075 +#define CHA_VER_REV_MASK (0xffull << CHA_VER_REV_SHIFT)
15076 +/* CHA Version ID */
15077 +#define CHA_VER_VID_SHIFT 24
15078 +#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
15081 * caam_perfmon - Performance Monitor/Secure Memory Status/
15082 * CAAM Global Status/Component Version IDs
15083 @@ -223,15 +265,13 @@ struct jr_outentry {
15084 #define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
15087 - * CHA version IDs / instantiation bitfields
15088 + * CHA version IDs / instantiation bitfields (< Era 10)
15089 * Defined for use with the cha_id fields in perfmon, but the same shift/mask
15090 * selectors can be used to pull out the number of instantiated blocks within
15091 * cha_num fields in perfmon because the locations are the same.
15093 #define CHA_ID_LS_AES_SHIFT 0
15094 #define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
15095 -#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT)
15096 -#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT)
15098 #define CHA_ID_LS_DES_SHIFT 4
15099 #define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
15100 @@ -241,9 +281,6 @@ struct jr_outentry {
15102 #define CHA_ID_LS_MD_SHIFT 12
15103 #define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
15104 -#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT)
15105 -#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT)
15106 -#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT)
15108 #define CHA_ID_LS_RNG_SHIFT 16
15109 #define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
15110 @@ -269,6 +306,13 @@ struct jr_outentry {
15111 #define CHA_ID_MS_JR_SHIFT 28
15112 #define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
15114 +/* Specific CHA version IDs */
15115 +#define CHA_VER_VID_AES_LP 0x3ull
15116 +#define CHA_VER_VID_AES_HP 0x4ull
15117 +#define CHA_VER_VID_MD_LP256 0x0ull
15118 +#define CHA_VER_VID_MD_LP512 0x1ull
15119 +#define CHA_VER_VID_MD_HP 0x2ull
15124 @@ -473,8 +517,10 @@ struct caam_ctrl {
15125 struct rng4tst r4tst[2];
15131 + /* Version registers - introduced with era 10 e80-eff */
15132 + struct version_regs vreg;
15133 /* Performance Monitor f00-fff */
15134 struct caam_perfmon perfmon;
15136 @@ -564,8 +610,10 @@ struct caam_job_ring {
15138 u32 jrcommand; /* JRCRx - JobR command */
15143 + /* Version registers - introduced with era 10 e80-eff */
15144 + struct version_regs vreg;
15145 /* Performance Monitor f00-fff */
15146 struct caam_perfmon perfmon;
15148 @@ -627,6 +675,8 @@ struct caam_job_ring {
15149 #define JRSTA_DECOERR_INVSIGN 0x86
15150 #define JRSTA_DECOERR_DSASIGN 0x87
15152 +#define JRSTA_QIERR_ERROR_MASK 0x00ff
15154 #define JRSTA_CCBERR_JUMP 0x08000000
15155 #define JRSTA_CCBERR_INDEX_MASK 0xff00
15156 #define JRSTA_CCBERR_INDEX_SHIFT 8
15157 @@ -870,13 +920,19 @@ struct caam_deco {
15159 u32 descbuf[64]; /* DxDESB - Descriptor buffer */
15161 -#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
15162 #define DESC_DBG_DECO_STAT_VALID 0x80000000
15163 #define DESC_DBG_DECO_STAT_MASK 0x00F00000
15164 +#define DESC_DBG_DECO_STAT_SHIFT 20
15165 u32 desc_dbg; /* DxDDR - DECO Debug Register */
15168 +#define DESC_DER_DECO_STAT_MASK 0x000F0000
15169 +#define DESC_DER_DECO_STAT_SHIFT 16
15170 + u32 dbg_exec; /* DxDER - DECO Debug Exec Register */
15174 +#define DECO_STAT_HOST_ERR 0xD
15176 #define DECO_JQCR_WHL 0x20000000
15177 #define DECO_JQCR_FOUR 0x10000000
15179 --- a/drivers/crypto/caam/sg_sw_qm.h
15180 +++ b/drivers/crypto/caam/sg_sw_qm.h
15181 @@ -34,46 +34,61 @@
15182 #ifndef __SG_SW_QM_H
15183 #define __SG_SW_QM_H
15185 -#include <soc/fsl/qman.h>
15186 +#include <linux/fsl_qman.h>
15189 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
15191 + dma_addr_t addr = qm_sg_ptr->opaque;
15193 + qm_sg_ptr->opaque = cpu_to_caam64(addr);
15194 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
15197 static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
15199 + u32 len, u16 offset)
15201 - qm_sg_entry_set64(qm_sg_ptr, dma);
15202 + qm_sg_ptr->addr = dma;
15203 + qm_sg_ptr->length = len;
15204 qm_sg_ptr->__reserved2 = 0;
15205 qm_sg_ptr->bpid = 0;
15206 - qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
15207 + qm_sg_ptr->__reserved3 = 0;
15208 + qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
15210 + cpu_to_hw_sg(qm_sg_ptr);
15213 static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
15214 dma_addr_t dma, u32 len, u16 offset)
15216 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15217 - qm_sg_entry_set_len(qm_sg_ptr, len);
15218 + qm_sg_ptr->extension = 0;
15219 + qm_sg_ptr->final = 0;
15220 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15223 static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
15224 dma_addr_t dma, u32 len, u16 offset)
15226 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15227 - qm_sg_entry_set_f(qm_sg_ptr, len);
15228 + qm_sg_ptr->extension = 0;
15229 + qm_sg_ptr->final = 1;
15230 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15233 static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
15234 dma_addr_t dma, u32 len, u16 offset)
15236 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15237 - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
15238 + qm_sg_ptr->extension = 1;
15239 + qm_sg_ptr->final = 0;
15240 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15243 static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
15244 dma_addr_t dma, u32 len,
15247 - __dma_to_qm_sg(qm_sg_ptr, dma, offset);
15248 - qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
15249 - (len & QM_SG_LEN_MASK));
15250 + qm_sg_ptr->extension = 1;
15251 + qm_sg_ptr->final = 1;
15252 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
15256 @@ -102,7 +117,10 @@ static inline void sg_to_qm_sg_last(stru
15257 struct qm_sg_entry *qm_sg_ptr, u16 offset)
15259 qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
15260 - qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
15262 + qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
15263 + qm_sg_ptr->final = 1;
15264 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
15267 #endif /* __SG_SW_QM_H */
15268 --- a/drivers/crypto/talitos.c
15269 +++ b/drivers/crypto/talitos.c
15270 @@ -1250,6 +1250,14 @@ static int ipsec_esp(struct talitos_edes
15271 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
15272 sg_count, areq->assoclen, tbl_off, elen);
15275 + * In case of SEC 2.x+, cipher in len must include only the ciphertext,
15276 + * while extent is used for ICV len.
15278 + if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
15279 + (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
15280 + desc->ptr[4].len = cpu_to_be16(cryptlen);
15284 sync_needed = true;
15285 --- a/include/crypto/chacha20.h
15286 +++ b/include/crypto/chacha20.h
15288 #define CHACHA20_IV_SIZE 16
15289 #define CHACHA20_KEY_SIZE 32
15290 #define CHACHA20_BLOCK_SIZE 64
15291 +#define CHACHAPOLY_IV_SIZE 12
15293 struct chacha20_ctx {