-From 9c9579d76ccd6e738ab98c9b4c73c168912cdb8a Mon Sep 17 00:00:00 2001
+From 2a0aa9bd187f6f5693982a8f79665585af772237 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 27 Sep 2017 15:02:01 +0800
-Subject: [PATCH] crypto: support layerscape
+Date: Thu, 5 Jul 2018 17:29:41 +0800
+Subject: [PATCH 16/32] crypto: support layerscape
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
-This is a integrated patch for layerscape sec support.
+This is an integrated patch for layerscape sec support.
Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
Signed-off-by: Fabio Estevam <festevam@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
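Refresh note (summary of what the updated patch below changes, as visible in the
hunks; no behavioral claims beyond them): besides rebasing hunk offsets to the
2018-07-05 baseline, the new version splits the ahash shared-descriptor helpers
into caamhash_desc.{c,h}, grows the DPAA2 caamalg_qi2 backend, generates split
keys via DKP inside the shared descriptor when the CAAM Era is >= 6, writes
assoclen to DPOVRD instead of REG3 on Era >= 3, and replaces the hard-coded
DMA_TO_DEVICE shared-descriptor syncs with the per-context ctx->dir direction.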
- crypto/Kconfig | 30 +
- crypto/Makefile | 4 +
- crypto/acompress.c | 169 +
- crypto/algboss.c | 12 +-
- crypto/crypto_user.c | 19 +
- crypto/scompress.c | 356 ++
- crypto/tcrypt.c | 17 +-
- crypto/testmgr.c | 1701 ++++----
- crypto/testmgr.h | 1125 +++---
- crypto/tls.c | 607 +++
- drivers/crypto/caam/Kconfig | 72 +-
- drivers/crypto/caam/Makefile | 15 +-
- drivers/crypto/caam/caamalg.c | 2125 +++-------
- drivers/crypto/caam/caamalg_desc.c | 1913 +++++++++
- drivers/crypto/caam/caamalg_desc.h | 127 +
- drivers/crypto/caam/caamalg_qi.c | 2877 +++++++++++++
- drivers/crypto/caam/caamalg_qi2.c | 4428 +++++++++++++++++++++
- drivers/crypto/caam/caamalg_qi2.h | 265 ++
- drivers/crypto/caam/caamhash.c | 521 +--
- drivers/crypto/caam/caampkc.c | 471 ++-
- drivers/crypto/caam/caampkc.h | 58 +
- drivers/crypto/caam/caamrng.c | 16 +-
- drivers/crypto/caam/compat.h | 1 +
- drivers/crypto/caam/ctrl.c | 356 +-
- drivers/crypto/caam/ctrl.h | 2 +
- drivers/crypto/caam/desc.h | 55 +-
- drivers/crypto/caam/desc_constr.h | 139 +-
- drivers/crypto/caam/dpseci.c | 859 ++++
- drivers/crypto/caam/dpseci.h | 395 ++
- drivers/crypto/caam/dpseci_cmd.h | 261 ++
- drivers/crypto/caam/error.c | 127 +-
- drivers/crypto/caam/error.h | 10 +-
- drivers/crypto/caam/intern.h | 31 +-
- drivers/crypto/caam/jr.c | 97 +-
- drivers/crypto/caam/jr.h | 2 +
- drivers/crypto/caam/key_gen.c | 32 +-
- drivers/crypto/caam/key_gen.h | 36 +-
- drivers/crypto/caam/pdb.h | 62 +
- drivers/crypto/caam/pkc_desc.c | 36 +
- drivers/crypto/caam/qi.c | 797 ++++
- drivers/crypto/caam/qi.h | 204 +
- drivers/crypto/caam/regs.h | 63 +-
- drivers/crypto/caam/sg_sw_qm.h | 126 +
- drivers/crypto/caam/sg_sw_qm2.h | 81 +
- drivers/crypto/caam/sg_sw_sec4.h | 60 +-
- drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +-
- drivers/staging/wilc1000/linux_wlan.c | 2 +-
- drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +-
- include/crypto/acompress.h | 269 ++
- include/crypto/internal/acompress.h | 81 +
- include/crypto/internal/scompress.h | 136 +
- include/linux/crypto.h | 3 +
- include/uapi/linux/cryptouser.h | 5 +
- scripts/spelling.txt | 3 +
- sound/soc/amd/acp-pcm-dma.c | 2 +-
- 55 files changed, 17310 insertions(+), 3955 deletions(-)
+ crypto/Kconfig | 30 +
+ crypto/Makefile | 4 +
+ crypto/acompress.c | 169 +
+ crypto/algboss.c | 12 +-
+ crypto/crypto_user.c | 19 +
+ crypto/scompress.c | 356 +
+ crypto/tcrypt.c | 17 +-
+ crypto/testmgr.c | 1708 ++---
+ crypto/testmgr.h | 1125 ++--
+ crypto/tls.c | 607 ++
+ drivers/crypto/caam/Kconfig | 77 +-
+ drivers/crypto/caam/Makefile | 16 +-
+ drivers/crypto/caam/caamalg.c | 2185 ++----
+ drivers/crypto/caam/caamalg_desc.c | 1961 ++++++
+ drivers/crypto/caam/caamalg_desc.h | 127 +
+ drivers/crypto/caam/caamalg_qi.c | 3321 +++++++++
+ drivers/crypto/caam/caamalg_qi2.c | 5938 +++++++++++++++++
+ drivers/crypto/caam/caamalg_qi2.h | 283 +
+ drivers/crypto/caam/caamhash.c | 555 +-
+ drivers/crypto/caam/caamhash_desc.c | 108 +
+ drivers/crypto/caam/caamhash_desc.h | 49 +
+ drivers/crypto/caam/caampkc.c | 471 +-
+ drivers/crypto/caam/caampkc.h | 58 +
+ drivers/crypto/caam/caamrng.c | 16 +-
+ drivers/crypto/caam/compat.h | 1 +
+ drivers/crypto/caam/ctrl.c | 358 +-
+ drivers/crypto/caam/ctrl.h | 2 +
+ drivers/crypto/caam/desc.h | 84 +-
+ drivers/crypto/caam/desc_constr.h | 180 +-
+ drivers/crypto/caam/dpseci.c | 858 +++
+ drivers/crypto/caam/dpseci.h | 395 ++
+ drivers/crypto/caam/dpseci_cmd.h | 261 +
+ drivers/crypto/caam/error.c | 127 +-
+ drivers/crypto/caam/error.h | 10 +-
+ drivers/crypto/caam/intern.h | 31 +-
+ drivers/crypto/caam/jr.c | 72 +-
+ drivers/crypto/caam/jr.h | 2 +
+ drivers/crypto/caam/key_gen.c | 32 +-
+ drivers/crypto/caam/key_gen.h | 36 +-
+ drivers/crypto/caam/pdb.h | 62 +
+ drivers/crypto/caam/pkc_desc.c | 36 +
+ drivers/crypto/caam/qi.c | 804 +++
+ drivers/crypto/caam/qi.h | 204 +
+ drivers/crypto/caam/regs.h | 63 +-
+ drivers/crypto/caam/sg_sw_qm.h | 126 +
+ drivers/crypto/caam/sg_sw_qm2.h | 81 +
+ drivers/crypto/caam/sg_sw_sec4.h | 60 +-
+ drivers/crypto/talitos.c | 8 +
+ drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +-
+ drivers/staging/wilc1000/linux_wlan.c | 2 +-
+ .../staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +-
+ include/crypto/acompress.h | 269 +
+ include/crypto/internal/acompress.h | 81 +
+ include/crypto/internal/scompress.h | 136 +
+ include/linux/crypto.h | 3 +
+ include/uapi/linux/cryptouser.h | 5 +
+ scripts/spelling.txt | 3 +
+ sound/soc/amd/acp-pcm-dma.c | 2 +-
+ 58 files changed, 19620 insertions(+), 3990 deletions(-)
create mode 100644 crypto/acompress.c
create mode 100644 crypto/scompress.c
create mode 100644 crypto/tls.c
create mode 100644 drivers/crypto/caam/caamalg_qi.c
create mode 100644 drivers/crypto/caam/caamalg_qi2.c
create mode 100644 drivers/crypto/caam/caamalg_qi2.h
+ create mode 100644 drivers/crypto/caam/caamhash_desc.c
+ create mode 100644 drivers/crypto/caam/caamhash_desc.h
create mode 100644 drivers/crypto/caam/dpseci.c
create mode 100644 drivers/crypto/caam/dpseci.h
create mode 100644 drivers/crypto/caam/dpseci_cmd.h
};
struct tcrypt_result {
-@@ -1329,6 +1329,10 @@ static int do_test(const char *alg, u32
+@@ -1333,6 +1333,10 @@ static int do_test(const char *alg, u32
ret += tcrypt_test("hmac(sha3-512)");
break;
case 150:
ret += tcrypt_test("ansi_cprng");
break;
-@@ -1390,6 +1394,9 @@ static int do_test(const char *alg, u32
+@@ -1394,6 +1398,9 @@ static int do_test(const char *alg, u32
case 190:
ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
-@@ -1404,9 +1411,9 @@ static int do_test(const char *alg, u32
+@@ -1408,9 +1415,9 @@ static int do_test(const char *alg, u32
test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
-@@ -1837,9 +1844,9 @@ static int do_test(const char *alg, u32
+@@ -1841,9 +1848,9 @@ static int do_test(const char *alg, u32
test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
const bool diff_dst, const int align_offset)
{
const char *algo =
-@@ -1330,7 +1571,8 @@ out_nobuf:
+@@ -1079,12 +1320,16 @@ static int __test_skcipher(struct crypto
+ const char *e, *d;
+ struct tcrypt_result result;
+ void *data;
+- char iv[MAX_IVLEN];
++ char *iv;
+ char *xbuf[XBUFSIZE];
+ char *xoutbuf[XBUFSIZE];
+ int ret = -ENOMEM;
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+
++ iv = kmalloc(MAX_IVLEN, GFP_KERNEL);
++ if (!iv)
++ return ret;
++
+ if (testmgr_alloc_buf(xbuf))
+ goto out_nobuf;
+
+@@ -1325,12 +1570,14 @@ out:
+ testmgr_free_buf(xoutbuf);
+ out_nooutbuf:
+ testmgr_free_buf(xbuf);
++ kfree(iv);
+ out_nobuf:
+ return ret;
}
static int test_skcipher(struct crypto_skcipher *tfm, int enc,
{
unsigned int alignmask;
int ret;
-@@ -1362,8 +1604,10 @@ static int test_skcipher(struct crypto_s
+@@ -1362,8 +1609,10 @@ static int test_skcipher(struct crypto_s
return 0;
}
{
const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
unsigned int i;
-@@ -1442,7 +1686,154 @@ out:
+@@ -1442,7 +1691,154 @@ out:
return ret;
}
unsigned int tcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
-@@ -1509,7 +1900,7 @@ static int alg_test_aead(const struct al
+@@ -1509,7 +1905,7 @@ static int alg_test_aead(const struct al
struct crypto_aead *tfm;
int err = 0;
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
-@@ -1538,7 +1929,7 @@ static int alg_test_cipher(const struct
+@@ -1538,7 +1934,7 @@ static int alg_test_cipher(const struct
struct crypto_cipher *tfm;
int err = 0;
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: cipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
-@@ -1567,7 +1958,7 @@ static int alg_test_skcipher(const struc
+@@ -1567,7 +1963,7 @@ static int alg_test_skcipher(const struc
struct crypto_skcipher *tfm;
int err = 0;
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: skcipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
-@@ -1593,22 +1984,38 @@ out:
+@@ -1593,22 +1989,38 @@ out:
static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
return err;
}
-@@ -1618,7 +2025,7 @@ static int alg_test_hash(const struct al
+@@ -1618,7 +2030,7 @@ static int alg_test_hash(const struct al
struct crypto_ahash *tfm;
int err;
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
-@@ -1646,7 +2053,7 @@ static int alg_test_crc32c(const struct
+@@ -1646,7 +2058,7 @@ static int alg_test_crc32c(const struct
if (err)
goto out;
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
-@@ -1688,7 +2095,7 @@ static int alg_test_cprng(const struct a
+@@ -1688,7 +2100,7 @@ static int alg_test_cprng(const struct a
struct crypto_rng *rng;
int err;
if (IS_ERR(rng)) {
printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(rng));
-@@ -1703,7 +2110,7 @@ static int alg_test_cprng(const struct a
+@@ -1703,7 +2115,7 @@ static int alg_test_cprng(const struct a
}
const char *driver, u32 type, u32 mask)
{
int ret = -EAGAIN;
-@@ -1715,7 +2122,7 @@ static int drbg_cavs_test(struct drbg_te
+@@ -1715,7 +2127,7 @@ static int drbg_cavs_test(struct drbg_te
if (!buf)
return -ENOMEM;
if (IS_ERR(drng)) {
printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
"%s\n", driver);
-@@ -1777,7 +2184,7 @@ static int alg_test_drbg(const struct al
+@@ -1777,7 +2189,7 @@ static int alg_test_drbg(const struct al
int err = 0;
int pr = 0;
int i = 0;
unsigned int tcount = desc->suite.drbg.count;
if (0 == memcmp(driver, "drbg_pr_", 8))
-@@ -1796,7 +2203,7 @@ static int alg_test_drbg(const struct al
+@@ -1796,7 +2208,7 @@ static int alg_test_drbg(const struct al
}
const char *alg)
{
struct kpp_request *req;
-@@ -1888,7 +2295,7 @@ free_req:
+@@ -1888,7 +2300,7 @@ free_req:
}
static int test_kpp(struct crypto_kpp *tfm, const char *alg,
{
int ret, i;
-@@ -1909,7 +2316,7 @@ static int alg_test_kpp(const struct alg
+@@ -1909,7 +2321,7 @@ static int alg_test_kpp(const struct alg
struct crypto_kpp *tfm;
int err = 0;
if (IS_ERR(tfm)) {
pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
-@@ -1924,7 +2331,7 @@ static int alg_test_kpp(const struct alg
+@@ -1924,7 +2336,7 @@ static int alg_test_kpp(const struct alg
}
static int test_akcipher_one(struct crypto_akcipher *tfm,
{
char *xbuf[XBUFSIZE];
struct akcipher_request *req;
-@@ -2044,7 +2451,8 @@ free_xbuf:
+@@ -2044,7 +2456,8 @@ free_xbuf:
}
static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
{
const char *algo =
crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
-@@ -2068,7 +2476,7 @@ static int alg_test_akcipher(const struc
+@@ -2068,7 +2481,7 @@ static int alg_test_akcipher(const struc
struct crypto_akcipher *tfm;
int err = 0;
if (IS_ERR(tfm)) {
pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
-@@ -2088,112 +2496,23 @@ static int alg_test_null(const struct al
+@@ -2088,112 +2501,23 @@ static int alg_test_null(const struct al
return 0;
}
}
}
}, {
-@@ -2201,12 +2520,7 @@ static const struct alg_test_desc alg_te
+@@ -2201,12 +2525,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2214,12 +2528,7 @@ static const struct alg_test_desc alg_te
+@@ -2214,12 +2533,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2228,12 +2537,7 @@ static const struct alg_test_desc alg_te
+@@ -2228,12 +2542,7 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -2245,18 +2549,8 @@ static const struct alg_test_desc alg_te
+@@ -2245,18 +2554,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2268,12 +2562,7 @@ static const struct alg_test_desc alg_te
+@@ -2268,12 +2567,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2282,12 +2571,7 @@ static const struct alg_test_desc alg_te
+@@ -2282,12 +2576,7 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -2296,12 +2580,7 @@ static const struct alg_test_desc alg_te
+@@ -2296,12 +2585,7 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -2309,12 +2588,7 @@ static const struct alg_test_desc alg_te
+@@ -2309,12 +2593,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2323,12 +2597,7 @@ static const struct alg_test_desc alg_te
+@@ -2323,12 +2602,7 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -2344,12 +2613,7 @@ static const struct alg_test_desc alg_te
+@@ -2344,12 +2618,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2358,12 +2622,7 @@ static const struct alg_test_desc alg_te
+@@ -2358,12 +2627,7 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -2380,12 +2639,7 @@ static const struct alg_test_desc alg_te
+@@ -2380,12 +2644,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2393,12 +2647,7 @@ static const struct alg_test_desc alg_te
+@@ -2393,12 +2652,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2407,12 +2656,7 @@ static const struct alg_test_desc alg_te
+@@ -2407,12 +2661,7 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -2429,14 +2673,8 @@ static const struct alg_test_desc alg_te
+@@ -2429,14 +2678,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -2444,14 +2682,8 @@ static const struct alg_test_desc alg_te
+@@ -2444,14 +2687,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2459,14 +2691,8 @@ static const struct alg_test_desc alg_te
+@@ -2459,14 +2696,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2474,14 +2700,8 @@ static const struct alg_test_desc alg_te
+@@ -2474,14 +2705,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2489,14 +2709,8 @@ static const struct alg_test_desc alg_te
+@@ -2489,14 +2714,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2504,14 +2718,8 @@ static const struct alg_test_desc alg_te
+@@ -2504,14 +2723,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2519,14 +2727,8 @@ static const struct alg_test_desc alg_te
+@@ -2519,14 +2732,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2535,14 +2737,8 @@ static const struct alg_test_desc alg_te
+@@ -2535,14 +2742,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -2550,14 +2746,8 @@ static const struct alg_test_desc alg_te
+@@ -2550,14 +2751,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2565,30 +2755,25 @@ static const struct alg_test_desc alg_te
+@@ -2565,30 +2760,25 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2596,14 +2781,8 @@ static const struct alg_test_desc alg_te
+@@ -2596,14 +2786,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2611,20 +2790,14 @@ static const struct alg_test_desc alg_te
+@@ -2611,20 +2795,14 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.test = alg_test_hash,
.suite = {
}
}, {
.alg = "compress_null",
-@@ -2633,94 +2806,30 @@ static const struct alg_test_desc alg_te
+@@ -2633,94 +2811,30 @@ static const struct alg_test_desc alg_te
.alg = "crc32",
.test = alg_test_hash,
.suite = {
}
}
}, {
-@@ -2728,14 +2837,8 @@ static const struct alg_test_desc alg_te
+@@ -2728,14 +2842,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2743,14 +2846,8 @@ static const struct alg_test_desc alg_te
+@@ -2743,14 +2851,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2758,14 +2855,8 @@ static const struct alg_test_desc alg_te
+@@ -2758,14 +2860,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2773,14 +2864,8 @@ static const struct alg_test_desc alg_te
+@@ -2773,14 +2869,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2788,29 +2873,18 @@ static const struct alg_test_desc alg_te
+@@ -2788,29 +2878,18 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2818,14 +2892,8 @@ static const struct alg_test_desc alg_te
+@@ -2818,14 +2897,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2833,14 +2901,8 @@ static const struct alg_test_desc alg_te
+@@ -2833,14 +2906,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2848,14 +2910,8 @@ static const struct alg_test_desc alg_te
+@@ -2848,14 +2915,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2864,14 +2920,8 @@ static const struct alg_test_desc alg_te
+@@ -2864,14 +2925,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.comp = {
}
}
}, {
-@@ -2879,10 +2929,7 @@ static const struct alg_test_desc alg_te
+@@ -2879,10 +2934,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
}
}, {
.alg = "digest_null",
-@@ -2892,30 +2939,21 @@ static const struct alg_test_desc alg_te
+@@ -2892,30 +2944,21 @@ static const struct alg_test_desc alg_te
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
}
}, {
/*
-@@ -2930,11 +2968,7 @@ static const struct alg_test_desc alg_te
+@@ -2930,11 +2973,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
}
}, {
/* covered by drbg_nopr_hmac_sha256 test */
-@@ -2954,10 +2988,7 @@ static const struct alg_test_desc alg_te
+@@ -2954,10 +2993,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
}
}, {
/* covered by drbg_nopr_sha256 test */
-@@ -2973,10 +3004,7 @@ static const struct alg_test_desc alg_te
+@@ -2973,10 +3009,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
}
}, {
/* covered by drbg_pr_ctr_aes128 test */
-@@ -2996,10 +3024,7 @@ static const struct alg_test_desc alg_te
+@@ -2996,10 +3029,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
}
}, {
/* covered by drbg_pr_hmac_sha256 test */
-@@ -3019,10 +3044,7 @@ static const struct alg_test_desc alg_te
+@@ -3019,10 +3049,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
}
}, {
/* covered by drbg_pr_sha256 test */
-@@ -3034,23 +3056,13 @@ static const struct alg_test_desc alg_te
+@@ -3034,23 +3061,13 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.test = alg_test_null,
}, {
}
}
}, {
-@@ -3058,14 +3070,8 @@ static const struct alg_test_desc alg_te
+@@ -3058,14 +3075,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3073,14 +3079,8 @@ static const struct alg_test_desc alg_te
+@@ -3073,14 +3084,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3088,14 +3088,8 @@ static const struct alg_test_desc alg_te
+@@ -3088,14 +3093,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3103,14 +3097,8 @@ static const struct alg_test_desc alg_te
+@@ -3103,14 +3102,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3118,14 +3106,8 @@ static const struct alg_test_desc alg_te
+@@ -3118,14 +3111,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3133,14 +3115,8 @@ static const struct alg_test_desc alg_te
+@@ -3133,14 +3120,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3151,14 +3127,8 @@ static const struct alg_test_desc alg_te
+@@ -3151,14 +3132,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3167,14 +3137,8 @@ static const struct alg_test_desc alg_te
+@@ -3167,14 +3142,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -3197,14 +3161,8 @@ static const struct alg_test_desc alg_te
+@@ -3197,14 +3166,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3212,14 +3170,8 @@ static const struct alg_test_desc alg_te
+@@ -3212,14 +3175,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3227,14 +3179,8 @@ static const struct alg_test_desc alg_te
+@@ -3227,14 +3184,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3242,14 +3188,8 @@ static const struct alg_test_desc alg_te
+@@ -3242,14 +3193,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3257,14 +3197,8 @@ static const struct alg_test_desc alg_te
+@@ -3257,14 +3202,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3272,14 +3206,8 @@ static const struct alg_test_desc alg_te
+@@ -3272,14 +3211,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3287,14 +3215,8 @@ static const struct alg_test_desc alg_te
+@@ -3287,14 +3220,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3302,14 +3224,8 @@ static const struct alg_test_desc alg_te
+@@ -3302,14 +3229,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3317,10 +3233,7 @@ static const struct alg_test_desc alg_te
+@@ -3317,10 +3238,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
}
}, {
.alg = "gcm(aes)",
-@@ -3328,14 +3241,8 @@ static const struct alg_test_desc alg_te
+@@ -3328,14 +3246,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -3343,136 +3250,94 @@ static const struct alg_test_desc alg_te
+@@ -3343,136 +3255,94 @@ static const struct alg_test_desc alg_te
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
}
}, {
.alg = "jitterentropy_rng",
-@@ -3484,14 +3349,8 @@ static const struct alg_test_desc alg_te
+@@ -3484,14 +3354,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -3499,14 +3358,8 @@ static const struct alg_test_desc alg_te
+@@ -3499,14 +3363,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3514,14 +3367,8 @@ static const struct alg_test_desc alg_te
+@@ -3514,14 +3372,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3529,14 +3376,8 @@ static const struct alg_test_desc alg_te
+@@ -3529,14 +3381,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3544,14 +3385,8 @@ static const struct alg_test_desc alg_te
+@@ -3544,14 +3390,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3559,14 +3394,8 @@ static const struct alg_test_desc alg_te
+@@ -3559,14 +3399,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3575,14 +3404,8 @@ static const struct alg_test_desc alg_te
+@@ -3575,14 +3409,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.comp = {
}
}
}, {
-@@ -3591,14 +3414,8 @@ static const struct alg_test_desc alg_te
+@@ -3591,14 +3419,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.comp = {
}
}
}, {
-@@ -3607,42 +3424,27 @@ static const struct alg_test_desc alg_te
+@@ -3607,42 +3429,27 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.comp = {
}
}, {
.alg = "ofb(aes)",
-@@ -3650,14 +3452,8 @@ static const struct alg_test_desc alg_te
+@@ -3650,14 +3457,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -3665,24 +3461,15 @@ static const struct alg_test_desc alg_te
+@@ -3665,24 +3466,15 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}, {
.alg = "rfc3686(ctr(aes))",
-@@ -3690,14 +3477,8 @@ static const struct alg_test_desc alg_te
+@@ -3690,14 +3482,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -3706,14 +3487,8 @@ static const struct alg_test_desc alg_te
+@@ -3706,14 +3492,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -3722,14 +3497,8 @@ static const struct alg_test_desc alg_te
+@@ -3722,14 +3502,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -3737,14 +3506,8 @@ static const struct alg_test_desc alg_te
+@@ -3737,14 +3511,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -3752,14 +3515,8 @@ static const struct alg_test_desc alg_te
+@@ -3752,14 +3520,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -3767,71 +3524,47 @@ static const struct alg_test_desc alg_te
+@@ -3767,71 +3529,47 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -3839,162 +3572,120 @@ static const struct alg_test_desc alg_te
+@@ -3839,162 +3577,120 @@ static const struct alg_test_desc alg_te
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
}
}, {
.alg = "xts(aes)",
-@@ -4002,14 +3693,8 @@ static const struct alg_test_desc alg_te
+@@ -4002,14 +3698,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -4017,14 +3702,8 @@ static const struct alg_test_desc alg_te
+@@ -4017,14 +3707,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -4032,14 +3711,8 @@ static const struct alg_test_desc alg_te
+@@ -4032,14 +3716,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -4047,14 +3720,8 @@ static const struct alg_test_desc alg_te
+@@ -4047,14 +3725,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -4062,14 +3729,8 @@ static const struct alg_test_desc alg_te
+@@ -4062,14 +3734,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
default y
select CRYPTO_RNG
select HW_RANDOM
-@@ -124,13 +149,26 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
+@@ -124,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
To compile this as a module, choose M here: the module
will be called caamrng.
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AUTHENC
+ select CRYPTO_AEAD
++ select CRYPTO_HASH
+ ---help---
+ CAAM driver for QorIQ Data Path Acceleration Architecture 2.
+ It handles DPSECI DPAA2 objects that sit on the Management Complex
+ def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
+ CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
+ CRYPTO_DEV_FSL_DPAA2_CAAM)
++
++config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
++ def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
++ CRYPTO_DEV_FSL_DPAA2_CAAM)
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
-@@ -5,13 +5,26 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
+@@ -5,13 +5,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
ccflags-y := -DDEBUG
endif
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
bool rfc3686;
bool geniv;
};
-@@ -163,302 +96,67 @@ struct caam_aead_alg {
+@@ -163,302 +96,71 @@ struct caam_aead_alg {
bool registered;
};
- unsigned int enckeylen;
- unsigned int split_key_len;
- unsigned int split_key_pad_len;
++ enum dma_data_direction dir;
+ struct device *jrdev;
+ struct alginfo adata;
+ struct alginfo cdata;
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
+ ctx->adata.keylen_pad;
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
-+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
++ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
++ ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
- desc_bytes(desc), 1);
-#endif
+ desc = ctx->sh_desc_dec;
-+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
++ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
++ ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
return 0;
}
-@@ -470,11 +168,11 @@ static int aead_set_sh_desc(struct crypt
+@@ -470,11 +172,12 @@ static int aead_set_sh_desc(struct crypt
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline;
- u32 geniv, moveiv;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 ctx1_iv_off = 0;
- u32 *desc;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
-@@ -482,7 +180,7 @@ static int aead_set_sh_desc(struct crypt
+@@ -482,7 +185,7 @@ static int aead_set_sh_desc(struct crypt
return 0;
/* NULL encryption / decryption */
return aead_null_set_sh_desc(aead);
/*
-@@ -497,8 +195,14 @@ static int aead_set_sh_desc(struct crypt
+@@ -497,8 +200,14 @@ static int aead_set_sh_desc(struct crypt
* RFC3686 specific:
* CONTEXT1[255:128] = {NONCE, IV, COUNTER}
*/
if (alg->caam.geniv)
goto skip_enc;
-@@ -507,146 +211,64 @@ static int aead_set_sh_desc(struct crypt
+@@ -507,146 +216,64 @@ static int aead_set_sh_desc(struct crypt
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* Read and write assoclen bytes */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (desc_inline_query(DESC_AEAD_ENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
-- /* Read and write assoclen bytes */
-- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+- /* Skip assoc data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
-- /* Skip assoc data */
-- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+- /* read assoc before reading payload */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+- FIFOLDST_VLF);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-- /* read assoc before reading payload */
-- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-- FIFOLDST_VLF);
-+ ctx->adata.key_inline = !!(inl_mask & 1);
-+ ctx->cdata.key_inline = !!(inl_mask & 2);
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
--
++ ctx->adata.key_inline = !!(inl_mask & 1);
++ ctx->cdata.key_inline = !!(inl_mask & 2);
+
- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
-+ false);
++ false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
skip_enc:
/*
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, alg->caam.geniv, is_rfc3686,
-+ nonce, ctx1_iv_off, false);
++ nonce, ctx1_iv_off, false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
if (!alg->caam.geniv)
goto skip_givenc;
-@@ -655,107 +277,32 @@ skip_enc:
+@@ -655,107 +282,32 @@ skip_enc:
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, is_rfc3686, nonce,
-+ ctx1_iv_off, false);
++ ctx1_iv_off, false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
skip_givenc:
return 0;
-@@ -776,12 +323,12 @@ static int gcm_set_sh_desc(struct crypto
+@@ -776,12 +328,12 @@ static int gcm_set_sh_desc(struct crypto
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
return 0;
/*
-@@ -789,175 +336,35 @@ static int gcm_set_sh_desc(struct crypto
+@@ -789,175 +341,35 @@ static int gcm_set_sh_desc(struct crypto
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
-#endif
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
-#endif
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
return 0;
}
-@@ -976,11 +383,12 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -976,11 +388,12 @@ static int rfc4106_set_sh_desc(struct cr
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
return 0;
/*
-@@ -988,148 +396,37 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -988,148 +401,37 @@ static int rfc4106_set_sh_desc(struct cr
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
return 0;
}
-@@ -1149,12 +446,12 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -1149,12 +451,12 @@ static int rfc4543_set_sh_desc(struct cr
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
return 0;
/*
-@@ -1162,151 +459,37 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -1162,151 +464,37 @@ static int rfc4543_set_sh_desc(struct cr
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
/*
* Job Descriptor and Shared Descriptors
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
return 0;
}
-@@ -1322,19 +505,9 @@ static int rfc4543_setauthsize(struct cr
+@@ -1322,74 +510,67 @@ static int rfc4543_setauthsize(struct cr
return 0;
}
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
-@@ -1343,53 +516,32 @@ static int aead_setkey(struct crypto_aea
+ int ret = 0;
+
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
#endif
- ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
++ /*
++ * If DKP is supported, use it in the shared descriptor to generate
++ * the split key.
++ */
++ if (ctrlpriv->era >= 6) {
++ ctx->adata.keylen = keys.authkeylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
++
++ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
++ goto badkey;
++
++ memcpy(ctx->key, keys.authkey, keys.authkeylen);
++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
++ keys.enckeylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma,
++ ctx->adata.keylen_pad +
++ keys.enckeylen, ctx->dir);
++ goto skip_split_key;
++ }
++
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
- }
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
++ keys.enckeylen, ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len + keys.enckeylen, 1);
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
--
+
- ctx->enckeylen = keys.enckeylen;
-
- ret = aead_set_sh_desc(aead);
- }
-
- return ret;
++skip_split_key:
+ ctx->cdata.keylen = keys.enckeylen;
+ return aead_set_sh_desc(aead);
badkey:
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
-@@ -1400,7 +552,6 @@ static int gcm_setkey(struct crypto_aead
+@@ -1400,7 +581,6 @@ static int gcm_setkey(struct crypto_aead
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
-@@ -1408,21 +559,10 @@ static int gcm_setkey(struct crypto_aead
+@@ -1408,21 +588,10 @@ static int gcm_setkey(struct crypto_aead
#endif
memcpy(ctx->key, key, keylen);
- return -ENOMEM;
- }
- ctx->enckeylen = keylen;
--
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
++ ctx->cdata.keylen = keylen;
+
- ret = gcm_set_sh_desc(aead);
- if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
- DMA_TO_DEVICE);
- }
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-+ ctx->cdata.keylen = keylen;
-
+-
- return ret;
+ return gcm_set_sh_desc(aead);
}
static int rfc4106_setkey(struct crypto_aead *aead,
-@@ -1430,7 +570,6 @@ static int rfc4106_setkey(struct crypto_
+@@ -1430,7 +599,6 @@ static int rfc4106_setkey(struct crypto_
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
if (keylen < 4)
return -EINVAL;
-@@ -1446,22 +585,10 @@ static int rfc4106_setkey(struct crypto_
+@@ -1446,22 +614,10 @@ static int rfc4106_setkey(struct crypto_
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- return ret;
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
++ ctx->dir);
+ return rfc4106_set_sh_desc(aead);
}
static int rfc4543_setkey(struct crypto_aead *aead,
-@@ -1469,7 +596,6 @@ static int rfc4543_setkey(struct crypto_
+@@ -1469,7 +625,6 @@ static int rfc4543_setkey(struct crypto_
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
if (keylen < 4)
return -EINVAL;
-@@ -1485,43 +611,28 @@ static int rfc4543_setkey(struct crypto_
+@@ -1485,43 +640,28 @@ static int rfc4543_setkey(struct crypto_
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
- return ret;
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
++ ctx->dir);
+ return rfc4543_set_sh_desc(aead);
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-@@ -1544,215 +655,33 @@ static int ablkcipher_setkey(struct cryp
+@@ -1544,215 +684,33 @@ static int ablkcipher_setkey(struct cryp
keylen -= CTR_RFC3686_NONCE_SIZE;
}
- /* Load iv */
- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-+ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
-+ ctx1_iv_off);
-+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
-
+-
- /* Load counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
--
++ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
++ ctx1_iv_off);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++ desc_bytes(desc), ctx->dir);
+
- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
+ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
+ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
}
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-@@ -1760,8 +689,7 @@ static int xts_ablkcipher_setkey(struct
+@@ -1760,8 +718,7 @@ static int xts_ablkcipher_setkey(struct
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(ablkcipher,
-@@ -1771,126 +699,38 @@ static int xts_ablkcipher_setkey(struct
+@@ -1771,126 +728,38 @@ static int xts_ablkcipher_setkey(struct
}
memcpy(ctx->key, key, keylen);
-#endif
+ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
/* xts_ablkcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
-#endif
+ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
return 0;
}
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
-@@ -1899,12 +739,12 @@ struct aead_edesc {
+@@ -1899,12 +768,12 @@ struct aead_edesc {
/*
* ablkcipher_edesc - s/w-extended ablkcipher descriptor
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct ablkcipher_edesc {
-@@ -1924,10 +764,11 @@ static void caam_unmap(struct device *de
+@@ -1924,10 +793,11 @@ static void caam_unmap(struct device *de
int sec4_sg_bytes)
{
if (dst != src) {
}
if (iv_dma)
-@@ -2021,8 +862,7 @@ static void ablkcipher_encrypt_done(stru
+@@ -2021,8 +891,7 @@ static void ablkcipher_encrypt_done(stru
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err)
caam_jr_strstatus(jrdev, err);
-@@ -2031,10 +871,10 @@ static void ablkcipher_encrypt_done(stru
+@@ -2031,10 +900,10 @@ static void ablkcipher_encrypt_done(stru
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
ablkcipher_unmap(jrdev, edesc, req);
-@@ -2062,8 +902,7 @@ static void ablkcipher_decrypt_done(stru
+@@ -2062,8 +931,7 @@ static void ablkcipher_decrypt_done(stru
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err)
caam_jr_strstatus(jrdev, err);
-@@ -2071,10 +910,10 @@ static void ablkcipher_decrypt_done(stru
+@@ -2071,10 +939,10 @@ static void ablkcipher_decrypt_done(stru
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
ablkcipher_unmap(jrdev, edesc, req);
-@@ -2114,7 +953,7 @@ static void init_aead_job(struct aead_re
+@@ -2114,7 +982,7 @@ static void init_aead_job(struct aead_re
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (all_contig) {
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
-@@ -2129,7 +968,7 @@ static void init_aead_job(struct aead_re
+@@ -2129,7 +997,7 @@ static void init_aead_job(struct aead_re
out_options = in_options;
if (unlikely(req->src != req->dst)) {
dst_dma = sg_dma_address(req->dst);
} else {
dst_dma = edesc->sec4_sg_dma +
-@@ -2175,7 +1014,7 @@ static void init_gcm_job(struct aead_req
+@@ -2147,9 +1015,6 @@ static void init_aead_job(struct aead_re
+ append_seq_out_ptr(desc, dst_dma,
+ req->assoclen + req->cryptlen - authsize,
+ out_options);
+-
+- /* REG3 = assoclen */
+- append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+ }
+
+ static void init_gcm_job(struct aead_request *req,
+@@ -2164,6 +1029,7 @@ static void init_gcm_job(struct aead_req
+ unsigned int last;
+
+ init_aead_job(req, edesc, all_contig, encrypt);
++ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+
+ /* BUG This should not be specific to generic GCM. */
+ last = 0;
+@@ -2175,7 +1041,7 @@ static void init_gcm_job(struct aead_req
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
/* Append Salt */
if (!generic_gcm)
/* Append IV */
append_data(desc, req->iv, ivsize);
/* End of blank commands */
-@@ -2190,7 +1029,7 @@ static void init_authenc_job(struct aead
+@@ -2190,7 +1056,8 @@ static void init_authenc_job(struct aead
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
u32 *desc = edesc->hw_desc;
-@@ -2236,16 +1075,15 @@ static void init_ablkcipher_job(u32 *sh_
+@@ -2213,6 +1080,15 @@ static void init_authenc_job(struct aead
+
+ init_aead_job(req, edesc, all_contig, encrypt);
+
++ /*
++ * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
++ * having DPOVRD as destination.
++ */
++ if (ctrlpriv->era < 3)
++ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
++ else
++ append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
++
+ if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
+ append_load_as_imm(desc, req->iv, ivsize,
+ LDST_CLASS_1_CCB |
+@@ -2236,16 +1112,15 @@ static void init_ablkcipher_job(u32 *sh_
int len, sec4_sg_index = 0;
#ifdef DEBUG
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-@@ -2261,7 +1099,7 @@ static void init_ablkcipher_job(u32 *sh_
+@@ -2261,7 +1136,7 @@ static void init_ablkcipher_job(u32 *sh_
append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
if (likely(req->src == req->dst)) {
dst_dma = sg_dma_address(req->src);
} else {
dst_dma = edesc->sec4_sg_dma +
-@@ -2269,7 +1107,7 @@ static void init_ablkcipher_job(u32 *sh_
+@@ -2269,7 +1144,7 @@ static void init_ablkcipher_job(u32 *sh_
out_options = LDST_SGF;
}
} else {
dst_dma = sg_dma_address(req->dst);
} else {
dst_dma = edesc->sec4_sg_dma +
-@@ -2296,20 +1134,18 @@ static void init_ablkcipher_giv_job(u32
+@@ -2296,20 +1171,18 @@ static void init_ablkcipher_giv_job(u32
int len, sec4_sg_index = 0;
#ifdef DEBUG
src_dma = sg_dma_address(req->src);
in_options = 0;
} else {
-@@ -2340,87 +1176,100 @@ static struct aead_edesc *aead_edesc_all
+@@ -2340,87 +1213,100 @@ static struct aead_edesc *aead_edesc_all
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
edesc->sec4_sg + sec4_sg_index, 0);
}
-@@ -2573,13 +1422,9 @@ static int aead_decrypt(struct aead_requ
+@@ -2573,13 +1459,9 @@ static int aead_decrypt(struct aead_requ
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
-@@ -2619,51 +1464,80 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2619,51 +1501,80 @@ static struct ablkcipher_edesc *ablkciph
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
return ERR_PTR(-ENOMEM);
}
-@@ -2673,23 +1547,24 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2673,23 +1584,24 @@ static struct ablkcipher_edesc *ablkciph
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
return ERR_PTR(-ENOMEM);
}
-@@ -2701,7 +1576,7 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2701,7 +1613,7 @@ static struct ablkcipher_edesc *ablkciph
sec4_sg_bytes, 1);
#endif
return edesc;
}
-@@ -2792,30 +1667,54 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2792,30 +1704,54 @@ static struct ablkcipher_edesc *ablkciph
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
+ bool out_contig;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- int sec4_sg_index;
--
-- src_nents = sg_count(req->src, req->nbytes);
+ int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+- src_nents = sg_count(req->src, req->nbytes);
+-
- if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
}
/*
-@@ -2825,21 +1724,29 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2825,21 +1761,29 @@ static struct ablkcipher_edesc *ablkciph
iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
return ERR_PTR(-ENOMEM);
}
-@@ -2849,24 +1756,24 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2849,24 +1793,24 @@ static struct ablkcipher_edesc *ablkciph
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
return ERR_PTR(-ENOMEM);
}
edesc->iv_dma = iv_dma;
-@@ -2878,7 +1785,7 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2878,7 +1822,7 @@ static struct ablkcipher_edesc *ablkciph
sec4_sg_bytes, 1);
#endif
return edesc;
}
-@@ -2889,7 +1796,7 @@ static int ablkcipher_givencrypt(struct
+@@ -2889,7 +1833,7 @@ static int ablkcipher_givencrypt(struct
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
-@@ -2933,7 +1840,6 @@ struct caam_alg_template {
+@@ -2933,7 +1877,6 @@ struct caam_alg_template {
} template_u;
u32 class1_alg_type;
u32 class2_alg_type;
};
static struct caam_alg_template driver_algs[] = {
-@@ -3118,7 +2024,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3118,7 +2061,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3140,7 +2045,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3140,7 +2082,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3162,7 +2066,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3162,7 +2103,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3184,7 +2087,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3184,7 +2124,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3206,7 +2108,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3206,7 +2145,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3228,7 +2129,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3228,7 +2166,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3250,7 +2150,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3250,7 +2187,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3273,7 +2172,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3273,7 +2209,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3296,7 +2194,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3296,7 +2231,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3319,7 +2216,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3319,7 +2253,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3342,7 +2238,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3342,7 +2275,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3365,7 +2260,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3365,7 +2297,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3388,7 +2282,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3388,7 +2319,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3411,7 +2304,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3411,7 +2341,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3434,7 +2326,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3434,7 +2363,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3457,7 +2348,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3457,7 +2385,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3480,7 +2370,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3480,7 +2407,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3503,7 +2392,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3503,7 +2429,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3526,7 +2414,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3526,7 +2451,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
}
},
{
-@@ -3549,7 +2436,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3549,7 +2473,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
}
},
-@@ -3573,7 +2459,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3573,7 +2496,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3597,7 +2482,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3597,7 +2519,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3621,7 +2505,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3621,7 +2542,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3645,7 +2528,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3645,7 +2565,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3669,7 +2551,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3669,7 +2588,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3693,7 +2574,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3693,7 +2611,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3717,7 +2597,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3717,7 +2634,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3741,7 +2620,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3741,7 +2657,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3765,7 +2643,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3765,7 +2680,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3789,7 +2666,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3789,7 +2703,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3812,7 +2688,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3812,7 +2725,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3835,7 +2710,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3835,7 +2747,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3858,7 +2732,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3858,7 +2769,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3881,7 +2754,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3881,7 +2791,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3904,7 +2776,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3904,7 +2813,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3927,7 +2798,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3927,7 +2835,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3950,7 +2820,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3950,7 +2857,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3973,7 +2842,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3973,7 +2879,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3996,7 +2864,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3996,7 +2901,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -4019,7 +2886,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4019,7 +2923,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -4042,7 +2908,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4042,7 +2945,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -4065,7 +2930,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4065,7 +2967,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -4090,7 +2954,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4090,7 +2991,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4115,7 +2978,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4115,7 +3015,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4141,7 +3003,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4141,7 +3040,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4166,7 +3027,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4166,7 +3064,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4192,7 +3052,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4192,7 +3089,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4217,7 +3076,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4217,7 +3113,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4243,7 +3101,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4243,7 +3138,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4268,7 +3125,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4268,7 +3162,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4294,7 +3150,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4294,7 +3187,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4319,7 +3174,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4319,7 +3211,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4345,7 +3199,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4345,7 +3236,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4370,7 +3223,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4370,7 +3260,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4385,16 +3237,34 @@ struct caam_crypto_alg {
+@@ -4383,18 +3272,44 @@ struct caam_crypto_alg {
+ struct caam_alg_entry caam;
+ };
- static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
++static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
++ bool uses_dkp)
{
+ dma_addr_t dma_addr;
++ struct caam_drv_private *priv;
+
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
return PTR_ERR(ctx->jrdev);
}
++ priv = dev_get_drvdata(ctx->jrdev->parent);
++ if (priv->era >= 6 && uses_dkp)
++ ctx->dir = DMA_BIDIRECTIONAL;
++ else
++ ctx->dir = DMA_TO_DEVICE;
++
+ dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
+ offsetof(struct caam_ctx,
+ sh_desc_enc_dma),
-+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+ dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
+ caam_jr_free(ctx->jrdev);
return 0;
}
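
An aside on the caam_init_common() change above: from SEC Era 6 onward the DKP (Derived Key Protocol) operation overwrites the key material embedded in the shared descriptor with the derived split key, so descriptors belonging to DKP users must be mapped DMA_BIDIRECTIONAL rather than DMA_TO_DEVICE. A minimal, standalone sketch of that decision, using a mock enum in place of the kernel's enum dma_data_direction:

#include <stdbool.h>
#include <stdio.h>

/* Mock of the kernel's enum dma_data_direction, for illustration only. */
enum dma_dir { DIR_TO_DEVICE, DIR_BIDIRECTIONAL };

/*
 * On Era >= 6 the DKP protocol writes the derived split key back into the
 * shared descriptor, so the device needs write access to the buffer too.
 */
static enum dma_dir desc_dma_dir(int era, bool uses_dkp)
{
	return (era >= 6 && uses_dkp) ? DIR_BIDIRECTIONAL : DIR_TO_DEVICE;
}

int main(void)
{
	printf("era 5 + dkp: %d, era 6 + dkp: %d\n",
	       desc_dma_dir(5, true), desc_dma_dir(6, true));
	return 0;
}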
-@@ -4421,25 +3291,9 @@ static int caam_aead_init(struct crypto_
+@@ -4406,7 +3321,7 @@ static int caam_cra_init(struct crypto_t
+ container_of(alg, struct caam_crypto_alg, crypto_alg);
+ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
+- return caam_init_common(ctx, &caam_alg->caam);
++ return caam_init_common(ctx, &caam_alg->caam, false);
+ }
+
+ static int caam_aead_init(struct crypto_aead *tfm)
+@@ -4416,30 +3331,15 @@ static int caam_aead_init(struct crypto_
+ container_of(alg, struct caam_aead_alg, aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+
+- return caam_init_common(ctx, &caam_alg->caam);
++ return caam_init_common(ctx, &caam_alg->caam,
++ alg->setkey == aead_setkey);
+ }
static void caam_exit_common(struct caam_ctx *ctx)
{
-
+ dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
+ offsetof(struct caam_ctx, sh_desc_enc_dma),
-+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
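
caam_exit_common() above mirrors the mapping done at init time: one dma_unmap_single_attrs() call covering everything from the first shared descriptor up to (but excluding) the first DMA-address field, with the direction saved in ctx->dir. A sketch of that offsetof()-based extent computation, with an illustrative stand-in for struct caam_ctx:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for struct caam_ctx; sizes and names are illustrative. */
struct ctx {
	unsigned int sh_desc_enc[64];	/* start of the mapped region */
	unsigned int sh_desc_dec[64];
	unsigned char key[128];
	unsigned long sh_desc_enc_dma;	/* first field left out of the map */
};

int main(void)
{
	/*
	 * The map and unmap calls both size the region as the offset of the
	 * first DMA-address field, so descriptors and key share one mapping.
	 */
	printf("mapped extent: %zu bytes\n",
	       offsetof(struct ctx, sh_desc_enc_dma));
	return 0;
}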
-@@ -4515,7 +3369,6 @@ static struct caam_crypto_alg *caam_alg_
+@@ -4515,7 +3415,6 @@ static struct caam_crypto_alg *caam_alg_
t_alg->caam.class1_alg_type = template->class1_alg_type;
t_alg->caam.class2_alg_type = template->class2_alg_type;
}
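
In the caam_aead_init() hunk above, whether a transform is a DKP user is inferred by comparing its setkey hook against aead_setkey, since only the authenc-style transforms derive an MDHA split key. A mock-typed sketch of that function-pointer test:

#include <stdbool.h>
#include <stdio.h>

typedef int (*setkey_fn)(const unsigned char *key, unsigned int keylen);

static int aead_setkey(const unsigned char *key, unsigned int keylen)
{
	(void)key; (void)keylen;
	return 0;	/* authenc path: would derive an MDHA split key */
}

static int gcm_setkey(const unsigned char *key, unsigned int keylen)
{
	(void)key; (void)keylen;
	return 0;	/* GHASH key: no split key, hence no DKP */
}

struct aead_alg { setkey_fn setkey; };

/* Only algorithms wired to aead_setkey() need the DKP-aware init path. */
static bool alg_uses_dkp(const struct aead_alg *alg)
{
	return alg->setkey == aead_setkey;
}

int main(void)
{
	struct aead_alg authenc = { aead_setkey }, gcm = { gcm_setkey };

	printf("authenc: %d, gcm: %d\n",
	       alg_uses_dkp(&authenc), alg_uses_dkp(&gcm));
	return 0;
}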
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.c
-@@ -0,0 +1,1913 @@
+@@ -0,0 +1,1961 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with no (null) encryption.
+ * @desc: pointer to buffer used for descriptor construction
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values - one of
++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ * with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
-+ unsigned int icvsize)
++ unsigned int icvsize, int era)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
-+ if (adata->key_inline)
-+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-+ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
-+ KEY_ENC);
-+ else
-+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ if (era < 6) {
++ if (adata->key_inline)
++ append_key_as_imm(desc, adata->key_virt,
++ adata->keylen_pad, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT |
++ KEY_ENC);
++ else
++ append_key(desc, adata->key_dma, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ } else {
++ append_proto_dkp(desc, adata);
++ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* assoclen + cryptlen = seqinlen */
+ * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol) with no (null) decryption.
+ * @desc: pointer to buffer used for descriptor construction
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values - one of
++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ * with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
-+ unsigned int icvsize)
++ unsigned int icvsize, int era)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
-+ if (adata->key_inline)
-+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-+ adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+ else
-+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ if (era < 6) {
++ if (adata->key_inline)
++ append_key_as_imm(desc, adata->key_virt,
++ adata->keylen_pad, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT |
++ KEY_ENC);
++ else
++ append_key(desc, adata->key_dma, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ } else {
++ append_proto_dkp(desc, adata);
++ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 operation */
+static void init_sh_desc_key_aead(u32 * const desc,
+ struct alginfo * const cdata,
+ struct alginfo * const adata,
-+ const bool is_rfc3686, u32 *nonce)
++ const bool is_rfc3686, u32 *nonce, int era)
+{
+ u32 *key_jump_cmd;
+ unsigned int enckeylen = cdata->keylen;
+ if (is_rfc3686)
+ enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
-+ if (adata->key_inline)
-+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-+ adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+ else
-+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ if (era < 6) {
++ if (adata->key_inline)
++ append_key_as_imm(desc, adata->key_virt,
++ adata->keylen_pad, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT |
++ KEY_ENC);
++ else
++ append_key(desc, adata->key_dma, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ } else {
++ append_proto_dkp(desc, adata);
++ }
+
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, enckeylen,
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values - one of
++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ * with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
-+ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
++ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
++ int era)
+{
+ /* Note: Context registers are saved. */
-+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ }
+
+ /* Read and write assoclen bytes */
-+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++ if (is_qi || era < 3) {
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++ } else {
++ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++ }
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values - one of
++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ * with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
-+ const u32 ctx1_iv_off, const bool is_qi)
++ const u32 ctx1_iv_off, const bool is_qi, int era)
+{
+ /* Note: Context registers are saved. */
-+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ }
+
+ /* Read and write assoclen bytes */
-+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ if (geniv)
-+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
-+ else
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++ if (is_qi || era < 3) {
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ if (geniv)
++ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
++ ivsize);
++ else
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
++ CAAM_CMD_SZ);
++ } else {
++ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++ if (geniv)
++ append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
++ ivsize);
++ else
++ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
++ CAAM_CMD_SZ);
++ }
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values - one of
++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ * with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off,
-+ const bool is_qi)
++ const bool is_qi, int era)
+{
+ u32 geniv, moveiv;
+
+ /* Note: Context registers are saved. */
-+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
+
+ if (is_qi) {
+ u32 *wait_load_cmd;
+ OP_ALG_ENCRYPT);
+
+ /* Read and write assoclen bytes */
-+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++ if (is_qi || era < 3) {
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++ } else {
++ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++ }
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
+ * with OP_ALG_AAI_CBC
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ANDed with
-+ * OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
++ * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @assoclen: associated data length
+ * @ivsize: initialization vector size
+ * @authsize: authentication data size
+ * @blocksize: block cipher size
++ * @era: SEC Era
+ */
+void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int assoclen,
+ unsigned int ivsize, unsigned int authsize,
-+ unsigned int blocksize)
++ unsigned int blocksize, int era)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd;
+ u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
-+ if (adata->key_inline)
-+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-+ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
-+ KEY_ENC);
-+ else
-+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ if (era < 6) {
++ if (adata->key_inline)
++ append_key_as_imm(desc, adata->key_virt,
++ adata->keylen_pad, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT |
++ KEY_ENC);
++ else
++ append_key(desc, adata->key_dma, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ } else {
++ append_proto_dkp(desc, adata);
++ }
+
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
+ * with OP_ALG_AAI_CBC
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values OP_ALG_ALGSEL_ SHA1 ANDed with
-+ * OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
++ * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @assoclen: associated data length
+ * @ivsize: initialization vector size
+ * @authsize: authentication data size
+ * @blocksize: block cipher size
++ * @era: SEC Era
+ */
+void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int assoclen,
+ unsigned int ivsize, unsigned int authsize,
-+ unsigned int blocksize)
++ unsigned int blocksize, int era)
+{
+ u32 stidx, jumpback;
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
-+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ if (era < 6)
++ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
++ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ else
++ append_proto_dkp(desc, adata);
+
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+ /* VSOL = payloadlen + icvlen + padlen */
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
+
-+#ifdef __LITTLE_ENDIAN
-+ append_moveb(desc, MOVE_WAITCOMP |
-+ MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
-+#endif
++ if (caam_little_end)
++ append_moveb(desc, MOVE_WAITCOMP |
++ MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
++
+ /* update Len field */
+ append_math_sub(desc, REG0, REG0, REG2, 8);
+
+ * SEQ OUT PTR command, Output Pointer (2 words) and
+ * Output Length into math registers.
+ */
-+#ifdef __LITTLE_ENDIAN
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+ MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) |
-+ 20);
-+#else
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+ MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
-+ 20);
-+#endif
++ if (caam_little_end)
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH0 |
++ (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
++ else
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH0 |
++ (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
++
+ /* Transform SEQ OUT PTR command in SEQ IN PTR command */
+ append_math_and_imm_u32(desc, REG0, REG0, IMM,
+ ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
+ (4 << LDST_OFFSET_SHIFT));
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
+ /* Move the updated fields back to the Job Descriptor */
-+#ifdef __LITTLE_ENDIAN
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+ MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) |
-+ 24);
-+#else
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+ MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
-+ 24);
-+#endif
++ if (caam_little_end)
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++ MOVE_DEST_DESCBUF |
++ (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
++ else
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++ MOVE_DEST_DESCBUF |
++ (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
++
+ /*
+ * Read the new SEQ IN PTR command, Input Pointer, Input Length
+ * and then jump back to the next command from the
+ * Move the SEQ OUT PTR command, Output Pointer (1 word) and
+ * Output Length into math registers.
+ */
-+#ifdef __LITTLE_ENDIAN
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+ MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
-+ 12);
-+#else
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+ MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) |
-+ 12);
-+#endif
++ if (caam_little_end)
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH0 |
++ (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
++ else
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++ MOVE_DEST_MATH0 |
++ (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
++
+ /* Transform SEQ OUT PTR command in SEQ IN PTR command */
+ append_math_and_imm_u64(desc, REG0, REG0, IMM,
+ ~(((u64)(CMD_SEQ_IN_PTR ^
+ (4 << LDST_OFFSET_SHIFT));
+ append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
+ /* Move the updated fields back to the Job Descriptor */
-+#ifdef __LITTLE_ENDIAN
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+ MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
-+ 16);
-+#else
-+ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+ MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) |
-+ 16);
-+#endif
++ if (caam_little_end)
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++ MOVE_DEST_DESCBUF |
++ (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
++ else
++ append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++ MOVE_DEST_DESCBUF |
++ (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
++
+ /*
+ * Read the new SEQ IN PTR command, Input Pointer, Input Length
+ * and then jump back to the next command from the
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
-+ u8 *nonce = cdata->key_virt + cdata->keylen;
++ const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
-+ u8 *nonce = cdata->key_virt + cdata->keylen;
++ const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+
+ /* Load Nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
-+ u8 *nonce = cdata->key_virt + cdata->keylen;
++ const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ 15 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
-+ unsigned int icvsize);
++ unsigned int icvsize, int era);
+
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
-+ unsigned int icvsize);
++ unsigned int icvsize, int era);
+
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off,
-+ const bool is_qi);
++ const bool is_qi, int era);
+
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
-+ const u32 ctx1_iv_off, const bool is_qi);
++ const u32 ctx1_iv_off, const bool is_qi, int era);
+
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off,
-+ const bool is_qi);
++ const bool is_qi, int era);
+
+void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int assoclen,
+ unsigned int ivsize, unsigned int authsize,
-+ unsigned int blocksize);
++ unsigned int blocksize, int era);
+
+void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int assoclen,
+ unsigned int ivsize, unsigned int authsize,
-+ unsigned int blocksize);
++ unsigned int blocksize, int era);
+
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+#endif /* _CAAMALG_DESC_H_ */
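
Each cnstr_shdsc_* prototype above gains an era parameter because the key-load step now branches: before Era 6 a precomputed MDHA split key is loaded (inline or by reference), while Era 6 and later emit a DKP protocol operation that derives the split key on the fly. A condensed sketch of that branch; the append_* helpers below are no-op stand-ins for the desc_constr.h primitives:

#include <stdbool.h>
#include <stddef.h>

typedef unsigned int u32;

/* Mock of struct alginfo; the real definition lives in desc_constr.h. */
struct alginfo {
	bool key_inline;
	const void *key_virt;
	unsigned long key_dma;
	unsigned int keylen, keylen_pad;
};

static void append_key_imm(u32 *desc, const struct alginfo *a) { (void)desc; (void)a; }
static void append_key_ref(u32 *desc, const struct alginfo *a) { (void)desc; (void)a; }
static void append_proto_dkp(u32 *desc, const struct alginfo *a) { (void)desc; (void)a; }

/* Era < 6: load a precomputed split key; Era >= 6: let DKP derive it. */
static void load_auth_key(u32 *desc, const struct alginfo *adata, int era)
{
	if (era < 6) {
		if (adata->key_inline)
			append_key_imm(desc, adata);
		else
			append_key_ref(desc, adata);
	} else {
		append_proto_dkp(desc, adata);
	}
}

int main(void)
{
	u32 desc[64];
	struct alginfo adata = { true, NULL, 0, 20, 32 };

	load_auth_key(desc, &adata, 5);	/* split-key path */
	load_auth_key(desc, &adata, 6);	/* DKP path */
	return 0;
}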
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi.c
-@@ -0,0 +1,2877 @@
+@@ -0,0 +1,3321 @@
+/*
+ * Freescale FSL CAAM support for crypto API over QI backend.
+ * Based on caamalg.c
+ u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t key_dma;
++ enum dma_data_direction dir;
+ struct alginfo adata;
+ struct alginfo cdata;
+ unsigned int authsize;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
-+ ctx1_iv_off, true);
++ ctx1_iv_off, true, ctrlpriv->era);
+
+skip_enc:
+ /* aead_decrypt shared descriptor */
+
+ cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, alg->caam.geniv,
-+ is_rfc3686, nonce, ctx1_iv_off, true);
++ is_rfc3686, nonce, ctx1_iv_off, true,
++ ctrlpriv->era);
+
+ if (!alg->caam.geniv)
+ goto skip_givenc;
+
+ cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
-+ ctx1_iv_off, true);
++ ctx1_iv_off, true, ctrlpriv->era);
+
+skip_givenc:
+ return 0;
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+ struct crypto_authenc_keys keys;
+ int ret = 0;
+
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
++ /*
++ * If DKP is supported, use it in the shared descriptor to generate
++ * the split key.
++ */
++ if (ctrlpriv->era >= 6) {
++ ctx->adata.keylen = keys.authkeylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
++
++ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
++ goto badkey;
++
++ memcpy(ctx->key, keys.authkey, keys.authkeylen);
++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
++ keys.enckeylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma,
++ ctx->adata.keylen_pad +
++ keys.enckeylen, ctx->dir);
++ goto skip_split_key;
++ }
++
+ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
+ /* postpend encryption key to auth split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
++ keys.enckeylen, ctx->dir);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
++skip_split_key:
+ ctx->cdata.keylen = keys.enckeylen;
+
+ ret = aead_set_sh_desc(aead);
+ unsigned int assoclen = 13; /* always 13 bytes for TLS */
+ unsigned int data_len[2];
+ u32 inl_mask;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
-+ assoclen, ivsize, ctx->authsize, blocksize);
++ assoclen, ivsize, ctx->authsize, blocksize,
++ ctrlpriv->era);
+
+ /*
+ * TLS 1.0 decrypt shared descriptor
+ * Keys do not fit inline, regardless of algorithms used
+ */
++ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
-+ assoclen, ivsize, ctx->authsize, blocksize);
++ assoclen, ivsize, ctx->authsize, blocksize,
++ ctrlpriv->era);
+
+ return 0;
+}
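
tls_set_sh_desc() above sizes the keys against the descriptor budget and reads the cipher-key verdict out of bit 1 of the resulting mask (inl_mask & 2). A hedged sketch of decoding such a mask; the assumption that bit 0 carries the auth-key verdict follows the data_len[] ordering and is not visible in this hunk:

#include <stdbool.h>
#include <stdio.h>

/*
 * Bit n set means data_len[n] still fits in the shared descriptor as
 * immediate data; entry 0 is assumed to be the auth key, entry 1 the
 * cipher key.
 */
static void apply_inline_mask(unsigned int inl_mask,
			      bool *auth_inline, bool *cipher_inline)
{
	*auth_inline = !!(inl_mask & 1);
	*cipher_inline = !!(inl_mask & 2);
}

int main(void)
{
	bool auth, cipher;

	apply_inline_mask(2, &auth, &cipher);
	printf("auth inline: %d, cipher inline: %d\n", auth, cipher);
	return 0;
}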
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ struct device *jrdev = ctx->jrdev;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+ struct crypto_authenc_keys keys;
+ int ret = 0;
+
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
++ /*
++ * If DKP is supported, use it in the shared descriptor to generate
++ * the split key.
++ */
++ if (ctrlpriv->era >= 6) {
++ ctx->adata.keylen = keys.authkeylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
++
++ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
++ goto badkey;
++
++ memcpy(ctx->key, keys.authkey, keys.authkeylen);
++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
++ keys.enckeylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma,
++ ctx->adata.keylen_pad +
++ keys.enckeylen, ctx->dir);
++ goto skip_split_key;
++ }
++
+ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
+ /* postpend encryption key to auth split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
++ keys.enckeylen, ctx->dir);
+
+#ifdef DEBUG
+ dev_err(jrdev, "split keylen %d split keylen padded %d\n",
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
++skip_split_key:
+ ctx->cdata.keylen = keys.enckeylen;
+
+ ret = tls_set_sh_desc(tls);
+ return -EINVAL;
+}
+
-+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-+ const u8 *key, unsigned int keylen)
++static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
-+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
-+ const char *alg_name = crypto_tfm_alg_name(tfm);
-+ struct device *jrdev = ctx->jrdev;
-+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-+ u32 ctx1_iv_off = 0;
-+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
-+ OP_ALG_AAI_CTR_MOD128);
-+ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
-+ int ret = 0;
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
++ ctx->cdata.keylen;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
+
-+ memcpy(ctx->key, key, keylen);
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
+ /*
-+ * AES-CTR needs to load IV in CONTEXT1 reg
-+ * at an offset of 128bits (16bytes)
-+ * CONTEXT1[255:128] = IV
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
+ */
-+ if (ctr_mode)
-+ ctx1_iv_off = 16;
++ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
++ ctx->cdata.key_inline = true;
++ ctx->cdata.key_virt = ctx->key;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++ ctx->authsize, true);
+
+ /*
-+ * RFC3686 specific:
-+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
-+ * | *key = {KEY, NONCE}
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
+ */
-+ if (is_rfc3686) {
-+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
-+ keylen -= CTR_RFC3686_NONCE_SIZE;
++ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
++ ctx->cdata.key_inline = true;
++ ctx->cdata.key_virt = ctx->key;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-+ ctx->cdata.keylen = keylen;
-+ ctx->cdata.key_virt = ctx->key;
-+ ctx->cdata.key_inline = true;
++ cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++ ctx->authsize, true);
+
-+ /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
-+ cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
-+ is_rfc3686, ctx1_iv_off);
-+ cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
-+ is_rfc3686, ctx1_iv_off);
-+ cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
-+ ivsize, is_rfc3686, ctx1_iv_off);
++ return 0;
++}
+
-+ /* Now update the driver contexts with the new shared descriptor */
-+ if (ctx->drv_ctx[ENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+ ctx->sh_desc_enc);
++static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
++
++ ctx->authsize = authsize;
++ gcm_set_sh_desc(authenc);
++
++ return 0;
++}
++
++static int gcm_setkey(struct crypto_aead *aead,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *jrdev = ctx->jrdev;
++ int ret;
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++ memcpy(ctx->key, key, keylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
++ ctx->cdata.keylen = keylen;
++
++ ret = gcm_set_sh_desc(aead);
++ if (ret)
++ return ret;
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
-+ goto badkey;
++ return ret;
+ }
+ }
+
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
-+ goto badkey;
++ return ret;
+ }
+ }
+
-+ if (ctx->drv_ctx[GIVENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
-+ ctx->sh_desc_givenc);
-+ if (ret) {
-+ dev_err(jrdev, "driver givenc context update failed\n");
-+ goto badkey;
-+ }
++ return 0;
++}
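
gcm_setkey() above follows the caam/qi rekeying convention: rebuild the shared descriptors first, then push them into every driver context that has already been instantiated so the in-flight frame queues pick up the new key material. A small mock sketch of that update loop:

#include <stdio.h>

enum optype { ENCRYPT, DECRYPT, GIVENCRYPT, NUM_OP };

/* Mock driver context; the real one wraps a QMan frame queue pair. */
struct drv_ctx { int unused; };

static int drv_ctx_update(struct drv_ctx *d, const unsigned int *sh_desc)
{
	(void)d; (void)sh_desc;
	return 0;	/* would re-seed the context's shared descriptor */
}

struct ctx {
	struct drv_ctx *drv_ctx[NUM_OP];
	unsigned int sh_desc[NUM_OP][64];
};

/* Only contexts already brought up on the fast path need refreshing. */
static int rekey(struct ctx *c)
{
	int type, ret;

	for (type = 0; type < NUM_OP; type++) {
		if (!c->drv_ctx[type])
			continue;
		ret = drv_ctx_update(c->drv_ctx[type], c->sh_desc[type]);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	struct ctx c = { { NULL }, { { 0 } } };

	printf("rekey: %d\n", rekey(&c));
	return 0;
}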
++
++static int rfc4106_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
++ ctx->cdata.keylen;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ ctx->cdata.key_virt = ctx->key;
++
++ /*
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
++ ctx->cdata.key_inline = true;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
-+ return ret;
-+badkey:
-+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
++ cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++ ctx->authsize, true);
++
++ /*
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
++ ctx->cdata.key_inline = true;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
++
++ cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++ ctx->authsize, true);
++
++ return 0;
+}
+
-+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-+ const u8 *key, unsigned int keylen)
++static int rfc4106_setauthsize(struct crypto_aead *authenc,
++ unsigned int authsize)
+{
-+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
++
++ ctx->authsize = authsize;
++ rfc4106_set_sh_desc(authenc);
++
++ return 0;
++}
++
++static int rfc4106_setkey(struct crypto_aead *aead,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
-+ int ret = 0;
++ int ret;
+
-+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-+ crypto_ablkcipher_set_flags(ablkcipher,
-+ CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ dev_err(jrdev, "key size mismatch\n");
++ if (keylen < 4)
+ return -EINVAL;
-+ }
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
+
+ memcpy(ctx->key, key, keylen);
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-+ ctx->cdata.keylen = keylen;
-+ ctx->cdata.key_virt = ctx->key;
-+ ctx->cdata.key_inline = true;
++ /*
++ * The last four bytes of the key material are used as the salt value
++ * in the nonce. Update the AES key length.
++ */
++ ctx->cdata.keylen = keylen - 4;
++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
++ ctx->dir);
+
-+ /* xts ablkcipher encrypt, decrypt shared descriptors */
-+ cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
-+ cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
++ ret = rfc4106_set_sh_desc(aead);
++ if (ret)
++ return ret;
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
-+ goto badkey;
++ return ret;
+ }
+ }
+
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
-+ goto badkey;
++ return ret;
+ }
+ }
+
-+ return ret;
-+badkey:
-+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return 0;
+}
+
-+/*
-+ * aead_edesc - s/w-extended aead descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @assoclen: associated data length, in CAAM endianness
-+ * @assoclen_dma: bus physical mapped address of req->assoclen
-+ * @drv_req: driver-specific request structure
-+ * @sgt: the h/w link table
-+ */
-+struct aead_edesc {
-+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
-+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+ unsigned int assoclen;
-+ dma_addr_t assoclen_dma;
-+ struct caam_drv_req drv_req;
-+#define CAAM_QI_MAX_AEAD_SG \
-+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
-+ sizeof(struct qm_sg_entry))
-+ struct qm_sg_entry sgt[0];
-+};
++static int rfc4543_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
++ ctx->cdata.keylen;
+
-+/*
-+ * tls_edesc - s/w-extended tls descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @drv_req: driver-specific request structure
-+ * @sgt: the h/w link table
-+ */
-+struct tls_edesc {
-+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
-+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+ struct scatterlist tmp[2];
-+ struct scatterlist *dst;
-+ struct caam_drv_req drv_req;
-+ struct qm_sg_entry sgt[0];
-+};
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
+
-+/*
-+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @drv_req: driver-specific request structure
-+ * @sgt: the h/w link table
-+ */
-+struct ablkcipher_edesc {
-+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
-+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+ struct caam_drv_req drv_req;
-+#define CAAM_QI_MAX_ABLKCIPHER_SG \
-+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
-+ sizeof(struct qm_sg_entry))
-+ struct qm_sg_entry sgt[0];
-+};
++ ctx->cdata.key_virt = ctx->key;
+
-+static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
-+ enum optype type)
-+{
+ /*
-+ * This function is called on the fast path with values of 'type'
-+ * known at compile time. Invalid arguments are not expected and
-+ * thus no checks are made.
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
+ */
-+ struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
-+ u32 *desc;
++ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
++ ctx->cdata.key_inline = true;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
+
-+ if (unlikely(!drv_ctx)) {
-+ spin_lock(&ctx->lock);
++ cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++ ctx->authsize, true);
+
-+ /* Read again to check if some other core init drv_ctx */
-+ drv_ctx = ctx->drv_ctx[type];
-+ if (!drv_ctx) {
-+ int cpu;
++ /*
++ * Job Descriptor and Shared Descriptor
++ * must fit into the 64-word Descriptor h/w Buffer
++ */
++ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
++ ctx->cdata.key_inline = true;
++ } else {
++ ctx->cdata.key_inline = false;
++ ctx->cdata.key_dma = ctx->key_dma;
++ }
+
-+ if (type == ENCRYPT)
-+ desc = ctx->sh_desc_enc;
-+ else if (type == DECRYPT)
-+ desc = ctx->sh_desc_dec;
-+ else /* (type == GIVENCRYPT) */
-+ desc = ctx->sh_desc_givenc;
++ cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++ ctx->authsize, true);
+
-+ cpu = smp_processor_id();
-+ drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
-+ if (likely(!IS_ERR_OR_NULL(drv_ctx)))
-+ drv_ctx->op_type = type;
-+
-+ ctx->drv_ctx[type] = drv_ctx;
-+ }
-+
-+ spin_unlock(&ctx->lock);
-+ }
-+
-+ return drv_ctx;
++ return 0;
+}
+
-+static void caam_unmap(struct device *dev, struct scatterlist *src,
-+ struct scatterlist *dst, int src_nents,
-+ int dst_nents, dma_addr_t iv_dma, int ivsize,
-+ enum optype op_type, dma_addr_t qm_sg_dma,
-+ int qm_sg_bytes)
++static int rfc4543_setauthsize(struct crypto_aead *authenc,
++ unsigned int authsize)
+{
-+ if (dst != src) {
-+ if (src_nents)
-+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
-+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
-+ } else {
-+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
-+ }
-+
-+ if (iv_dma)
-+ dma_unmap_single(dev, iv_dma, ivsize,
-+ op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
-+ DMA_TO_DEVICE);
-+ if (qm_sg_bytes)
-+ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
-+}
++ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
-+static void aead_unmap(struct device *dev,
-+ struct aead_edesc *edesc,
-+ struct aead_request *req)
-+{
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ int ivsize = crypto_aead_ivsize(aead);
++ ctx->authsize = authsize;
++ rfc4543_set_sh_desc(authenc);
+
-+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-+ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
-+ edesc->qm_sg_dma, edesc->qm_sg_bytes);
-+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++ return 0;
+}
+
-+static void tls_unmap(struct device *dev,
-+ struct tls_edesc *edesc,
-+ struct aead_request *req)
++static int rfc4543_setkey(struct crypto_aead *aead,
++ const u8 *key, unsigned int keylen)
+{
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ int ivsize = crypto_aead_ivsize(aead);
-+
-+ caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
-+ edesc->dst_nents, edesc->iv_dma, ivsize,
-+ edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
-+ edesc->qm_sg_bytes);
-+}
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *jrdev = ctx->jrdev;
++ int ret;
+
-+static void ablkcipher_unmap(struct device *dev,
-+ struct ablkcipher_edesc *edesc,
-+ struct ablkcipher_request *req)
-+{
-+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ if (keylen < 4)
++ return -EINVAL;
+
-+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-+ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
-+ edesc->qm_sg_dma, edesc->qm_sg_bytes);
-+}
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
+
-+static void aead_done(struct caam_drv_req *drv_req, u32 status)
-+{
-+ struct device *qidev;
-+ struct aead_edesc *edesc;
-+ struct aead_request *aead_req = drv_req->app_ctx;
-+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
-+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
-+ int ecode = 0;
++ memcpy(ctx->key, key, keylen);
++ /*
++ * The last four bytes of the key material are used as the salt value
++ * in the nonce. Update the AES key length.
++ */
++ ctx->cdata.keylen = keylen - 4;
++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
++ ctx->dir);
+
-+ qidev = caam_ctx->qidev;
++ ret = rfc4543_set_sh_desc(aead);
++ if (ret)
++ return ret;
+
-+ if (unlikely(status)) {
-+ caam_jr_strstatus(qidev, status);
-+ ecode = -EIO;
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ return ret;
++ }
+ }
+
-+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
-+ aead_unmap(qidev, edesc, aead_req);
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ return ret;
++ }
++ }
+
-+ aead_request_complete(aead_req, ecode);
-+ qi_cache_free(edesc);
++ return 0;
+}
+
-+/*
-+ * allocate and map the aead extended descriptor
-+ */
-+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-+ bool encrypt)
++static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
++ const u8 *key, unsigned int keylen)
+{
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
-+ typeof(*alg), aead);
-+ struct device *qidev = ctx->qidev;
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+ struct aead_edesc *edesc;
-+ dma_addr_t qm_sg_dma, iv_dma = 0;
-+ int ivsize = 0;
-+ unsigned int authsize = ctx->authsize;
-+ int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
-+ int in_len, out_len;
-+ struct qm_sg_entry *sg_table, *fd_sgt;
-+ struct caam_drv_ctx *drv_ctx;
-+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
++ const char *alg_name = crypto_tfm_alg_name(tfm);
++ struct device *jrdev = ctx->jrdev;
++ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ u32 ctx1_iv_off = 0;
++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
++ OP_ALG_AAI_CTR_MOD128);
++ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
++ int ret = 0;
+
-+ drv_ctx = get_drv_ctx(ctx, op_type);
-+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
-+ return (struct aead_edesc *)drv_ctx;
++ memcpy(ctx->key, key, keylen);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++ /*
++ * AES-CTR needs to load IV in CONTEXT1 reg
++ * at an offset of 128 bits (16 bytes)
++ * CONTEXT1[255:128] = IV
++ */
++ if (ctr_mode)
++ ctx1_iv_off = 16;
+
-+ /* allocate space for base edesc and hw desc commands, link tables */
-+ edesc = qi_cache_alloc(GFP_DMA | flags);
-+ if (unlikely(!edesc)) {
-+ dev_err(qidev, "could not allocate extended descriptor\n");
-+ return ERR_PTR(-ENOMEM);
++ /*
++ * RFC3686 specific:
++ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
++ * | *key = {KEY, NONCE}
++ */
++ if (is_rfc3686) {
++ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
++ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+
-+ if (likely(req->src == req->dst)) {
-+ src_nents = sg_nents_for_len(req->src, req->assoclen +
-+ req->cryptlen +
-+ (encrypt ? authsize : 0));
-+ if (unlikely(src_nents < 0)) {
-+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-+ req->assoclen + req->cryptlen +
-+ (encrypt ? authsize : 0));
-+ qi_cache_free(edesc);
-+ return ERR_PTR(src_nents);
-+ }
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ ctx->cdata.keylen = keylen;
++ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_inline = true;
+
-+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
-+ DMA_BIDIRECTIONAL);
-+ if (unlikely(!mapped_src_nents)) {
-+ dev_err(qidev, "unable to map source\n");
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
++ /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
++ cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++ is_rfc3686, ctx1_iv_off);
++ cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++ is_rfc3686, ctx1_iv_off);
++ cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
++ ivsize, is_rfc3686, ctx1_iv_off);
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ goto badkey;
+ }
-+ } else {
-+ src_nents = sg_nents_for_len(req->src, req->assoclen +
-+ req->cryptlen);
-+ if (unlikely(src_nents < 0)) {
-+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-+ req->assoclen + req->cryptlen);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(src_nents);
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ goto badkey;
+ }
++ }
+
-+ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-+ req->cryptlen +
-+ (encrypt ? authsize :
-+ (-authsize)));
-+ if (unlikely(dst_nents < 0)) {
-+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-+ req->assoclen + req->cryptlen +
-+ (encrypt ? authsize : (-authsize)));
-+ qi_cache_free(edesc);
-+ return ERR_PTR(dst_nents);
++ if (ctx->drv_ctx[GIVENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
++ ctx->sh_desc_givenc);
++ if (ret) {
++ dev_err(jrdev, "driver givenc context update failed\n");
++ goto badkey;
+ }
++ }
+
-+ if (src_nents) {
-+ mapped_src_nents = dma_map_sg(qidev, req->src,
-+ src_nents, DMA_TO_DEVICE);
-+ if (unlikely(!mapped_src_nents)) {
-+ dev_err(qidev, "unable to map source\n");
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ } else {
-+ mapped_src_nents = 0;
-+ }
++ return ret;
++badkey:
++ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++}
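
The CONTEXT1 bookkeeping above can be restated in plain C. CTR_RFC3686_NONCE_SIZE is 4 in the kernel headers; everything else follows the comments in the function:

    /* CTR_RFC3686_NONCE_SIZE is 4 in the kernel headers. */
    enum { CTR_RFC3686_NONCE_SIZE = 4 };

    struct ctr_layout {
            unsigned int ctx1_iv_off;       /* IV offset within CONTEXT1 */
            unsigned int aes_keylen;        /* bytes used as the AES key */
    };

    static struct ctr_layout ctr_key_layout(int ctr_mode, int is_rfc3686,
                                            unsigned int keylen)
    {
            struct ctr_layout l = { 0, keylen };

            if (ctr_mode)
                    l.ctx1_iv_off = 16;     /* CONTEXT1[255:128] = IV */
            if (is_rfc3686) {
                    /* CONTEXT1[255:128] = {NONCE, IV, COUNTER} */
                    l.ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
                    l.aes_keylen = keylen - CTR_RFC3686_NONCE_SIZE;
            }
            return l;
    }
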
+
-+ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
-+ DMA_FROM_DEVICE);
-+ if (unlikely(!mapped_dst_nents)) {
-+ dev_err(qidev, "unable to map destination\n");
-+ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
++static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct device *jrdev = ctx->jrdev;
++ int ret = 0;
++
++ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
++ crypto_ablkcipher_set_flags(ablkcipher,
++ CRYPTO_TFM_RES_BAD_KEY_LEN);
++ dev_err(jrdev, "key size mismatch\n");
++ return -EINVAL;
+ }
+
-+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
-+ ivsize = crypto_aead_ivsize(aead);
-+ iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, iv_dma)) {
-+ dev_err(qidev, "unable to map IV\n");
-+ caam_unmap(qidev, req->src, req->dst, src_nents,
-+ dst_nents, 0, 0, op_type, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
++ memcpy(ctx->key, key, keylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ ctx->cdata.keylen = keylen;
++ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_inline = true;
++
++ /* xts ablkcipher encrypt, decrypt shared descriptors */
++ cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
++ cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ goto badkey;
+ }
+ }
+
-+ /*
-+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
-+ * Input is not contiguous.
-+ */
-+ qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
-+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
-+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
-+ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
-+ qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
-+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+ iv_dma, ivsize, op_type, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ goto badkey;
++ }
+ }
-+ sg_table = &edesc->sgt[0];
-+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
-+ edesc->src_nents = src_nents;
-+ edesc->dst_nents = dst_nents;
-+ edesc->iv_dma = iv_dma;
-+ edesc->drv_req.app_ctx = req;
-+ edesc->drv_req.cbk = aead_done;
-+ edesc->drv_req.drv_ctx = drv_ctx;
++ return ret;
++badkey:
++ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++}
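
The key-size check above exists because XTS carries two independent AES keys of equal length; a standalone restatement:

    /* XTS key blobs hold two AES keys, so only 2*16 (XTS-AES-128) and
     * 2*32 (XTS-AES-256) byte lengths are valid.
     */
    enum { AES_MIN_KEY_SIZE = 16, AES_MAX_KEY_SIZE = 32 };

    static int xts_keylen_ok(unsigned int keylen)
    {
            return keylen == 2 * AES_MIN_KEY_SIZE ||
                   keylen == 2 * AES_MAX_KEY_SIZE;
    }
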
+
-+ edesc->assoclen = cpu_to_caam32(req->assoclen);
-+ edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
-+ dev_err(qidev, "unable to map assoclen\n");
-+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+ iv_dma, ivsize, op_type, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
++/*
++ * aead_edesc - s/w-extended aead descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @assoclen: associated data length, in CAAM endianness
++ * @assoclen_dma: bus physical mapped address of req->assoclen
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table
++ */
++struct aead_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++ unsigned int assoclen;
++ dma_addr_t assoclen_dma;
++ struct caam_drv_req drv_req;
++#define CAAM_QI_MAX_AEAD_SG \
++ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
++ sizeof(struct qm_sg_entry))
++ struct qm_sg_entry sgt[0];
++};
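
CAAM_QI_MAX_AEAD_SG bounds the S/G table because the edesc header and its trailing table must share one qi_cache allocation. A compilable sketch of the same budget arithmetic, with an assumed 512-byte CAAM_QI_MEMCACHE_SIZE and illustrative field sizes (the real values live in qi.h and the structs above):

    #include <stddef.h>
    #include <stdio.h>

    #define CAAM_QI_MEMCACHE_SIZE 512       /* assumed slab size */

    struct qm_sg_entry_sketch { unsigned long long w[2]; }; /* 16 bytes */

    struct aead_edesc_sketch {
            int src_nents, dst_nents, qm_sg_bytes;
            unsigned long long iv_dma, qm_sg_dma, assoclen_dma;
            unsigned int assoclen;
            /* drv_req omitted for brevity */
            struct qm_sg_entry_sketch sgt[];
    };

    int main(void)
    {
            size_t max = (CAAM_QI_MEMCACHE_SIZE -
                          offsetof(struct aead_edesc_sketch, sgt)) /
                         sizeof(struct qm_sg_entry_sketch);

            printf("max S/G entries per edesc: %zu\n", max);
            return 0;
    }
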
+
-+ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
-+ qm_sg_index++;
-+ if (ivsize) {
-+ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
-+ qm_sg_index++;
-+ }
-+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
-+ qm_sg_index += mapped_src_nents;
++/*
++ * tls_edesc - s/w-extended tls descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
++ * @dst: pointer to output scatterlist, usable for in-place operation
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table
++ */
++struct tls_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++ struct scatterlist tmp[2];
++ struct scatterlist *dst;
++ struct caam_drv_req drv_req;
++ struct qm_sg_entry sgt[0];
++};
+
-+ if (mapped_dst_nents > 1)
-+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-+ qm_sg_index, 0);
++/*
++ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table
++ */
++struct ablkcipher_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++ struct caam_drv_req drv_req;
++#define CAAM_QI_MAX_ABLKCIPHER_SG \
++ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
++ sizeof(struct qm_sg_entry))
++ struct qm_sg_entry sgt[0];
++};
+
-+ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, qm_sg_dma)) {
-+ dev_err(qidev, "unable to map S/G table\n");
-+ dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
-+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+ iv_dma, ivsize, op_type, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
++static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
++ enum optype type)
++{
++ /*
++ * This function is called on the fast path with values of 'type'
++ * known at compile time. Invalid arguments are not expected and
++ * thus no checks are made.
++ */
++ struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
++ u32 *desc;
+
-+ edesc->qm_sg_dma = qm_sg_dma;
-+ edesc->qm_sg_bytes = qm_sg_bytes;
++ if (unlikely(!drv_ctx)) {
++ spin_lock(&ctx->lock);
+
-+ out_len = req->assoclen + req->cryptlen +
-+ (encrypt ? ctx->authsize : (-ctx->authsize));
-+ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
++ /* Read again to check if some other core initialized drv_ctx */
++ drv_ctx = ctx->drv_ctx[type];
++ if (!drv_ctx) {
++ int cpu;
+
-+ fd_sgt = &edesc->drv_req.fd_sgt[0];
-+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
++ if (type == ENCRYPT)
++ desc = ctx->sh_desc_enc;
++ else if (type == DECRYPT)
++ desc = ctx->sh_desc_dec;
++ else /* (type == GIVENCRYPT) */
++ desc = ctx->sh_desc_givenc;
+
-+ if (req->dst == req->src) {
-+ if (mapped_src_nents == 1)
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
-+ out_len, 0);
-+ else
-+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
-+ (1 + !!ivsize) * sizeof(*sg_table),
-+ out_len, 0);
-+ } else if (mapped_dst_nents == 1) {
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
-+ 0);
-+ } else {
-+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
-+ qm_sg_index, out_len, 0);
++ cpu = smp_processor_id();
++ drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
++ if (likely(!IS_ERR_OR_NULL(drv_ctx)))
++ drv_ctx->op_type = type;
++
++ ctx->drv_ctx[type] = drv_ctx;
++ }
++
++ spin_unlock(&ctx->lock);
+ }
+
-+ return edesc;
++ return drv_ctx;
+}
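
get_drv_ctx() is the classic check / lock / re-check pattern: the common path dereferences an already-created context without taking the lock, and only losers of the first check serialize on creation. A user-space analogue (the kernel version additionally relies on pointer-store atomicity; a hardened port would use acquire/release atomics):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *drv_ctx;                   /* lazily created, shared */

    static void *heavy_init(void) { return malloc(64); }

    static void *get_ctx(void)
    {
            void *ctx = drv_ctx;            /* fast path: no lock */

            if (!ctx) {
                    pthread_mutex_lock(&lock);
                    ctx = drv_ctx;          /* re-check under the lock */
                    if (!ctx) {
                            ctx = heavy_init();
                            drv_ctx = ctx;
                    }
                    pthread_mutex_unlock(&lock);
            }
            return ctx;
    }
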
+
-+static inline int aead_crypt(struct aead_request *req, bool encrypt)
++static void caam_unmap(struct device *dev, struct scatterlist *src,
++ struct scatterlist *dst, int src_nents,
++ int dst_nents, dma_addr_t iv_dma, int ivsize,
++ enum optype op_type, dma_addr_t qm_sg_dma,
++ int qm_sg_bytes)
+{
-+ struct aead_edesc *edesc;
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ int ret;
-+
-+ if (unlikely(caam_congested))
-+ return -EAGAIN;
-+
-+ /* allocate extended descriptor */
-+ edesc = aead_edesc_alloc(req, encrypt);
-+ if (IS_ERR_OR_NULL(edesc))
-+ return PTR_ERR(edesc);
-+
-+ /* Create and submit job descriptor */
-+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
-+ if (!ret) {
-+ ret = -EINPROGRESS;
++ if (dst != src) {
++ if (src_nents)
++ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
++ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else {
-+ aead_unmap(ctx->qidev, edesc, req);
-+ qi_cache_free(edesc);
++ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ }
+
-+ return ret;
++ if (iv_dma)
++ dma_unmap_single(dev, iv_dma, ivsize,
++ op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
++ DMA_TO_DEVICE);
++ if (qm_sg_bytes)
++ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
+}
+
-+static int aead_encrypt(struct aead_request *req)
++static void aead_unmap(struct device *dev,
++ struct aead_edesc *edesc,
++ struct aead_request *req)
+{
-+ return aead_crypt(req, true);
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ int ivsize = crypto_aead_ivsize(aead);
++
++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
++ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
++ edesc->qm_sg_dma, edesc->qm_sg_bytes);
++ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+}
+
-+static int aead_decrypt(struct aead_request *req)
++static void tls_unmap(struct device *dev,
++ struct tls_edesc *edesc,
++ struct aead_request *req)
+{
-+ return aead_crypt(req, false);
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ int ivsize = crypto_aead_ivsize(aead);
++
++ caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
++ edesc->dst_nents, edesc->iv_dma, ivsize,
++ edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
++ edesc->qm_sg_bytes);
+}
+
-+static void tls_done(struct caam_drv_req *drv_req, u32 status)
++static void ablkcipher_unmap(struct device *dev,
++ struct ablkcipher_edesc *edesc,
++ struct ablkcipher_request *req)
++{
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++
++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
++ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
++ edesc->qm_sg_dma, edesc->qm_sg_bytes);
++}
++
++static void aead_done(struct caam_drv_req *drv_req, u32 status)
+{
+ struct device *qidev;
-+ struct tls_edesc *edesc;
++ struct aead_edesc *edesc;
+ struct aead_request *aead_req = drv_req->app_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+ qidev = caam_ctx->qidev;
+
+ if (unlikely(status)) {
++ u32 ssrc = status & JRSTA_SSRC_MASK;
++ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
++
+ caam_jr_strstatus(qidev, status);
-+ ecode = -EIO;
++ /*
++ * Verify that the hardware authentication (ICV) check passed;
++ * if it did not, report -EBADMSG instead of a generic -EIO.
++ */
++ if (ssrc == JRSTA_SSRC_CCB_ERROR &&
++ err_id == JRSTA_CCBERR_ERRID_ICVCHK)
++ ecode = -EBADMSG;
++ else
++ ecode = -EIO;
+ }
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
-+ tls_unmap(qidev, edesc, aead_req);
++ aead_unmap(qidev, edesc, aead_req);
+
+ aead_request_complete(aead_req, ecode);
+ qi_cache_free(edesc);
+}
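
The error mapping above hinges on two masks from regs.h: the status source must be the CCB and the CCB error ID must be the ICV check. A standalone decoder with illustrative mask values (consult regs.h for the authoritative ones):

    #include <errno.h>

    #define JRSTA_SSRC_MASK           0xf0000000  /* illustrative values */
    #define JRSTA_SSRC_CCB_ERROR      0x20000000
    #define JRSTA_CCBERR_ERRID_MASK   0x000000ff
    #define JRSTA_CCBERR_ERRID_ICVCHK 0x0000000a

    /* -EBADMSG for an authentication failure, -EIO for anything else. */
    static int status_to_ecode(unsigned int status)
    {
            if (!status)
                    return 0;
            if ((status & JRSTA_SSRC_MASK) == JRSTA_SSRC_CCB_ERROR &&
                (status & JRSTA_CCBERR_ERRID_MASK) ==
                JRSTA_CCBERR_ERRID_ICVCHK)
                    return -EBADMSG;
            return -EIO;
    }
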
+
+/*
-+ * allocate and map the tls extended descriptor
++ * allocate and map the aead extended descriptor
+ */
-+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
++static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
++ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ unsigned int blocksize = crypto_aead_blocksize(aead);
-+ unsigned int padsize, authsize;
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *qidev = ctx->qidev;
-+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+ struct tls_edesc *edesc;
++ struct aead_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
-+ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
++ unsigned int authsize = ctx->authsize;
++ int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ struct caam_drv_ctx *drv_ctx;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
-+ struct scatterlist *dst;
-+
-+ if (encrypt) {
-+ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
-+ blocksize);
-+ authsize = ctx->authsize + padsize;
-+ } else {
-+ authsize = ctx->authsize;
-+ }
+
+ drv_ctx = get_drv_ctx(ctx, op_type);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
-+ return (struct tls_edesc *)drv_ctx;
++ return (struct aead_edesc *)drv_ctx;
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (likely(req->src == req->dst)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
-+ (encrypt ? authsize : 0));
++ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
-+ dst = req->dst;
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+ return ERR_PTR(src_nents);
+ }
+
-+ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
-+ dst_nents = sg_nents_for_len(dst, req->cryptlen +
-+ (encrypt ? authsize : 0));
++ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
++ req->cryptlen +
++ (encrypt ? authsize :
++ (-authsize)));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-+ req->cryptlen +
-+ (encrypt ? authsize : 0));
++ req->assoclen + req->cryptlen +
++ (encrypt ? authsize : (-authsize)));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+ mapped_src_nents = 0;
+ }
+
-+ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
++ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ }
+ }
+
-+ ivsize = crypto_aead_ivsize(aead);
-+ iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, iv_dma)) {
-+ dev_err(qidev, "unable to map IV\n");
-+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
-+ op_type, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
++ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
++ ivsize = crypto_aead_ivsize(aead);
++ iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, iv_dma)) {
++ dev_err(qidev, "unable to map IV\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents,
++ dst_nents, 0, 0, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
+ }
+
+ /*
-+ * Create S/G table: IV, src, dst.
++ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+ * Input is not contiguous.
+ */
-+ qm_sg_ents = 1 + mapped_src_nents +
++ qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
++ if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
++ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
++ qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
-+ edesc->dst = dst;
+ edesc->iv_dma = iv_dma;
+ edesc->drv_req.app_ctx = req;
-+ edesc->drv_req.cbk = tls_done;
++ edesc->drv_req.cbk = aead_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
-+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-+ qm_sg_index = 1;
++ edesc->assoclen = cpu_to_caam32(req->assoclen);
++ edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
++ dev_err(qidev, "unable to map assoclen\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
+
++ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
++ qm_sg_index++;
++ if (ivsize) {
++ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
++ qm_sg_index++;
++ }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
-+ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
-+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
-+ ivsize, op_type, 0, 0);
++ dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
-+ out_len = req->cryptlen + (encrypt ? authsize : 0);
-+ in_len = ivsize + req->assoclen + req->cryptlen;
++ out_len = req->assoclen + req->cryptlen +
++ (encrypt ? ctx->authsize : (-ctx->authsize));
++ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
+
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
-+
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
-+ if (req->dst == req->src)
-+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
-+ (sg_nents_for_len(req->src, req->assoclen) +
-+ 1) * sizeof(*sg_table), out_len, 0);
-+ else if (mapped_dst_nents == 1)
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
-+ else
++ if (req->dst == req->src) {
++ if (mapped_src_nents == 1)
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
++ out_len, 0);
++ else
++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
++ (1 + !!ivsize) * sizeof(*sg_table),
++ out_len, 0);
++ } else if (mapped_dst_nents == 1) {
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
++ 0);
++ } else {
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
+ qm_sg_index, out_len, 0);
++ }
+
+ return edesc;
+}
+
-+static int tls_crypt(struct aead_request *req, bool encrypt)
++static inline int aead_crypt(struct aead_request *req, bool encrypt)
+{
-+ struct tls_edesc *edesc;
++ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
+ if (unlikely(caam_congested))
+ return -EAGAIN;
+
-+ edesc = tls_edesc_alloc(req, encrypt);
++ /* allocate extended descriptor */
++ edesc = aead_edesc_alloc(req, encrypt);
+ if (IS_ERR_OR_NULL(edesc))
+ return PTR_ERR(edesc);
+
++ /* Create and submit job descriptor */
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
-+ tls_unmap(ctx->qidev, edesc, req);
++ aead_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
-+static int tls_encrypt(struct aead_request *req)
++static int aead_encrypt(struct aead_request *req)
+{
-+ return tls_crypt(req, true);
++ return aead_crypt(req, true);
+}
+
-+static int tls_decrypt(struct aead_request *req)
++static int aead_decrypt(struct aead_request *req)
+{
-+ return tls_crypt(req, false);
++ return aead_crypt(req, false);
+}
+
-+static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
++static int ipsec_gcm_encrypt(struct aead_request *req)
+{
-+ struct ablkcipher_edesc *edesc;
-+ struct ablkcipher_request *req = drv_req->app_ctx;
-+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-+ struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
-+ struct device *qidev = caam_ctx->qidev;
-+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ if (req->assoclen < 8)
++ return -EINVAL;
+
-+#ifdef DEBUG
-+ dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
-+#endif
++ return aead_crypt(req, true);
++}
+
-+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
++static int ipsec_gcm_decrypt(struct aead_request *req)
++{
++ if (req->assoclen < 8)
++ return -EINVAL;
+
-+ if (status)
-+ caam_jr_strstatus(qidev, status);
++ return aead_crypt(req, false);
++}
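
The assoclen >= 8 guard reflects the kernel AEAD convention for rfc4106/rfc4543, where the 8-byte explicit IV travels as the tail of the associated data (IPsec ESP ends up with 16 or 20 bytes: SPI + sequence number [+ ESN] + IV). A hypothetical AAD builder illustrating that layout:

    #include <string.h>

    /* Hypothetical ESP AAD builder: SPI(4) || seq(4) || explicit IV(8). */
    static unsigned int build_esp_aad(unsigned char *aad,
                                      const unsigned char spi[4],
                                      const unsigned char seq[4],
                                      const unsigned char iv[8])
    {
            memcpy(aad, spi, 4);
            memcpy(aad + 4, seq, 4);
            memcpy(aad + 8, iv, 8);
            return 16;      /* becomes req->assoclen; >= 8 as checked above */
    }
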
+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
-+ edesc->src_nents > 1 ? 100 : ivsize, 1);
-+ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
-+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
-+#endif
++static void tls_done(struct caam_drv_req *drv_req, u32 status)
++{
++ struct device *qidev;
++ struct tls_edesc *edesc;
++ struct aead_request *aead_req = drv_req->app_ctx;
++ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
++ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
++ int ecode = 0;
+
-+ ablkcipher_unmap(qidev, edesc, req);
-+ qi_cache_free(edesc);
++ qidev = caam_ctx->qidev;
+
-+ /*
-+ * The crypto API expects us to set the IV (req->info) to the last
-+ * ciphertext block. This is used e.g. by the CTS mode.
-+ */
-+ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
-+ ivsize, 0);
++ if (unlikely(status)) {
++ caam_jr_strstatus(qidev, status);
++ ecode = -EIO;
++ }
+
-+ ablkcipher_request_complete(req, status);
++ edesc = container_of(drv_req, typeof(*edesc), drv_req);
++ tls_unmap(qidev, edesc, aead_req);
++
++ aead_request_complete(aead_req, ecode);
++ qi_cache_free(edesc);
+}
+
-+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
-+ *req, bool encrypt)
++/*
++ * allocate and map the tls extended descriptor
++ */
++static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
+{
-+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int blocksize = crypto_aead_blocksize(aead);
++ unsigned int padsize, authsize;
++ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
++ typeof(*alg), aead);
+ struct device *qidev = ctx->qidev;
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
++ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
++ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+ struct ablkcipher_edesc *edesc;
-+ dma_addr_t iv_dma;
-+ bool in_contig;
-+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-+ int dst_sg_idx, qm_sg_ents;
++ struct tls_edesc *edesc;
++ dma_addr_t qm_sg_dma, iv_dma = 0;
++ int ivsize = 0;
++ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
++ int in_len, out_len;
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ struct caam_drv_ctx *drv_ctx;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++ struct scatterlist *dst;
++
++ if (encrypt) {
++ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
++ blocksize);
++ authsize = ctx->authsize + padsize;
++ } else {
++ authsize = ctx->authsize;
++ }
+
+ drv_ctx = get_drv_ctx(ctx, op_type);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
-+ return (struct ablkcipher_edesc *)drv_ctx;
++ return (struct tls_edesc *)drv_ctx;
+
-+ src_nents = sg_nents_for_len(req->src, req->nbytes);
-+ if (unlikely(src_nents < 0)) {
-+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-+ req->nbytes);
-+ return ERR_PTR(src_nents);
++ /* allocate space for base edesc and hw desc commands, link tables */
++ edesc = qi_cache_alloc(GFP_DMA | flags);
++ if (unlikely(!edesc)) {
++ dev_err(qidev, "could not allocate extended descriptor\n");
++ return ERR_PTR(-ENOMEM);
+ }
+
-+ if (unlikely(req->src != req->dst)) {
-+ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
-+ if (unlikely(dst_nents < 0)) {
-+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-+ req->nbytes);
-+ return ERR_PTR(dst_nents);
++ if (likely(req->src == req->dst)) {
++ src_nents = sg_nents_for_len(req->src, req->assoclen +
++ req->cryptlen +
++ (encrypt ? authsize : 0));
++ if (unlikely(src_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
++ req->assoclen + req->cryptlen +
++ (encrypt ? authsize : 0));
++ qi_cache_free(edesc);
++ return ERR_PTR(src_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
-+ DMA_TO_DEVICE);
++ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
++ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
++ dst = req->dst;
++ } else {
++ src_nents = sg_nents_for_len(req->src, req->assoclen +
++ req->cryptlen);
++ if (unlikely(src_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
++ req->assoclen + req->cryptlen);
++ qi_cache_free(edesc);
++ return ERR_PTR(src_nents);
++ }
+
-+ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
++ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
++ dst_nents = sg_nents_for_len(dst, req->cryptlen +
++ (encrypt ? authsize : 0));
++ if (unlikely(dst_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
++ req->cryptlen +
++ (encrypt ? authsize : 0));
++ qi_cache_free(edesc);
++ return ERR_PTR(dst_nents);
++ }
++
++ if (src_nents) {
++ mapped_src_nents = dma_map_sg(qidev, req->src,
++ src_nents, DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(qidev, "unable to map source\n");
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ } else {
++ mapped_src_nents = 0;
++ }
++
++ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ } else {
-+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
-+ DMA_BIDIRECTIONAL);
-+ if (unlikely(!mapped_src_nents)) {
-+ dev_err(qidev, "unable to map source\n");
++ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
-+ iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
++ ivsize = crypto_aead_ivsize(aead);
++ iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, iv_dma)) {
+ dev_err(qidev, "unable to map IV\n");
-+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-+ 0, 0, 0, 0);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+
-+ if (mapped_src_nents == 1 &&
-+ iv_dma + ivsize == sg_dma_address(req->src)) {
-+ in_contig = true;
-+ qm_sg_ents = 0;
-+ } else {
-+ in_contig = false;
-+ qm_sg_ents = 1 + mapped_src_nents;
-+ }
-+ dst_sg_idx = qm_sg_ents;
-+
-+ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
-+ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
-+ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
-+ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
-+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+ iv_dma, ivsize, op_type, 0, 0);
++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
++ op_type, 0, 0);
++ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
-+ /* allocate space for base edesc and link tables */
-+ edesc = qi_cache_alloc(GFP_DMA | flags);
-+ if (unlikely(!edesc)) {
-+ dev_err(qidev, "could not allocate extended descriptor\n");
-+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+ iv_dma, ivsize, op_type, 0, 0);
-+ return ERR_PTR(-ENOMEM);
-+ }
++ /*
++ * Create S/G table: IV, src, dst.
++ * Input is not contiguous.
++ */
++ qm_sg_ents = 1 + mapped_src_nents +
++ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
++ sg_table = &edesc->sgt[0];
++ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
++ edesc->dst = dst;
+ edesc->iv_dma = iv_dma;
-+ sg_table = &edesc->sgt[0];
-+ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+ edesc->drv_req.app_ctx = req;
-+ edesc->drv_req.cbk = ablkcipher_done;
++ edesc->drv_req.cbk = tls_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
-+ if (!in_contig) {
-+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
-+ }
++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++ qm_sg_index = 1;
++
++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
++ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
-+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-+ dst_sg_idx, 0);
++ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
++ qm_sg_index, 0);
+
-+ edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
++ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
-+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+ iv_dma, ivsize, op_type, 0, 0);
++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
++ ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
++ edesc->qm_sg_dma = qm_sg_dma;
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ out_len = req->cryptlen + (encrypt ? authsize : 0);
++ in_len = ivsize + req->assoclen + req->cryptlen;
++
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
+
-+ if (!in_contig)
-+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
-+ ivsize + req->nbytes, 0);
-+ else
-+ dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
-+ 0);
++ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
-+ if (req->src == req->dst) {
-+ if (!in_contig)
-+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
-+ sizeof(*sg_table), req->nbytes, 0);
-+ else
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
-+ req->nbytes, 0);
-+ } else if (mapped_dst_nents > 1) {
-+ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
-+ sizeof(*sg_table), req->nbytes, 0);
-+ } else {
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
-+ req->nbytes, 0);
-+ }
++ if (req->dst == req->src)
++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
++ (sg_nents_for_len(req->src, req->assoclen) +
++ 1) * sizeof(*sg_table), out_len, 0);
++ else if (mapped_dst_nents == 1)
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
++ else
++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
++ qm_sg_index, out_len, 0);
+
+ return edesc;
+}
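
A worked example of the encrypt-path padding computed at the top of tls_edesc_alloc(): TLS 1.0 CBC pads ciphertext + ICV up to a block multiple and always adds at least one byte, so an already-aligned payload gains a whole extra block:

    #include <stdio.h>

    /* padsize as computed in tls_edesc_alloc() on the encrypt path */
    static unsigned int tls_padsize(unsigned int cryptlen,
                                    unsigned int authsize,
                                    unsigned int blocksize)
    {
            return blocksize - ((cryptlen + authsize) % blocksize);
    }

    int main(void)
    {
            /* AES-CBC (16) + HMAC-SHA1 (20), 32-byte record:
             * 32 + 20 = 52, 52 % 16 = 4, pad = 12, wire size = 64.
             */
            printf("%u\n", tls_padsize(32, 20, 16));        /* 12 */
            /* aligned input still gets one full block of padding */
            printf("%u\n", tls_padsize(28, 20, 16));        /* 16 */
            return 0;
    }
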
+
-+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
-+ struct skcipher_givcrypt_request *creq)
++static int tls_crypt(struct aead_request *req, bool encrypt)
+{
-+ struct ablkcipher_request *req = &creq->creq;
-+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-+ struct device *qidev = ctx->qidev;
-+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
-+ int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
-+ struct ablkcipher_edesc *edesc;
++ struct tls_edesc *edesc;
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ int ret;
++
++ if (unlikely(caam_congested))
++ return -EAGAIN;
++
++ edesc = tls_edesc_alloc(req, encrypt);
++ if (IS_ERR_OR_NULL(edesc))
++ return PTR_ERR(edesc);
++
++ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
++ if (!ret) {
++ ret = -EINPROGRESS;
++ } else {
++ tls_unmap(ctx->qidev, edesc, req);
++ qi_cache_free(edesc);
++ }
++
++ return ret;
++}
++
++static int tls_encrypt(struct aead_request *req)
++{
++ return tls_crypt(req, true);
++}
++
++static int tls_decrypt(struct aead_request *req)
++{
++ return tls_crypt(req, false);
++}
++
++static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
++{
++ struct ablkcipher_edesc *edesc;
++ struct ablkcipher_request *req = drv_req->app_ctx;
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct device *qidev = caam_ctx->qidev;
++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++
++#ifdef DEBUG
++ dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ edesc = container_of(drv_req, typeof(*edesc), drv_req);
++
++ if (status)
++ caam_jr_strstatus(qidev, status);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
++ edesc->src_nents > 1 ? 100 : ivsize, 1);
++ caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
++ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
++#endif
++
++ ablkcipher_unmap(qidev, edesc, req);
++ qi_cache_free(edesc);
++
++ /*
++ * The crypto API expects us to set the IV (req->info) to the last
++ * ciphertext block. This is used e.g. by the CTS mode.
++ */
++ scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
++ ivsize, 0);
++
++ ablkcipher_request_complete(req, status);
++}
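
The IV handover done at the end of the callback, restated standalone: the last ciphertext block becomes the IV of the next request, which is what chains CBC (and CTS) across calls:

    #include <string.h>

    /* After producing `nbytes` of ciphertext in `dst`, carry the last
     * block forward as the next request's IV (req->info in the driver).
     */
    static void chain_iv(unsigned char *iv, const unsigned char *dst,
                         unsigned int nbytes, unsigned int ivsize)
    {
            memcpy(iv, dst + nbytes - ivsize, ivsize);
    }
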
++
++static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
++ *req, bool encrypt)
++{
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct device *qidev = ctx->qidev;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
++ struct ablkcipher_edesc *edesc;
++ dma_addr_t iv_dma;
++ bool in_contig;
++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ int dst_sg_idx, qm_sg_ents;
++ struct qm_sg_entry *sg_table, *fd_sgt;
++ struct caam_drv_ctx *drv_ctx;
++ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++
++ drv_ctx = get_drv_ctx(ctx, op_type);
++ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
++ return (struct ablkcipher_edesc *)drv_ctx;
++
++ src_nents = sg_nents_for_len(req->src, req->nbytes);
++ if (unlikely(src_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
++ req->nbytes);
++ return ERR_PTR(src_nents);
++ }
++
++ if (unlikely(req->src != req->dst)) {
++ dst_nents = sg_nents_for_len(req->dst, req->nbytes);
++ if (unlikely(dst_nents < 0)) {
++ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
++ req->nbytes);
++ return ERR_PTR(dst_nents);
++ }
++
++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(qidev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
++ DMA_FROM_DEVICE);
++ if (unlikely(!mapped_dst_nents)) {
++ dev_err(qidev, "unable to map destination\n");
++ dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
++ return ERR_PTR(-ENOMEM);
++ }
++ } else {
++ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
++ DMA_BIDIRECTIONAL);
++ if (unlikely(!mapped_src_nents)) {
++ dev_err(qidev, "unable to map source\n");
++ return ERR_PTR(-ENOMEM);
++ }
++ }
++
++ iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, iv_dma)) {
++ dev_err(qidev, "unable to map IV\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++ 0, 0, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ if (mapped_src_nents == 1 &&
++ iv_dma + ivsize == sg_dma_address(req->src)) {
++ in_contig = true;
++ qm_sg_ents = 0;
++ } else {
++ in_contig = false;
++ qm_sg_ents = 1 + mapped_src_nents;
++ }
++ dst_sg_idx = qm_sg_ents;
++
++ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
++ if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
++ dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
++ qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_alloc(GFP_DMA | flags);
++ if (unlikely(!edesc)) {
++ dev_err(qidev, "could not allocate extended descriptor\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ edesc->src_nents = src_nents;
++ edesc->dst_nents = dst_nents;
++ edesc->iv_dma = iv_dma;
++ sg_table = &edesc->sgt[0];
++ edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++ edesc->drv_req.app_ctx = req;
++ edesc->drv_req.cbk = ablkcipher_done;
++ edesc->drv_req.drv_ctx = drv_ctx;
++
++ if (!in_contig) {
++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
++ }
++
++ if (mapped_dst_nents > 1)
++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
++ dst_sg_idx, 0);
++
++ edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
++ dev_err(qidev, "unable to map S/G table\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ fd_sgt = &edesc->drv_req.fd_sgt[0];
++
++ if (!in_contig)
++ dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
++ ivsize + req->nbytes, 0);
++ else
++ dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
++ 0);
++
++ if (req->src == req->dst) {
++ if (!in_contig)
++ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
++ sizeof(*sg_table), req->nbytes, 0);
++ else
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
++ req->nbytes, 0);
++ } else if (mapped_dst_nents > 1) {
++ dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
++ sizeof(*sg_table), req->nbytes, 0);
++ } else {
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
++ req->nbytes, 0);
++ }
++
++ return edesc;
++}
++
++static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
++ struct skcipher_givcrypt_request *creq)
++{
++ struct ablkcipher_request *req = &creq->creq;
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct device *qidev = ctx->qidev;
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
++ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma;
+ bool out_contig;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+};
+
+static struct caam_aead_alg driver_aeads[] = {
-+ /* single-pass ipsec_esp descriptor */
+ {
+ .aead = {
+ .base = {
-+ .cra_name = "authenc(hmac(md5),cbc(aes))",
-+ .cra_driver_name = "authenc-hmac-md5-"
-+ "cbc-aes-caam-qi",
-+ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_name = "rfc4106(gcm(aes))",
++ .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
++ .cra_blocksize = 1,
+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = MD5_DIGEST_SIZE,
++ .setkey = rfc4106_setkey,
++ .setauthsize = rfc4106_setauthsize,
++ .encrypt = ipsec_gcm_encrypt,
++ .decrypt = ipsec_gcm_decrypt,
++ .ivsize = 8,
++ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "rfc4543(gcm(aes))",
++ .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
++ .cra_blocksize = 1,
++ },
++ .setkey = rfc4543_setkey,
++ .setauthsize = rfc4543_setauthsize,
++ .encrypt = ipsec_gcm_encrypt,
++ .decrypt = ipsec_gcm_decrypt,
++ .ivsize = 8,
++ .maxauthsize = AES_BLOCK_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
++ },
++ },
++ /* Galois Counter Mode */
++ {
++ .aead = {
++ .base = {
++ .cra_name = "gcm(aes)",
++ .cra_driver_name = "gcm-aes-caam-qi",
++ .cra_blocksize = 1,
++ },
++ .setkey = gcm_setkey,
++ .setauthsize = gcm_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = 12,
++ .maxauthsize = AES_BLOCK_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
++ }
++ },
++ /* single-pass ipsec_esp descriptor */
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(md5),cbc(aes))",
++ .cra_driver_name = "authenc-hmac-md5-"
++ "cbc-aes-caam-qi",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = MD5_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ struct caam_alg_entry caam;
+};
+
-+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
++static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
++ bool uses_dkp)
+{
+ struct caam_drv_private *priv;
+ /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
+ return PTR_ERR(ctx->jrdev);
+ }
+
++ priv = dev_get_drvdata(ctx->jrdev->parent);
++ if (priv->era >= 6 && uses_dkp)
++ ctx->dir = DMA_BIDIRECTIONAL;
++ else
++ ctx->dir = DMA_TO_DEVICE;
++
+ ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
-+ DMA_TO_DEVICE);
++ ctx->dir);
+ if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
+ dev_err(ctx->jrdev, "unable to map key\n");
+ caam_jr_free(ctx->jrdev);
+ ctx->authsize = 0;
+ }
+
-+ priv = dev_get_drvdata(ctx->jrdev->parent);
+ ctx->qidev = priv->qidev;
+
+ spin_lock_init(&ctx->lock);
+ crypto_alg);
+ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+
-+ return caam_init_common(ctx, &caam_alg->caam);
++ return caam_init_common(ctx, &caam_alg->caam, false);
+}
+
+static int caam_aead_init(struct crypto_aead *tfm)
+ aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+
-+ return caam_init_common(ctx, &caam_alg->caam);
++ return caam_init_common(ctx, &caam_alg->caam,
++ (alg->setkey == aead_setkey) ||
++ (alg->setkey == tls_setkey));
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+ caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
+ caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
+
-+ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
-+ DMA_TO_DEVICE);
++ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
+
+ caam_jr_free(ctx->jrdev);
+}
+MODULE_AUTHOR("Freescale Semiconductor");
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.c
-@@ -0,0 +1,4428 @@
+@@ -0,0 +1,5938 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
++#include <linux/fsl/mc.h>
+#include "compat.h"
+#include "regs.h"
+#include "caamalg_qi2.h"
+#include "sg_sw_qm2.h"
+#include "key_gen.h"
+#include "caamalg_desc.h"
-+#include "../../../drivers/staging/fsl-mc/include/mc.h"
++#include "caamhash_desc.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+
+ * caam_ctx - per-session context
+ * @flc: Flow Contexts array
+ * @key: virtual address of the key(s): [authentication key], encryption key
++ * @flc_dma: I/O virtual addresses of the Flow Contexts
+ * @key_dma: I/O virtual address of the key
++ * @dir: DMA direction for mapping key and Flow Contexts
+ * @dev: dpseci device
+ * @adata: authentication algorithm details
+ * @cdata: encryption algorithm details
+struct caam_ctx {
+ struct caam_flc flc[NUM_OP];
+ u8 key[CAAM_MAX_KEY_SIZE];
++ dma_addr_t flc_dma[NUM_OP];
+ dma_addr_t key_dma;
++ enum dma_data_direction dir;
+ struct device *dev;
+ struct alginfo adata;
+ struct alginfo cdata;
+ case CRYPTO_ALG_TYPE_AEAD:
+ return aead_request_ctx(container_of(areq, struct aead_request,
+ base));
++ case CRYPTO_ALG_TYPE_AHASH:
++ return ahash_request_ctx(ahash_request_cast(areq));
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct device *dev = ctx->dev;
++ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ struct caam_flc *flc;
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ if (alg->caam.geniv)
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686,
-+ nonce, ctx1_iv_off, true);
++ nonce, ctx1_iv_off, true,
++ priv->sec_attr.era);
+ else
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
-+ ctx1_iv_off, true);
++ ctx1_iv_off, true, priv->sec_attr.era);
+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ /* aead_decrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, alg->caam.geniv,
-+ is_rfc3686, nonce, ctx1_iv_off, true);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ is_rfc3686, nonce, ctx1_iv_off, true,
++ priv->sec_attr.era);
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ return 0;
+}
+ complete(&res->completion);
+}
+
-+static int gen_split_key_sh(struct device *dev, u8 *key_out,
-+ struct alginfo * const adata, const u8 *key_in,
-+ u32 keylen)
-+{
-+ struct caam_request *req_ctx;
-+ u32 *desc;
-+ struct split_key_sh_result result;
-+ dma_addr_t dma_addr_in, dma_addr_out;
-+ struct caam_flc *flc;
-+ struct dpaa2_fl_entry *in_fle, *out_fle;
-+ int ret = -ENOMEM;
-+
-+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
-+ if (!req_ctx)
-+ return -ENOMEM;
-+
-+ in_fle = &req_ctx->fd_flt[1];
-+ out_fle = &req_ctx->fd_flt[0];
-+
-+ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
-+ if (!flc)
-+ goto err_flc;
-+
-+ dma_addr_in = dma_map_single(dev, (void *)key_in, keylen,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, dma_addr_in)) {
-+ dev_err(dev, "unable to map key input memory\n");
-+ goto err_dma_addr_in;
-+ }
-+
-+ dma_addr_out = dma_map_single(dev, key_out, adata->keylen_pad,
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(dev, dma_addr_out)) {
-+ dev_err(dev, "unable to map key output memory\n");
-+ goto err_dma_addr_out;
-+ }
-+
-+ desc = flc->sh_desc;
-+
-+ init_sh_desc(desc, 0);
-+ append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
-+
-+ /* Sets MDHA up into an HMAC-INIT */
-+ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
-+ OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
-+ OP_ALG_AS_INIT);
-+
-+ /*
-+ * do a FIFO_LOAD of zero, this will trigger the internal key expansion
-+ * into both pads inside MDHA
-+ */
-+ append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
-+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
-+
-+ /*
-+ * FIFO_STORE with the explicit split-key content store
-+ * (0x26 output type)
-+ */
-+ append_fifo_store(desc, dma_addr_out, adata->keylen,
-+ LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ goto err_flc_dma;
-+ }
-+
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(in_fle, dma_addr_in);
-+ dpaa2_fl_set_len(in_fle, keylen);
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, dma_addr_out);
-+ dpaa2_fl_set_len(out_fle, adata->keylen_pad);
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
-+ print_hex_dump(KERN_ERR, "desc@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-+#endif
-+
-+ result.err = 0;
-+ init_completion(&result.completion);
-+ result.dev = dev;
-+
-+ req_ctx->flc = flc;
-+ req_ctx->cbk = split_key_sh_done;
-+ req_ctx->ctx = &result;
-+
-+ ret = dpaa2_caam_enqueue(dev, req_ctx);
-+ if (ret == -EINPROGRESS) {
-+ /* in progress */
-+ wait_for_completion(&result.completion);
-+ ret = result.err;
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key_out,
-+ adata->keylen_pad, 1);
-+#endif
-+ }
-+
-+ dma_unmap_single(dev, flc->flc_dma, sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_TO_DEVICE);
-+err_flc_dma:
-+ dma_unmap_single(dev, dma_addr_out, adata->keylen_pad, DMA_FROM_DEVICE);
-+err_dma_addr_out:
-+ dma_unmap_single(dev, dma_addr_in, keylen, DMA_TO_DEVICE);
-+err_dma_addr_in:
-+ kfree(flc);
-+err_flc:
-+ kfree(req_ctx);
-+ return ret;
-+}
-+
-+static int gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
-+ u32 authkeylen)
-+{
-+ return gen_split_key_sh(ctx->dev, ctx->key, &ctx->adata, key_in,
-+ authkeylen);
-+}
-+
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
-+ int ret;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
-+ ctx->adata.keylen = split_key_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+ ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+
-+#ifdef DEBUG
-+ dev_err(dev, "split keylen %d split keylen padded %d\n",
-+ ctx->adata.keylen, ctx->adata.keylen_pad);
-+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, keylen, 1);
-+#endif
++ ctx->adata.keylen = keys.authkeylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
-+ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
-+ if (ret)
-+ goto badkey;
-+
-+ /* postpend encryption key to auth split key */
++ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
-+
-+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
++ keys.enckeylen, ctx->dir);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+
+ ctx->cdata.keylen = keys.enckeylen;
+
-+ ret = aead_set_sh_desc(aead);
-+ if (ret)
-+ dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
-+
-+ return ret;
++ return aead_set_sh_desc(aead);
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+
-+ edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4,
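++	/* keep a CAAM-endian copy of assoclen in the DMA-able edesc */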
++ edesc->assoclen = cpu_to_caam32(req->assoclen);
++ edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->assoclen_dma)) {
+ dev_err(dev, "unable to map assoclen\n");
+ unsigned int ivsize = crypto_aead_ivsize(tls);
+ unsigned int blocksize = crypto_aead_blocksize(tls);
+ struct device *dev = ctx->dev;
++ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ struct caam_flc *flc;
+ u32 *desc;
+ unsigned int assoclen = 13; /* always 13 bytes for TLS */
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
-+ assoclen, ivsize, ctx->authsize, blocksize);
-+
-+ flc->flc[1] = desc_len(desc);
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ assoclen, ivsize, ctx->authsize, blocksize,
++ priv->sec_attr.era);
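++	/* the FLC is premapped at tfm init; sync pushes the updated descriptor */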
++ flc->flc[1] = cpu_to_caam32(desc_len(desc));
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ /*
+ * TLS 1.0 decrypt shared descriptor
+ * Keys do not fit inline, regardless of algorithms used
+ */
++ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
-+ ctx->authsize, blocksize);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ ctx->authsize, blocksize, priv->sec_attr.era);
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ return 0;
+}
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
-+ int ret;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
-+ ctx->adata.keylen = split_key_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+ ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+
-+#ifdef DEBUG
-+ dev_err(dev, "split keylen %d split keylen padded %d\n",
-+ ctx->adata.keylen, ctx->adata.keylen_pad);
-+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey,
-+ keys.authkeylen + keys.enckeylen, 1);
-+#endif
++ ctx->adata.keylen = keys.authkeylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
-+ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
-+ if (ret)
-+ goto badkey;
-+
-+ /* postpend encryption key to auth split key */
++ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
-+
-+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
++ keys.enckeylen, ctx->dir);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+
+ ctx->cdata.keylen = keys.enckeylen;
+
-+ ret = tls_set_sh_desc(tls);
-+ if (ret)
-+ dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
-+
-+ return ret;
++ return tls_set_sh_desc(tls);
+badkey:
+ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ return 0;
+}
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
-+ int ret;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+#endif
+
+ memcpy(ctx->key, key, keylen);
-+ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
+ ctx->cdata.keylen = keylen;
+
-+ ret = gcm_set_sh_desc(aead);
-+ if (ret)
-+ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
-+
-+ return ret;
++ return gcm_set_sh_desc(aead);
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ return 0;
+}
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
-+ int ret;
+
+ if (keylen < 4)
+ return -EINVAL;
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
-+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
-+
-+ ret = rfc4106_set_sh_desc(aead);
-+ if (ret)
-+ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
++ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
++ ctx->dir);
+
-+ return ret;
++ return rfc4106_set_sh_desc(aead);
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ return 0;
+}
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
-+ int ret;
+
+ if (keylen < 4)
+ return -EINVAL;
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
-+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
-+
-+ ret = rfc4543_set_sh_desc(aead);
-+ if (ret)
-+ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
++ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
++ ctx->dir);
+
-+ return ret;
++ return rfc4543_set_sh_desc(aead);
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+
-+ memcpy(ctx->key, key, keylen);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+
-+ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
+ ctx->cdata.keylen = keylen;
-+ ctx->cdata.key_virt = ctx->key;
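++	/* the key is inlined into the shared descriptors, so no DMA mapping */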
++ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* ablkcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ /* ablkcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ /* ablkcipher_givencrypt shared descriptor */
+ flc = &ctx->flc[GIVENCRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
+ ivsize, is_rfc3686, ctx1_iv_off);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[GIVENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ return 0;
+}
+ return -EINVAL;
+ }
+
-+ memcpy(ctx->key, key, keylen);
-+ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
+ ctx->cdata.keylen = keylen;
-+ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* xts_ablkcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ /* xts_ablkcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
+
+ return 0;
+}
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
++ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->op_type = ENCRYPT;
+ caam_req->cbk = aead_encrypt_done;
+ caam_req->ctx = &req->base;
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
++ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->op_type = DECRYPT;
+ caam_req->cbk = aead_decrypt_done;
+ caam_req->ctx = &req->base;
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
++ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->op_type = ENCRYPT;
+ caam_req->cbk = tls_encrypt_done;
+ caam_req->ctx = &req->base;
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
++ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->op_type = DECRYPT;
+ caam_req->cbk = tls_decrypt_done;
+ caam_req->ctx = &req->base;
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
++ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->op_type = ENCRYPT;
+ caam_req->cbk = ablkcipher_done;
+ caam_req->ctx = &req->base;
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[GIVENCRYPT];
++ caam_req->flc_dma = ctx->flc_dma[GIVENCRYPT];
+ caam_req->op_type = GIVENCRYPT;
+ caam_req->cbk = ablkcipher_done;
+ caam_req->ctx = &req->base;
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
++ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->op_type = DECRYPT;
+ caam_req->cbk = ablkcipher_done;
+ caam_req->ctx = &req->base;
+ struct caam_alg_entry caam;
+};
+
-+static int caam_cra_init(struct crypto_tfm *tfm)
++static int caam_cra_init(struct crypto_tfm *tfm, bool uses_dkp)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+ crypto_alg);
+ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
++ dma_addr_t dma_addr;
++ int i;
+
+ /* copy descriptor header template value */
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
+ caam_alg->caam.class2_alg_type;
+
+ ctx->dev = caam_alg->caam.dev;
++ ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
++
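++	/*
++	 * A single mapping covers the flc[] array and the key buffer that
++	 * follows it (everything up to the flc_dma member), and is synced
++	 * explicitly whenever a descriptor or key is rewritten.
++	 */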
++ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
++ offsetof(struct caam_ctx, flc_dma),
++ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
++ if (dma_mapping_error(ctx->dev, dma_addr)) {
++ dev_err(ctx->dev, "unable to map key, shared descriptors\n");
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < NUM_OP; i++)
++ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
++ ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
+
+ return 0;
+}
+ crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
+
+ ablkcipher_tfm->reqsize = sizeof(struct caam_request);
-+ return caam_cra_init(tfm);
++ return caam_cra_init(tfm, false);
+}
+
+static int caam_cra_init_aead(struct crypto_aead *tfm)
+{
++ struct aead_alg *alg = crypto_aead_alg(tfm);
++
+ crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
-+ return caam_cra_init(crypto_aead_tfm(tfm));
++ return caam_cra_init(crypto_aead_tfm(tfm),
++ (alg->setkey == aead_setkey) ||
++ (alg->setkey == tls_setkey));
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
-+ int i;
-+
-+ for (i = 0; i < NUM_OP; i++) {
-+ if (!ctx->flc[i].flc_dma)
-+ continue;
-+ dma_unmap_single(ctx->dev, ctx->flc[i].flc_dma,
-+ sizeof(ctx->flc[i].flc) +
-+ desc_bytes(ctx->flc[i].sh_desc),
-+ DMA_TO_DEVICE);
-+ }
-+
-+ if (ctx->key_dma)
-+ dma_unmap_single(ctx->dev, ctx->key_dma,
-+ ctx->cdata.keylen + ctx->adata.keylen_pad,
-+ DMA_TO_DEVICE);
++ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
++ offsetof(struct caam_ctx, flc_dma), ctx->dir,
++ DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void caam_cra_exit(struct crypto_tfm *tfm)
+ },
+ },
+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "seqiv(authenc(hmac(sha384),"
-+ "rfc3686(ctr(aes))))",
-+ .cra_driver_name = "seqiv-authenc-hmac-sha384-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
++ .aead = {
++ .base = {
++ .cra_name = "seqiv(authenc(hmac(sha384),"
++ "rfc3686(ctr(aes))))",
++ .cra_driver_name = "seqiv-authenc-hmac-sha384-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ .geniv = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "authenc(hmac(sha512),"
++ "rfc3686(ctr(aes)))",
++ .cra_driver_name = "authenc-hmac-sha512-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "seqiv(authenc(hmac(sha512),"
++ "rfc3686(ctr(aes))))",
++ .cra_driver_name = "seqiv-authenc-hmac-sha512-"
++ "rfc3686-ctr-aes-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = aead_setkey,
++ .setauthsize = aead_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES |
++ OP_ALG_AAI_CTR_MOD128,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ .rfc3686 = true,
++ .geniv = true,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "tls10(hmac(sha1),cbc(aes))",
++ .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
++ .cra_blocksize = AES_BLOCK_SIZE,
++ },
++ .setkey = tls_setkey,
++ .setauthsize = tls_setauthsize,
++ .encrypt = tls_encrypt,
++ .decrypt = tls_decrypt,
++ .ivsize = AES_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++ OP_ALG_AAI_HMAC_PRECOMP,
++ },
++ },
++};
++
++static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
++ *template)
++{
++ struct caam_crypto_alg *t_alg;
++ struct crypto_alg *alg;
++
++ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
++ if (!t_alg)
++ return ERR_PTR(-ENOMEM);
++
++ alg = &t_alg->crypto_alg;
++
++ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
++ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
++ template->driver_name);
++ alg->cra_module = THIS_MODULE;
++ alg->cra_exit = caam_cra_exit;
++ alg->cra_priority = CAAM_CRA_PRIORITY;
++ alg->cra_blocksize = template->blocksize;
++ alg->cra_alignmask = 0;
++ alg->cra_ctxsize = sizeof(struct caam_ctx);
++ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
++ template->type;
++ switch (template->type) {
++ case CRYPTO_ALG_TYPE_GIVCIPHER:
++ alg->cra_init = caam_cra_init_ablkcipher;
++ alg->cra_type = &crypto_givcipher_type;
++ alg->cra_ablkcipher = template->template_ablkcipher;
++ break;
++ case CRYPTO_ALG_TYPE_ABLKCIPHER:
++ alg->cra_init = caam_cra_init_ablkcipher;
++ alg->cra_type = &crypto_ablkcipher_type;
++ alg->cra_ablkcipher = template->template_ablkcipher;
++ break;
++ }
++
++ t_alg->caam.class1_alg_type = template->class1_alg_type;
++ t_alg->caam.class2_alg_type = template->class2_alg_type;
++
++ return t_alg;
++}
++
++static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
++{
++ struct aead_alg *alg = &t_alg->aead;
++
++ alg->base.cra_module = THIS_MODULE;
++ alg->base.cra_priority = CAAM_CRA_PRIORITY;
++ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
++ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
++
++ alg->init = caam_cra_init_aead;
++ alg->exit = caam_cra_exit_aead;
++}
++
++/* max hash key is max split key size */
++#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
++
++#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
++#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
++
++#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
++ CAAM_MAX_HASH_KEY_SIZE)
++#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
++
++/* caam context sizes for hashes: running digest + 8 */
++#define HASH_MSG_LEN 8
++#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
++
++enum hash_optype {
++ UPDATE = 0,
++ UPDATE_FIRST,
++ FINALIZE,
++ DIGEST,
++ HASH_NUM_OP
++};
++
++/**
++ * caam_hash_ctx - ahash per-session context
++ * @flc: Flow Contexts array
++ * @flc_dma: I/O virtual addresses of the Flow Contexts
++ * @key: virtual address of the authentication key
++ * @dev: dpseci device
++ * @ctx_len: size of Context Register
++ * @adata: hashing algorithm details
++ */
++struct caam_hash_ctx {
++ struct caam_flc flc[HASH_NUM_OP];
++ dma_addr_t flc_dma[HASH_NUM_OP];
++ u8 key[CAAM_MAX_HASH_KEY_SIZE];
++ struct device *dev;
++ int ctx_len;
++ struct alginfo adata;
++};
++
++/* ahash state */
++struct caam_hash_state {
++ struct caam_request caam_req;
++ dma_addr_t buf_dma;
++ dma_addr_t ctx_dma;
++ u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
++ int buflen_0;
++ u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
++ int buflen_1;
++ u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
++ int (*update)(struct ahash_request *req);
++ int (*final)(struct ahash_request *req);
++ int (*finup)(struct ahash_request *req);
++ int current_buf;
++};
++
++struct caam_export_state {
++ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
++ u8 caam_ctx[MAX_CTX_LEN];
++ int buflen;
++ int (*update)(struct ahash_request *req);
++ int (*final)(struct ahash_request *req);
++ int (*finup)(struct ahash_request *req);
++};
++
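++/*
++ * Partial blocks are double-buffered: current_buf()/current_buflen() hold
++ * the bytes still waiting to be hashed, alt_buf() stages the remainder of
++ * the next request, and switch_buf() flips the two once a job completes.
++ */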
++static inline void switch_buf(struct caam_hash_state *state)
++{
++ state->current_buf ^= 1;
++}
++
++static inline u8 *current_buf(struct caam_hash_state *state)
++{
++ return state->current_buf ? state->buf_1 : state->buf_0;
++}
++
++static inline u8 *alt_buf(struct caam_hash_state *state)
++{
++ return state->current_buf ? state->buf_0 : state->buf_1;
++}
++
++static inline int *current_buflen(struct caam_hash_state *state)
++{
++ return state->current_buf ? &state->buflen_1 : &state->buflen_0;
++}
++
++static inline int *alt_buflen(struct caam_hash_state *state)
++{
++ return state->current_buf ? &state->buflen_0 : &state->buflen_1;
++}
++
++/* Map current buffer in state (if length > 0) and put it in link table */
++static inline int buf_map_to_qm_sg(struct device *dev,
++ struct dpaa2_sg_entry *qm_sg,
++ struct caam_hash_state *state)
++{
++ int buflen = *current_buflen(state);
++
++ if (!buflen)
++ return 0;
++
++ state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, state->buf_dma)) {
++ dev_err(dev, "unable to map buf\n");
++ state->buf_dma = 0;
++ return -ENOMEM;
++ }
++
++ dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
++
++ return 0;
++}
++
++/* Map state->caam_ctx, and add it to link table */
++static inline int ctx_map_to_qm_sg(struct device *dev,
++ struct caam_hash_state *state, int ctx_len,
++ struct dpaa2_sg_entry *qm_sg, u32 flag)
++{
++ state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
++ if (dma_mapping_error(dev, state->ctx_dma)) {
++ dev_err(dev, "unable to map ctx\n");
++ state->ctx_dma = 0;
++ return -ENOMEM;
++ }
++
++ dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
++
++ return 0;
++}
++
++static int ahash_set_sh_desc(struct crypto_ahash *ahash)
++{
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ int digestsize = crypto_ahash_digestsize(ahash);
++ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
++ struct caam_flc *flc;
++ u32 *desc;
++
++ ctx->adata.key_virt = ctx->key;
++ ctx->adata.key_inline = true;
++
++ /* ahash_update shared descriptor */
++ flc = &ctx->flc[UPDATE];
++ desc = flc->sh_desc;
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
++ ctx->ctx_len, true, priv->sec_attr.era);
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
++ desc_bytes(desc), DMA_BIDIRECTIONAL);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "ahash update shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++ /* ahash_update_first shared descriptor */
++ flc = &ctx->flc[UPDATE_FIRST];
++ desc = flc->sh_desc;
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
++ ctx->ctx_len, false, priv->sec_attr.era);
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
++ desc_bytes(desc), DMA_BIDIRECTIONAL);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "ahash update first shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++ /* ahash_final shared descriptor */
++ flc = &ctx->flc[FINALIZE];
++ desc = flc->sh_desc;
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
++ ctx->ctx_len, true, priv->sec_attr.era);
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
++ desc_bytes(desc), DMA_BIDIRECTIONAL);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "ahash final shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++ /* ahash_digest shared descriptor */
++ flc = &ctx->flc[DIGEST];
++ desc = flc->sh_desc;
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
++ ctx->ctx_len, false, priv->sec_attr.era);
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
++ desc_bytes(desc), DMA_BIDIRECTIONAL);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "ahash digest shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++ return 0;
++}
++
++/* Digest the key down to digestsize bytes when it is too long to use as is */
++static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
++ u32 *keylen, u8 *key_out, u32 digestsize)
++{
++ struct caam_request *req_ctx;
++ u32 *desc;
++ struct split_key_sh_result result;
++ dma_addr_t src_dma, dst_dma;
++ struct caam_flc *flc;
++ dma_addr_t flc_dma;
++ int ret = -ENOMEM;
++ struct dpaa2_fl_entry *in_fle, *out_fle;
++
++ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
++ if (!req_ctx)
++ return -ENOMEM;
++
++ in_fle = &req_ctx->fd_flt[1];
++ out_fle = &req_ctx->fd_flt[0];
++
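++	/* build a one-shot flow context, mapped only for this job */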
++ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
++ if (!flc)
++ goto err_flc;
++
++ src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, src_dma)) {
++ dev_err(ctx->dev, "unable to map key input memory\n");
++ goto err_src_dma;
++ }
++ dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, dst_dma)) {
++ dev_err(ctx->dev, "unable to map key output memory\n");
++ goto err_dst_dma;
++ }
++
++ desc = flc->sh_desc;
++
++ init_sh_desc(desc, 0);
++
++ /* descriptor to perform unkeyed hash on key_in */
++ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
++ OP_ALG_AS_INITFINAL);
++ append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
++ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
++ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, flc_dma)) {
++ dev_err(ctx->dev, "unable to map shared descriptor\n");
++ goto err_flc_dma;
++ }
++
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(in_fle, src_dma);
++ dpaa2_fl_set_len(in_fle, *keylen);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, dst_dma);
++ dpaa2_fl_set_len(out_fle, digestsize);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
++ print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++ result.err = 0;
++ init_completion(&result.completion);
++ result.dev = ctx->dev;
++
++ req_ctx->flc = flc;
++ req_ctx->flc_dma = flc_dma;
++ req_ctx->cbk = split_key_sh_done;
++ req_ctx->ctx = &result;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret == -EINPROGRESS) {
++ /* in progress */
++ wait_for_completion(&result.completion);
++ ret = result.err;
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "digested key@" __stringify(__LINE__)": ",
++			       DUMP_PREFIX_ADDRESS, 16, 4, key_out, digestsize,
++ 1);
++#endif
++ }
++
++ dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
++ DMA_TO_DEVICE);
++err_flc_dma:
++ dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
++err_dst_dma:
++ dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
++err_src_dma:
++ kfree(flc);
++err_flc:
++ kfree(req_ctx);
++
++ *keylen = digestsize;
++
++ return ret;
++}
++
++static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
++ unsigned int keylen)
++{
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
++ unsigned int digestsize = crypto_ahash_digestsize(ahash);
++ int ret;
++ u8 *hashed_key = NULL;
++
++#ifdef DEBUG
++ dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
++#endif
++
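++	/* per the HMAC construction, pre-digest keys longer than the block size */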
++ if (keylen > blocksize) {
++ hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
++ GFP_KERNEL | GFP_DMA);
++ if (!hashed_key)
++ return -ENOMEM;
++ ret = hash_digest_key(ctx, key, &keylen, hashed_key,
++ digestsize);
++ if (ret)
++ goto bad_free_key;
++ key = hashed_key;
++ }
++
++ ctx->adata.keylen = keylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
++ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
++ goto bad_free_key;
++
++ memcpy(ctx->key, key, keylen);
++
++ kfree(hashed_key);
++ return ahash_set_sh_desc(ahash);
++bad_free_key:
++ kfree(hashed_key);
++ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++}
++
++static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
++ struct ahash_request *req, int dst_len)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++
++ if (edesc->src_nents)
++ dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
++ if (edesc->dst_dma)
++ dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
++
++ if (edesc->qm_sg_bytes)
++ dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
++ DMA_TO_DEVICE);
++
++ if (state->buf_dma) {
++ dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
++ DMA_TO_DEVICE);
++ state->buf_dma = 0;
++ }
++}
++
++static inline void ahash_unmap_ctx(struct device *dev,
++ struct ahash_edesc *edesc,
++ struct ahash_request *req, int dst_len,
++ u32 flag)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++
++ if (state->ctx_dma) {
++ dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
++ state->ctx_dma = 0;
++ }
++ ahash_unmap(dev, edesc, req, dst_len);
++}
++
++static void ahash_done(void *cbk_ctx, u32 status)
++{
++ struct crypto_async_request *areq = cbk_ctx;
++ struct ahash_request *req = ahash_request_cast(areq);
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct ahash_edesc *edesc = state->caam_req.edesc;
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ int digestsize = crypto_ahash_digestsize(ahash);
++ int ecode = 0;
++
++#ifdef DEBUG
++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ if (unlikely(status)) {
++ caam_qi2_strstatus(ctx->dev, status);
++ ecode = -EIO;
++ }
++
++ ahash_unmap(ctx->dev, edesc, req, digestsize);
++ qi_cache_free(edesc);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
++ ctx->ctx_len, 1);
++ if (req->result)
++ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
++ digestsize, 1);
++#endif
++
++ req->base.complete(&req->base, ecode);
++}
++
++static void ahash_done_bi(void *cbk_ctx, u32 status)
++{
++ struct crypto_async_request *areq = cbk_ctx;
++ struct ahash_request *req = ahash_request_cast(areq);
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct ahash_edesc *edesc = state->caam_req.edesc;
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ int ecode = 0;
++#ifdef DEBUG
++ int digestsize = crypto_ahash_digestsize(ahash);
++
++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ if (unlikely(status)) {
++ caam_qi2_strstatus(ctx->dev, status);
++ ecode = -EIO;
++ }
++
++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
++ switch_buf(state);
++ qi_cache_free(edesc);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
++ ctx->ctx_len, 1);
++ if (req->result)
++ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
++ digestsize, 1);
++#endif
++
++ req->base.complete(&req->base, ecode);
++}
++
++static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
++{
++ struct crypto_async_request *areq = cbk_ctx;
++ struct ahash_request *req = ahash_request_cast(areq);
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct ahash_edesc *edesc = state->caam_req.edesc;
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ int digestsize = crypto_ahash_digestsize(ahash);
++ int ecode = 0;
++
++#ifdef DEBUG
++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ if (unlikely(status)) {
++ caam_qi2_strstatus(ctx->dev, status);
++ ecode = -EIO;
++ }
++
++ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
++ qi_cache_free(edesc);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
++ ctx->ctx_len, 1);
++ if (req->result)
++ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
++ digestsize, 1);
++#endif
++
++ req->base.complete(&req->base, ecode);
++}
++
++static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
++{
++ struct crypto_async_request *areq = cbk_ctx;
++ struct ahash_request *req = ahash_request_cast(areq);
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct ahash_edesc *edesc = state->caam_req.edesc;
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ int ecode = 0;
++#ifdef DEBUG
++ int digestsize = crypto_ahash_digestsize(ahash);
++
++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ if (unlikely(status)) {
++ caam_qi2_strstatus(ctx->dev, status);
++ ecode = -EIO;
++ }
++
++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
++ switch_buf(state);
++ qi_cache_free(edesc);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
++ ctx->ctx_len, 1);
++ if (req->result)
++ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
++ digestsize, 1);
++#endif
++
++ req->base.complete(&req->base, ecode);
++}
++
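++/*
++ * Hash as many full blocks as possible through the UPDATE descriptor;
++ * any sub-blocksize remainder is carried over in the alternate buffer.
++ */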
++static int ahash_update_ctx(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ u8 *buf = current_buf(state);
++ int *buflen = current_buflen(state);
++ u8 *next_buf = alt_buf(state);
++ int *next_buflen = alt_buflen(state), last_buflen;
++ int in_len = *buflen + req->nbytes, to_hash;
++ int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
++ struct ahash_edesc *edesc;
++ int ret = 0;
++
++ last_buflen = *next_buflen;
++ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
++ to_hash = in_len - *next_buflen;
++
++ if (to_hash) {
++ struct dpaa2_sg_entry *sg_table;
++
++ src_nents = sg_nents_for_len(req->src,
++ req->nbytes - (*next_buflen));
++ if (src_nents < 0) {
++ dev_err(ctx->dev, "Invalid number of src SG.\n");
++ return src_nents;
++ }
++
++ if (src_nents) {
++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (!mapped_nents) {
++ dev_err(ctx->dev, "unable to DMA map source\n");
++ return -ENOMEM;
++ }
++ } else {
++ mapped_nents = 0;
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc) {
++ dma_unmap_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ return -ENOMEM;
++ }
++
++ edesc->src_nents = src_nents;
++ qm_sg_src_index = 1 + (*buflen ? 1 : 0);
++ qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
++ sizeof(*sg_table);
++ sg_table = &edesc->sgt[0];
++
++ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
++ DMA_BIDIRECTIONAL);
++ if (ret)
++ goto unmap_ctx;
++
++ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
++ if (ret)
++ goto unmap_ctx;
++
++ if (mapped_nents) {
++ sg_to_qm_sg_last(req->src, mapped_nents,
++ sg_table + qm_sg_src_index, 0);
++ if (*next_buflen)
++ scatterwalk_map_and_copy(next_buf, req->src,
++ to_hash - *buflen,
++ *next_buflen, 0);
++ } else {
++ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
++ true);
++ }
++
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
++ qm_sg_bytes, DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
++ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
++
++ req_ctx->flc = &ctx->flc[UPDATE];
++ req_ctx->flc_dma = ctx->flc_dma[UPDATE];
++ req_ctx->cbk = ahash_done_bi;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY &&
++ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ goto unmap_ctx;
++ } else if (*next_buflen) {
++ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
++ req->nbytes, 0);
++ *buflen = *next_buflen;
++ *next_buflen = last_buflen;
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
++ print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
++ *next_buflen, 1);
++#endif
++
++ return ret;
++unmap_ctx:
++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
++ qi_cache_free(edesc);
++ return ret;
++}
++
++static int ahash_final_ctx(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int buflen = *current_buflen(state);
++ int qm_sg_bytes, qm_sg_src_index;
++ int digestsize = crypto_ahash_digestsize(ahash);
++ struct ahash_edesc *edesc;
++ struct dpaa2_sg_entry *sg_table;
++ int ret;
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc)
++ return -ENOMEM;
++
++ qm_sg_src_index = 1 + (buflen ? 1 : 0);
++ qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
++ sg_table = &edesc->sgt[0];
++
++ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
++ DMA_TO_DEVICE);
++ if (ret)
++ goto unmap_ctx;
++
++ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
++ if (ret)
++ goto unmap_ctx;
++
++ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
++
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++ dev_err(ctx->dev, "unable to map dst\n");
++ edesc->dst_dma = 0;
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++ dpaa2_fl_set_len(out_fle, digestsize);
++
++ req_ctx->flc = &ctx->flc[FINALIZE];
++ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
++ req_ctx->cbk = ahash_done_ctx_src;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret == -EINPROGRESS ||
++ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ return ret;
++
++unmap_ctx:
++ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
++ qi_cache_free(edesc);
++ return ret;
++}
++
++static int ahash_finup_ctx(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int buflen = *current_buflen(state);
++ int qm_sg_bytes, qm_sg_src_index;
++ int src_nents, mapped_nents;
++ int digestsize = crypto_ahash_digestsize(ahash);
++ struct ahash_edesc *edesc;
++ struct dpaa2_sg_entry *sg_table;
++ int ret;
++
++ src_nents = sg_nents_for_len(req->src, req->nbytes);
++ if (src_nents < 0) {
++ dev_err(ctx->dev, "Invalid number of src SG.\n");
++ return src_nents;
++ }
++
++ if (src_nents) {
++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (!mapped_nents) {
++ dev_err(ctx->dev, "unable to DMA map source\n");
++ return -ENOMEM;
++ }
++ } else {
++ mapped_nents = 0;
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc) {
++ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
++ return -ENOMEM;
++ }
++
++ edesc->src_nents = src_nents;
++ qm_sg_src_index = 1 + (buflen ? 1 : 0);
++ qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
++ sg_table = &edesc->sgt[0];
++
++ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
++ DMA_TO_DEVICE);
++ if (ret)
++ goto unmap_ctx;
++
++ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
++ if (ret)
++ goto unmap_ctx;
++
++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
++
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++ dev_err(ctx->dev, "unable to map dst\n");
++ edesc->dst_dma = 0;
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++ dpaa2_fl_set_len(out_fle, digestsize);
++
++ req_ctx->flc = &ctx->flc[FINALIZE];
++ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
++ req_ctx->cbk = ahash_done_ctx_src;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret == -EINPROGRESS ||
++ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ return ret;
++
++unmap_ctx:
++ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
++ qi_cache_free(edesc);
++ return ret;
++}
++
++static int ahash_digest(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int digestsize = crypto_ahash_digestsize(ahash);
++ int src_nents, mapped_nents;
++ struct ahash_edesc *edesc;
++ int ret = -ENOMEM;
++
++ state->buf_dma = 0;
++
++ src_nents = sg_nents_for_len(req->src, req->nbytes);
++ if (src_nents < 0) {
++ dev_err(ctx->dev, "Invalid number of src SG.\n");
++ return src_nents;
++ }
++
++ if (src_nents) {
++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (!mapped_nents) {
++ dev_err(ctx->dev, "unable to map source for DMA\n");
++ return ret;
++ }
++ } else {
++ mapped_nents = 0;
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc) {
++ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
++ return ret;
++ }
++
++ edesc->src_nents = src_nents;
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++
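++	/* one mapped segment can feed the FD directly; more need a S/G table */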
++ if (mapped_nents > 1) {
++ int qm_sg_bytes;
++ struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
++
++ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
++ qm_sg_bytes, DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ goto unmap;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ } else {
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
++ }
++
++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++ dev_err(ctx->dev, "unable to map dst\n");
++ edesc->dst_dma = 0;
++ goto unmap;
++ }
++
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_len(in_fle, req->nbytes);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++ dpaa2_fl_set_len(out_fle, digestsize);
++
++ req_ctx->flc = &ctx->flc[DIGEST];
++ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
++ req_ctx->cbk = ahash_done;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret == -EINPROGRESS ||
++ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ return ret;
++
++unmap:
++ ahash_unmap(ctx->dev, edesc, req, digestsize);
++ qi_cache_free(edesc);
++ return ret;
++}
++
++static int ahash_final_no_ctx(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ u8 *buf = current_buf(state);
++ int buflen = *current_buflen(state);
++ int digestsize = crypto_ahash_digestsize(ahash);
++ struct ahash_edesc *edesc;
++ int ret = -ENOMEM;
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc)
++ return ret;
++
++ state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, state->buf_dma)) {
++ dev_err(ctx->dev, "unable to map src\n");
++ goto unmap;
++ }
++
++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++ dev_err(ctx->dev, "unable to map dst\n");
++ edesc->dst_dma = 0;
++ goto unmap;
++ }
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(in_fle, state->buf_dma);
++ dpaa2_fl_set_len(in_fle, buflen);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++ dpaa2_fl_set_len(out_fle, digestsize);
++
++ req_ctx->flc = &ctx->flc[DIGEST];
++ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
++ req_ctx->cbk = ahash_done;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret == -EINPROGRESS ||
++ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ return ret;
++
++unmap:
++ ahash_unmap(ctx->dev, edesc, req, digestsize);
++ qi_cache_free(edesc);
++ return ret;
++}
++
++static int ahash_update_no_ctx(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ u8 *buf = current_buf(state);
++ int *buflen = current_buflen(state);
++ u8 *next_buf = alt_buf(state);
++ int *next_buflen = alt_buflen(state);
++ int in_len = *buflen + req->nbytes, to_hash;
++ int qm_sg_bytes, src_nents, mapped_nents;
++ struct ahash_edesc *edesc;
++ int ret = 0;
++
++ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
++ to_hash = in_len - *next_buflen;
++
++ if (to_hash) {
++ struct dpaa2_sg_entry *sg_table;
++
++ src_nents = sg_nents_for_len(req->src,
++ req->nbytes - *next_buflen);
++ if (src_nents < 0) {
++ dev_err(ctx->dev, "Invalid number of src SG.\n");
++ return src_nents;
++ }
++
++ if (src_nents) {
++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (!mapped_nents) {
++ dev_err(ctx->dev, "unable to DMA map source\n");
++ return -ENOMEM;
++ }
++ } else {
++ mapped_nents = 0;
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc) {
++ dma_unmap_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ return -ENOMEM;
++ }
++
++ edesc->src_nents = src_nents;
++ qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
++ sg_table = &edesc->sgt[0];
++
++ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
++ if (ret)
++ goto unmap_ctx;
++
++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
++
++ if (*next_buflen)
++ scatterwalk_map_and_copy(next_buf, req->src,
++ to_hash - *buflen,
++ *next_buflen, 0);
++
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
++ qm_sg_bytes, DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
++ ctx->ctx_len, DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
++ dev_err(ctx->dev, "unable to map ctx\n");
++ state->ctx_dma = 0;
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ dpaa2_fl_set_len(in_fle, to_hash);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
++ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
++
++ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
++ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
++ req_ctx->cbk = ahash_done_ctx_dst;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY &&
++ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ goto unmap_ctx;
++
++ state->update = ahash_update_ctx;
++ state->finup = ahash_finup_ctx;
++ state->final = ahash_final_ctx;
++ } else if (*next_buflen) {
++ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
++ req->nbytes, 0);
++ *buflen = *next_buflen;
++ *next_buflen = 0;
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
++ print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
++ *next_buflen, 1);
++#endif
++
++ return ret;
++unmap_ctx:
++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
++ qi_cache_free(edesc);
++ return ret;
++}
++
++static int ahash_finup_no_ctx(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int buflen = *current_buflen(state);
++ int qm_sg_bytes, src_nents, mapped_nents;
++ int digestsize = crypto_ahash_digestsize(ahash);
++ struct ahash_edesc *edesc;
++ struct dpaa2_sg_entry *sg_table;
++ int ret;
++
++ src_nents = sg_nents_for_len(req->src, req->nbytes);
++ if (src_nents < 0) {
++ dev_err(ctx->dev, "Invalid number of src SG.\n");
++ return src_nents;
++ }
++
++ if (src_nents) {
++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (!mapped_nents) {
++ dev_err(ctx->dev, "unable to DMA map source\n");
++ return -ENOMEM;
++ }
++ } else {
++ mapped_nents = 0;
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc) {
++ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
++ return -ENOMEM;
++ }
++
++ edesc->src_nents = src_nents;
++ qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
++ sg_table = &edesc->sgt[0];
++
++ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
++ if (ret)
++ goto unmap;
++
++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
++
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ ret = -ENOMEM;
++ goto unmap;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++ dev_err(ctx->dev, "unable to map dst\n");
++ edesc->dst_dma = 0;
++ ret = -ENOMEM;
++ goto unmap;
++ }
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++ dpaa2_fl_set_len(out_fle, digestsize);
++
++ req_ctx->flc = &ctx->flc[DIGEST];
++ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
++ req_ctx->cbk = ahash_done;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ goto unmap;
++
++ return ret;
++unmap:
++ ahash_unmap(ctx->dev, edesc, req, digestsize);
++ qi_cache_free(edesc);
++ return ret;
++}
++
++static int ahash_update_first(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ u8 *next_buf = alt_buf(state);
++ int *next_buflen = alt_buflen(state);
++ int to_hash;
++ int src_nents, mapped_nents;
++ struct ahash_edesc *edesc;
++ int ret = 0;
++
++ *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
++ 1);
++ to_hash = req->nbytes - *next_buflen;
++
++ if (to_hash) {
++ struct dpaa2_sg_entry *sg_table;
++
++ src_nents = sg_nents_for_len(req->src,
++ req->nbytes - (*next_buflen));
++ if (src_nents < 0) {
++ dev_err(ctx->dev, "Invalid number of src SG.\n");
++ return src_nents;
++ }
++
++ if (src_nents) {
++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (!mapped_nents) {
++ dev_err(ctx->dev, "unable to map source for DMA\n");
++ return -ENOMEM;
++ }
++ } else {
++ mapped_nents = 0;
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc) {
++ dma_unmap_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ return -ENOMEM;
++ }
++
++ edesc->src_nents = src_nents;
++ sg_table = &edesc->sgt[0];
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_len(in_fle, to_hash);
++
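++ /*
++ * A single mapped segment can be fed to the accelerator directly
++ * through the frame list entry; multiple segments need a h/w S/G
++ * table.
++ */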
++ if (mapped_nents > 1) {
++ int qm_sg_bytes;
++
++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
++ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
++ qm_sg_bytes,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ } else {
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
++ }
++
++ if (*next_buflen)
++ scatterwalk_map_and_copy(next_buf, req->src, to_hash,
++ *next_buflen, 0);
++
++ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
++ ctx->ctx_len, DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
++ dev_err(ctx->dev, "unable to map ctx\n");
++ state->ctx_dma = 0;
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
++ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
++
++ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
++ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
++ req_ctx->cbk = ahash_done_ctx_dst;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY && req->base.flags &
++ CRYPTO_TFM_REQ_MAY_BACKLOG))
++ goto unmap_ctx;
++
++ state->update = ahash_update_ctx;
++ state->finup = ahash_finup_ctx;
++ state->final = ahash_final_ctx;
++ } else if (*next_buflen) {
++ state->update = ahash_update_no_ctx;
++ state->finup = ahash_finup_no_ctx;
++ state->final = ahash_final_no_ctx;
++ scatterwalk_map_and_copy(next_buf, req->src, 0,
++ req->nbytes, 0);
++ switch_buf(state);
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
++#endif
++
++ return ret;
++unmap_ctx:
++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
++ qi_cache_free(edesc);
++ return ret;
++}
++
++static int ahash_finup_first(struct ahash_request *req)
++{
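++ /* nothing hashed yet, so finup degenerates to a one-shot digest */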
++ return ahash_digest(req);
++}
++
++static int ahash_init(struct ahash_request *req)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++
++ state->update = ahash_update_first;
++ state->finup = ahash_finup_first;
++ state->final = ahash_final_no_ctx;
++
++ state->ctx_dma = 0;
++ state->current_buf = 0;
++ state->buf_dma = 0;
++ state->buflen_0 = 0;
++ state->buflen_1 = 0;
++
++ return 0;
++}
++
++static int ahash_update(struct ahash_request *req)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++
++ return state->update(req);
++}
++
++static int ahash_finup(struct ahash_request *req)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++
++ return state->finup(req);
++}
++
++static int ahash_final(struct ahash_request *req)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++
++ return state->final(req);
++}
++
++static int ahash_export(struct ahash_request *req, void *out)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_export_state *export = out;
++ int len;
++ u8 *buf;
++
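++ /* export whichever of the two ping-pong buffers is currently active */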
++ if (state->current_buf) {
++ buf = state->buf_1;
++ len = state->buflen_1;
++ } else {
++ buf = state->buf_0;
++ len = state->buflen_0;
++ }
++
++ memcpy(export->buf, buf, len);
++ memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
++ export->buflen = len;
++ export->update = state->update;
++ export->final = state->final;
++ export->finup = state->finup;
++
++ return 0;
++}
++
++static int ahash_import(struct ahash_request *req, const void *in)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ const struct caam_export_state *export = in;
++
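++ /* the memset clears current_buf, so imported bytes always land in buf_0 */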
++ memset(state, 0, sizeof(*state));
++ memcpy(state->buf_0, export->buf, export->buflen);
++ memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
++ state->buflen_0 = export->buflen;
++ state->update = export->update;
++ state->final = export->final;
++ state->finup = export->finup;
++
++ return 0;
++}
++
++struct caam_hash_template {
++ char name[CRYPTO_MAX_ALG_NAME];
++ char driver_name[CRYPTO_MAX_ALG_NAME];
++ char hmac_name[CRYPTO_MAX_ALG_NAME];
++ char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
++ unsigned int blocksize;
++ struct ahash_alg template_ahash;
++ u32 alg_type;
++};
++
++/* ahash descriptors */
++static struct caam_hash_template driver_hash[] = {
++ {
++ .name = "sha1",
++ .driver_name = "sha1-caam-qi2",
++ .hmac_name = "hmac(sha1)",
++ .hmac_driver_name = "hmac-sha1-caam-qi2",
++ .blocksize = SHA1_BLOCK_SIZE,
++ .template_ahash = {
++ .init = ahash_init,
++ .update = ahash_update,
++ .final = ahash_final,
++ .finup = ahash_finup,
++ .digest = ahash_digest,
++ .export = ahash_export,
++ .import = ahash_import,
++ .setkey = ahash_setkey,
++ .halg = {
++ .digestsize = SHA1_DIGEST_SIZE,
++ .statesize = sizeof(struct caam_export_state),
+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = SHA384_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ .geniv = true,
+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "authenc(hmac(sha512),"
-+ "rfc3686(ctr(aes)))",
-+ .cra_driver_name = "authenc-hmac-sha512-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
++ .alg_type = OP_ALG_ALGSEL_SHA1,
++ }, {
++ .name = "sha224",
++ .driver_name = "sha224-caam-qi2",
++ .hmac_name = "hmac(sha224)",
++ .hmac_driver_name = "hmac-sha224-caam-qi2",
++ .blocksize = SHA224_BLOCK_SIZE,
++ .template_ahash = {
++ .init = ahash_init,
++ .update = ahash_update,
++ .final = ahash_final,
++ .finup = ahash_finup,
++ .digest = ahash_digest,
++ .export = ahash_export,
++ .import = ahash_import,
++ .setkey = ahash_setkey,
++ .halg = {
++ .digestsize = SHA224_DIGEST_SIZE,
++ .statesize = sizeof(struct caam_export_state),
+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = SHA512_DIGEST_SIZE,
-+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "seqiv(authenc(hmac(sha512),"
-+ "rfc3686(ctr(aes))))",
-+ .cra_driver_name = "seqiv-authenc-hmac-sha512-"
-+ "rfc3686-ctr-aes-caam-qi2",
-+ .cra_blocksize = 1,
++ .alg_type = OP_ALG_ALGSEL_SHA224,
++ }, {
++ .name = "sha256",
++ .driver_name = "sha256-caam-qi2",
++ .hmac_name = "hmac(sha256)",
++ .hmac_driver_name = "hmac-sha256-caam-qi2",
++ .blocksize = SHA256_BLOCK_SIZE,
++ .template_ahash = {
++ .init = ahash_init,
++ .update = ahash_update,
++ .final = ahash_final,
++ .finup = ahash_finup,
++ .digest = ahash_digest,
++ .export = ahash_export,
++ .import = ahash_import,
++ .setkey = ahash_setkey,
++ .halg = {
++ .digestsize = SHA256_DIGEST_SIZE,
++ .statesize = sizeof(struct caam_export_state),
+ },
-+ .setkey = aead_setkey,
-+ .setauthsize = aead_setauthsize,
-+ .encrypt = aead_encrypt,
-+ .decrypt = aead_decrypt,
-+ .ivsize = CTR_RFC3686_IV_SIZE,
-+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES |
-+ OP_ALG_AAI_CTR_MOD128,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
-+ .rfc3686 = true,
-+ .geniv = true,
++ .alg_type = OP_ALG_ALGSEL_SHA256,
++ }, {
++ .name = "sha384",
++ .driver_name = "sha384-caam-qi2",
++ .hmac_name = "hmac(sha384)",
++ .hmac_driver_name = "hmac-sha384-caam-qi2",
++ .blocksize = SHA384_BLOCK_SIZE,
++ .template_ahash = {
++ .init = ahash_init,
++ .update = ahash_update,
++ .final = ahash_final,
++ .finup = ahash_finup,
++ .digest = ahash_digest,
++ .export = ahash_export,
++ .import = ahash_import,
++ .setkey = ahash_setkey,
++ .halg = {
++ .digestsize = SHA384_DIGEST_SIZE,
++ .statesize = sizeof(struct caam_export_state),
++ },
+ },
-+ },
-+ {
-+ .aead = {
-+ .base = {
-+ .cra_name = "tls10(hmac(sha1),cbc(aes))",
-+ .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
-+ .cra_blocksize = AES_BLOCK_SIZE,
++ .alg_type = OP_ALG_ALGSEL_SHA384,
++ }, {
++ .name = "sha512",
++ .driver_name = "sha512-caam-qi2",
++ .hmac_name = "hmac(sha512)",
++ .hmac_driver_name = "hmac-sha512-caam-qi2",
++ .blocksize = SHA512_BLOCK_SIZE,
++ .template_ahash = {
++ .init = ahash_init,
++ .update = ahash_update,
++ .final = ahash_final,
++ .finup = ahash_finup,
++ .digest = ahash_digest,
++ .export = ahash_export,
++ .import = ahash_import,
++ .setkey = ahash_setkey,
++ .halg = {
++ .digestsize = SHA512_DIGEST_SIZE,
++ .statesize = sizeof(struct caam_export_state),
+ },
-+ .setkey = tls_setkey,
-+ .setauthsize = tls_setauthsize,
-+ .encrypt = tls_encrypt,
-+ .decrypt = tls_decrypt,
-+ .ivsize = AES_BLOCK_SIZE,
-+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
-+ .caam = {
-+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
-+ OP_ALG_AAI_HMAC_PRECOMP,
++ .alg_type = OP_ALG_ALGSEL_SHA512,
++ }, {
++ .name = "md5",
++ .driver_name = "md5-caam-qi2",
++ .hmac_name = "hmac(md5)",
++ .hmac_driver_name = "hmac-md5-caam-qi2",
++ .blocksize = MD5_BLOCK_WORDS * 4,
++ .template_ahash = {
++ .init = ahash_init,
++ .update = ahash_update,
++ .final = ahash_final,
++ .finup = ahash_finup,
++ .digest = ahash_digest,
++ .export = ahash_export,
++ .import = ahash_import,
++ .setkey = ahash_setkey,
++ .halg = {
++ .digestsize = MD5_DIGEST_SIZE,
++ .statesize = sizeof(struct caam_export_state),
++ },
+ },
-+ },
++ .alg_type = OP_ALG_ALGSEL_MD5,
++ }
+};
+
-+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
-+ *template)
++struct caam_hash_alg {
++ struct list_head entry;
++ struct device *dev;
++ int alg_type;
++ struct ahash_alg ahash_alg;
++};
++
++static int caam_hash_cra_init(struct crypto_tfm *tfm)
++{
++ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
++ struct crypto_alg *base = tfm->__crt_alg;
++ struct hash_alg_common *halg =
++ container_of(base, struct hash_alg_common, base);
++ struct ahash_alg *alg =
++ container_of(halg, struct ahash_alg, halg);
++ struct caam_hash_alg *caam_hash =
++ container_of(alg, struct caam_hash_alg, ahash_alg);
++ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
++ /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
++ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
++ HASH_MSG_LEN + SHA1_DIGEST_SIZE,
++ HASH_MSG_LEN + 32,
++ HASH_MSG_LEN + SHA256_DIGEST_SIZE,
++ HASH_MSG_LEN + 64,
++ HASH_MSG_LEN + SHA512_DIGEST_SIZE };
++ dma_addr_t dma_addr;
++ int i;
++
++ ctx->dev = caam_hash->dev;
++
++ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
++ DMA_BIDIRECTIONAL,
++ DMA_ATTR_SKIP_CPU_SYNC);
++ if (dma_mapping_error(ctx->dev, dma_addr)) {
++ dev_err(ctx->dev, "unable to map shared descriptors\n");
++ return -ENOMEM;
++ }
++
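++ /*
++ * All flow contexts share a single mapping; derive each
++ * per-operation DMA address from its offset within ctx->flc.
++ */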
++ for (i = 0; i < HASH_NUM_OP; i++)
++ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
++
++ /* copy descriptor header template value */
++ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
++
++ ctx->ctx_len = runninglen[(ctx->adata.algtype &
++ OP_ALG_ALGSEL_SUBMASK) >>
++ OP_ALG_ALGSEL_SHIFT];
++
++ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
++ sizeof(struct caam_hash_state));
++
++ return ahash_set_sh_desc(ahash);
++}
++
++static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+{
-+ struct caam_crypto_alg *t_alg;
++ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
++ DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
++}
++
++static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
++ struct caam_hash_template *template, bool keyed)
++{
++ struct caam_hash_alg *t_alg;
++ struct ahash_alg *halg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
-+ alg = &t_alg->crypto_alg;
++ t_alg->ahash_alg = template->template_ahash;
++ halg = &t_alg->ahash_alg;
++ alg = &halg->halg.base;
+
-+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
-+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
-+ template->driver_name);
++ if (keyed) {
++ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
++ template->hmac_name);
++ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
++ template->hmac_driver_name);
++ } else {
++ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
++ template->name);
++ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
++ template->driver_name);
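++ /* unkeyed hash: drop the setkey hook inherited from the template */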
++ t_alg->ahash_alg.setkey = NULL;
++ }
+ alg->cra_module = THIS_MODULE;
-+ alg->cra_exit = caam_cra_exit;
++ alg->cra_init = caam_hash_cra_init;
++ alg->cra_exit = caam_hash_cra_exit;
++ alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
-+ alg->cra_ctxsize = sizeof(struct caam_ctx);
-+ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
-+ template->type;
-+ switch (template->type) {
-+ case CRYPTO_ALG_TYPE_GIVCIPHER:
-+ alg->cra_init = caam_cra_init_ablkcipher;
-+ alg->cra_type = &crypto_givcipher_type;
-+ alg->cra_ablkcipher = template->template_ablkcipher;
-+ break;
-+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
-+ alg->cra_init = caam_cra_init_ablkcipher;
-+ alg->cra_type = &crypto_ablkcipher_type;
-+ alg->cra_ablkcipher = template->template_ablkcipher;
-+ break;
-+ }
++ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
++ alg->cra_type = &crypto_ahash_type;
+
-+ t_alg->caam.class1_alg_type = template->class1_alg_type;
-+ t_alg->caam.class2_alg_type = template->class2_alg_type;
++ t_alg->alg_type = template->alg_type;
++ t_alg->dev = dev;
+
+ return t_alg;
+}
+
-+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
-+{
-+ struct aead_alg *alg = &t_alg->aead;
-+
-+ alg->base.cra_module = THIS_MODULE;
-+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
-+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
-+
-+ alg->init = caam_cra_init_aead;
-+ alg->exit = caam_cra_exit_aead;
-+}
-+
+static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ /* Register notification callbacks */
+ err = dpaa2_io_service_register(NULL, nctx);
+ if (unlikely(err)) {
-+ dev_err(dev, "notification register failed\n");
++ dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
+ nctx->cb = NULL;
++ /*
++ * If no affine DPIO for this core, there's probably
++ * none available for the next cores either. Signal we want
++ * to retry later, in case the DPIO devices weren't
++ * probed yet.
++ */
++ err = -EPROBE_DEFER;
+ goto err;
+ }
+
+}
+
+static struct list_head alg_list;
++static struct list_head hash_list;
+
+static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
+{
+ /* Obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
+ if (err) {
-+ dev_err(dev, "MC portal allocation failed\n");
++ if (err == -ENXIO)
++ err = -EPROBE_DEFER;
++ else
++ dev_err(dev, "MC portal allocation failed\n");
++
+ goto err_dma_mask;
+ }
+
+ if (registered)
+ dev_info(dev, "algorithms registered in /proc/crypto\n");
+
++ /* register hash algorithms the device supports */
++ INIT_LIST_HEAD(&hash_list);
++
++ /*
++ * Skip registration of any hashing algorithms if MD block
++ * is not present.
++ */
++ if (!priv->sec_attr.md_acc_num)
++ return 0;
++
++ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
++ struct caam_hash_alg *t_alg;
++ struct caam_hash_template *alg = driver_hash + i;
++
++ /* register hmac version */
++ t_alg = caam_hash_alloc(dev, alg, true);
++ if (IS_ERR(t_alg)) {
++ err = PTR_ERR(t_alg);
++ dev_warn(dev, "%s hash alg allocation failed: %d\n",
++ alg->driver_name, err);
++ continue;
++ }
++
++ err = crypto_register_ahash(&t_alg->ahash_alg);
++ if (err) {
++ dev_warn(dev, "%s alg registration failed: %d\n",
++ t_alg->ahash_alg.halg.base.cra_driver_name,
++ err);
++ kfree(t_alg);
++ } else {
++ list_add_tail(&t_alg->entry, &hash_list);
++ }
++
++ /* register unkeyed version */
++ t_alg = caam_hash_alloc(dev, alg, false);
++ if (IS_ERR(t_alg)) {
++ err = PTR_ERR(t_alg);
++ dev_warn(dev, "%s alg allocation failed: %d\n",
++ alg->driver_name, err);
++ continue;
++ }
++
++ err = crypto_register_ahash(&t_alg->ahash_alg);
++ if (err) {
++ dev_warn(dev, "%s alg registration failed: %d\n",
++ t_alg->ahash_alg.halg.base.cra_driver_name,
++ err);
++ kfree(t_alg);
++ } else {
++ list_add_tail(&t_alg->entry, &hash_list);
++ }
++ }
++ if (!list_empty(&hash_list))
++ dev_info(dev, "hash algorithms registered in /proc/crypto\n");
++
+ return err;
+
+err_bind:
+ }
+ }
+
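++ /*
++ * hash_list is initialized only when hash registration ran;
++ * guard against a probe that failed before reaching it.
++ */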
++ if (hash_list.next) {
++ struct caam_hash_alg *t_hash_alg, *p;
++
++ list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
++ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
++ list_del(&t_hash_alg->entry);
++ kfree(t_hash_alg);
++ }
++ }
++
+ dpaa2_dpseci_disable(priv);
+ dpaa2_dpseci_dpio_free(priv);
+ dpaa2_dpseci_free(priv);
+ }
+ }
+
-+ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc->flc_dma);
++ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
+
+ req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ memset(&fd, 0, sizeof(fd));
+ dpaa2_fd_set_format(&fd, dpaa2_fd_list);
+ dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
-+ dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
-+ dpaa2_fd_set_flc(&fd, req->flc->flc_dma);
++ dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
++ dpaa2_fd_set_flc(&fd, req->flc_dma);
+
+ /*
+ * There is no guarantee that preemption is disabled here,
+module_fsl_mc_driver(dpaa2_caam_driver);
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.h
-@@ -0,0 +1,265 @@
+@@ -0,0 +1,283 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @assoclen: associated data length, in CAAM endianness
+ * @assoclen_dma: bus physical mapped address of req->assoclen
+ * @sgt: the h/w link table
+ */
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
++ unsigned int assoclen;
+ dma_addr_t assoclen_dma;
+#define CAAM_QI_MAX_AEAD_SG \
+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
+ struct dpaa2_sg_entry sgt[0];
+};
+
++/**
++ * ahash_edesc - s/w-extended ahash descriptor
++ * @dst_dma: I/O virtual address of req->result
++ * @qm_sg_dma: I/O virtual address of h/w link table
++ * @src_nents: number of segments in input scatterlist
++ * @qm_sg_bytes: length of dma mapped qm_sg space
++ * @sgt: pointer to h/w link table
++ */
++struct ahash_edesc {
++ dma_addr_t dst_dma;
++ dma_addr_t qm_sg_dma;
++ int src_nents;
++ int qm_sg_bytes;
++ struct dpaa2_sg_entry sgt[0];
++};
++
+/**
+ * caam_flc - Flow Context (FLC)
+ * @flc: Flow Context options
+ * @sh_desc: Shared Descriptor
-+ * @flc_dma: DMA address of the Flow Context
+ */
+struct caam_flc {
+ u32 flc[16];
+ u32 sh_desc[MAX_SDLEN];
-+ dma_addr_t flc_dma;
+} ____cacheline_aligned;
+
+enum optype {
+ * fd_flt[1] - FLE pointing to input buffer
+ * @fd_flt_dma: DMA address for the frame list table
+ * @flc: Flow Context
++ * @flc_dma: I/O virtual address of Flow Context
+ * @op_type: operation type
+ * @cbk: Callback function to invoke when job is completed
+ * @ctx: arbitrary context attached with request by the application
+ struct dpaa2_fl_entry fd_flt[2];
+ dma_addr_t fd_flt_dma;
+ struct caam_flc *flc;
++ dma_addr_t flc_dma;
+ enum optype op_type;
+ void (*cbk)(void *ctx, u32 err);
+ void *ctx;
+#endif /* _CAAMALG_QI2_H_ */
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
-@@ -72,7 +72,7 @@
+@@ -62,6 +62,7 @@
+ #include "error.h"
+ #include "sg_sw_sec4.h"
+ #include "key_gen.h"
++#include "caamhash_desc.h"
+
+ #define CAAM_CRA_PRIORITY 3000
+
+@@ -71,14 +72,6 @@
+ #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
- /* length of descriptors text */
+-/* length of descriptors text */
-#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
-+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
- #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
- #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
- #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-@@ -103,20 +103,14 @@ struct caam_hash_ctx {
+-#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
+-#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+-#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+-#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+-#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+-
+ #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
+ CAAM_MAX_HASH_KEY_SIZE)
+ #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
+@@ -103,20 +96,15 @@ struct caam_hash_ctx {
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
- dma_addr_t sh_desc_finup_dma;
++ enum dma_data_direction dir;
struct device *jrdev;
- u32 alg_type;
- u32 alg_op;
};
/* ahash state */
-@@ -143,6 +137,31 @@ struct caam_export_state {
+@@ -143,6 +131,31 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
/* Common job descriptor seq in/out ptr routines */
/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
-@@ -175,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr
+@@ -175,40 +188,31 @@ static inline dma_addr_t map_seq_out_ptr
return dst_dma;
}
}
/* Map state->caam_ctx, and add it to link table */
-@@ -224,89 +234,54 @@ static inline int ctx_map_to_sec4_sg(u32
+-static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
++static inline int ctx_map_to_sec4_sg(struct device *jrdev,
+ struct caam_hash_state *state, int ctx_len,
+ struct sec4_sg_entry *sec4_sg, u32 flag)
+ {
+@@ -224,124 +228,22 @@ static inline int ctx_map_to_sec4_sg(u32
return 0;
}
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
- /*
+-/*
- * For ahash read data from seqin following state->caam_ctx,
- * and write resulting class2 context to seqout, which may be state->caam_ctx
- * or req->result
-+ * For ahash update, final and finup (import_ctx = true)
-+ * import context, read and write to seqout
-+ * For ahash firsts and digest (import_ctx = false)
-+ * read and write to seqout
- */
+- */
-static inline void ahash_append_load_str(u32 *desc, int digestsize)
-+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
-+ struct caam_hash_ctx *ctx, bool import_ctx)
- {
+-{
- /* Calculate remaining bytes to read */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-+ u32 op = ctx->adata.algtype;
-+ u32 *skip_key_load;
-
+-
- /* Read remaining bytes */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- FIFOLD_TYPE_MSG | KEY_VLF);
-+ init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-- /* Store class2 context bytes */
-- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
-- LDST_SRCDST_BYTE_CONTEXT);
--}
-+ /* Append key if it has been set; ahash update excluded */
-+ if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
-+ /* Skip key loading if already shared */
-+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-+ JUMP_COND_SHRD);
-
+-
+- /* Store class2 context bytes */
+- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-}
+-
-/*
- * For ahash update, final and finup, import context, read and write to seqout
- */
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
-+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
-+ ctx->adata.keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
-
+-
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
-+ set_jump_tgt_here(desc, skip_key_load);
-
+-
- /*
- * Load from buf and/or src and write to req->result or state->context
- */
- ahash_append_load_str(desc, digestsize);
-}
-+ op |= OP_ALG_AAI_HMAC_PRECOMP;
-+ }
-
+-
-/* For ahash firsts and digest, read and write to seqout */
-static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize, struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
-+ /* If needed, import context from software */
-+ if (import_ctx)
-+ append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
-+ LDST_SRCDST_BYTE_CONTEXT);
-
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
-
- /*
- * Load from buf and/or src and write to req->result or state->context
-+ * Calculate remaining bytes to read
- */
+-
+- /* Class 2 operation */
+- append_operation(desc, op | state | OP_ALG_ENCRYPT);
+-
+- /*
+- * Load from buf and/or src and write to req->result or state->context
+- */
- ahash_append_load_str(desc, digestsize);
-+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-+ /* Read remaining bytes */
-+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
-+ FIFOLD_TYPE_MSG | KEY_VLF);
-+ /* Store class2 context bytes */
-+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
-+ LDST_SRCDST_BYTE_CONTEXT);
- }
-
+-}
+-
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
-@@ -314,34 +289,13 @@ static int ahash_set_sh_desc(struct cryp
+ {
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
- u32 have_key = 0;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
- if (ctx->split_key_len)
- have_key = OP_ALG_AAI_HMAC_PRECOMP;
--
++ ctx->adata.key_virt = ctx->key;
+
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
-
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
++ ctx->ctx_len, true, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ",
-@@ -350,17 +304,9 @@ static int ahash_set_sh_desc(struct cryp
+@@ -350,17 +252,10 @@ static int ahash_set_sh_desc(struct cryp
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
++ ctx->ctx_len, false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ",
-@@ -369,53 +315,20 @@ static int ahash_set_sh_desc(struct cryp
+@@ -369,53 +264,22 @@ static int ahash_set_sh_desc(struct cryp
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
++ ctx->ctx_len, true, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
++ ctx->ctx_len, false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
-+ desc_bytes(desc), DMA_TO_DEVICE);
++ desc_bytes(desc), ctx->dir);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ",
-@@ -426,14 +339,6 @@ static int ahash_set_sh_desc(struct cryp
+@@ -426,14 +290,6 @@ static int ahash_set_sh_desc(struct cryp
return 0;
}
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
-@@ -469,7 +374,7 @@ static int hash_digest_key(struct caam_h
+@@ -469,7 +325,7 @@ static int hash_digest_key(struct caam_h
}
/* Job descriptor to perform unkeyed hash on key_in */
OP_ALG_AS_INITFINAL);
append_seq_in_ptr(desc, src_dma, *keylen, 0);
append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
-@@ -513,10 +418,7 @@ static int hash_digest_key(struct caam_h
+@@ -513,12 +369,10 @@ static int hash_digest_key(struct caam_h
static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
- struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
-@@ -539,43 +441,19 @@ static int ahash_setkey(struct crypto_ah
+ u8 *hashed_key = NULL;
+
+@@ -539,43 +393,29 @@ static int ahash_setkey(struct crypto_ah
key = hashed_key;
}
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
--
++ /*
++ * If DKP is supported, use it in the shared descriptor to generate
++ * the split key.
++ */
++ if (ctrlpriv->era >= 6) {
++ ctx->adata.key_inline = true;
++ ctx->adata.keylen = keylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
+
- ret = gen_split_hash_key(ctx, key, keylen);
-+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
-+ CAAM_MAX_HASH_KEY_SIZE);
- if (ret)
- goto bad_free_key;
+- if (ret)
+- goto bad_free_key;
++ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
++ goto bad_free_key;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
- DMA_TO_DEVICE);
- dev_err(jrdev, "unable to map key i/o memory\n");
- ret = -ENOMEM;
- goto error_free_key;
-- }
- #ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
++ memcpy(ctx->key, key, keylen);
++ } else {
++ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
++ keylen, CAAM_MAX_HASH_KEY_SIZE);
++ if (ret)
++ goto bad_free_key;
+ }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len, 1);
-+ ctx->adata.keylen_pad, 1);
- #endif
+-#endif
- ret = ahash_set_sh_desc(ahash);
- if (ret) {
bad_free_key:
kfree(hashed_key);
crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-@@ -604,6 +482,8 @@ static inline void ahash_unmap(struct de
+@@ -604,6 +444,8 @@ static inline void ahash_unmap(struct de
struct ahash_edesc *edesc,
struct ahash_request *req, int dst_len)
{
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
if (edesc->dst_dma)
-@@ -612,6 +492,12 @@ static inline void ahash_unmap(struct de
+@@ -612,6 +454,12 @@ static inline void ahash_unmap(struct de
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma,
edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
-@@ -643,8 +529,7 @@ static void ahash_done(struct device *jr
+@@ -643,8 +491,7 @@ static void ahash_done(struct device *jr
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err)
caam_jr_strstatus(jrdev, err);
-@@ -671,19 +556,19 @@ static void ahash_done_bi(struct device
+@@ -671,19 +518,19 @@ static void ahash_done_bi(struct device
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
kfree(edesc);
#ifdef DEBUG
-@@ -713,8 +598,7 @@ static void ahash_done_ctx_src(struct de
+@@ -713,8 +560,7 @@ static void ahash_done_ctx_src(struct de
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err)
caam_jr_strstatus(jrdev, err);
-@@ -741,19 +625,19 @@ static void ahash_done_ctx_dst(struct de
+@@ -741,19 +587,19 @@ static void ahash_done_ctx_dst(struct de
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
kfree(edesc);
#ifdef DEBUG
-@@ -835,13 +719,12 @@ static int ahash_update_ctx(struct ahash
+@@ -835,13 +681,12 @@ static int ahash_update_ctx(struct ahash
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
int in_len = *buflen + req->nbytes, to_hash;
u32 *desc;
int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
-@@ -895,10 +778,9 @@ static int ahash_update_ctx(struct ahash
+@@ -890,15 +735,14 @@ static int ahash_update_ctx(struct ahash
+ edesc->src_nents = src_nents;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
++ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
if (mapped_nents) {
sg_to_sec4_sg_last(req->src, mapped_nents,
-@@ -909,12 +791,10 @@ static int ahash_update_ctx(struct ahash
+@@ -909,12 +753,10 @@ static int ahash_update_ctx(struct ahash
to_hash - *buflen,
*next_buflen, 0);
} else {
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-@@ -969,12 +849,9 @@ static int ahash_final_ctx(struct ahash_
+@@ -969,12 +811,9 @@ static int ahash_final_ctx(struct ahash_
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
-@@ -1001,11 +878,11 @@ static int ahash_final_ctx(struct ahash_
+@@ -994,18 +833,17 @@ static int ahash_final_ctx(struct ahash_
+ desc = edesc->hw_desc;
+
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+- edesc->src_nents = 0;
+
+- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
++ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
-@@ -1048,12 +925,9 @@ static int ahash_finup_ctx(struct ahash_
+@@ -1048,12 +886,9 @@ static int ahash_finup_ctx(struct ahash_
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int sec4_sg_src_index;
int src_nents, mapped_nents;
-@@ -1082,7 +956,7 @@ static int ahash_finup_ctx(struct ahash_
+@@ -1082,7 +917,7 @@ static int ahash_finup_ctx(struct ahash_
/* allocate space for base edesc and hw desc commands, link tables */
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
flags);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
-@@ -1098,9 +972,9 @@ static int ahash_finup_ctx(struct ahash_
+@@ -1093,14 +928,14 @@ static int ahash_finup_ctx(struct ahash_
+
+ edesc->src_nents = src_nents;
+
+- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
++ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
sec4_sg_src_index, ctx->ctx_len + buflen,
-@@ -1136,15 +1010,18 @@ static int ahash_digest(struct ahash_req
+@@ -1136,15 +971,18 @@ static int ahash_digest(struct ahash_req
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
-@@ -1215,10 +1092,10 @@ static int ahash_final_no_ctx(struct aha
+@@ -1215,10 +1053,10 @@ static int ahash_final_no_ctx(struct aha
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
-@@ -1276,13 +1153,12 @@ static int ahash_update_no_ctx(struct ah
+@@ -1246,7 +1084,6 @@ static int ahash_final_no_ctx(struct aha
+ dev_err(jrdev, "unable to map dst\n");
+ goto unmap;
+ }
+- edesc->src_nents = 0;
+
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+@@ -1276,13 +1113,12 @@ static int ahash_update_no_ctx(struct ah
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
int in_len = *buflen + req->nbytes, to_hash;
int sec4_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
-@@ -1331,8 +1207,10 @@ static int ahash_update_no_ctx(struct ah
+@@ -1329,10 +1165,11 @@ static int ahash_update_no_ctx(struct ah
+
+ edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->dst_dma = 0;
+- edesc->dst_dma = 0;
- state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
- buf, *buflen);
sg_to_sec4_sg_last(req->src, mapped_nents,
edesc->sec4_sg + 1, 0);
-@@ -1342,8 +1220,6 @@ static int ahash_update_no_ctx(struct ah
+@@ -1342,8 +1179,6 @@ static int ahash_update_no_ctx(struct ah
*next_buflen, 0);
}
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-@@ -1403,12 +1279,9 @@ static int ahash_finup_no_ctx(struct aha
+@@ -1403,12 +1238,9 @@ static int ahash_finup_no_ctx(struct aha
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
-@@ -1450,9 +1323,9 @@ static int ahash_finup_no_ctx(struct aha
+@@ -1450,9 +1282,9 @@ static int ahash_finup_no_ctx(struct aha
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
req->nbytes);
-@@ -1496,11 +1369,10 @@ static int ahash_update_first(struct aha
+@@ -1496,11 +1328,10 @@ static int ahash_update_first(struct aha
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
int to_hash;
u32 *desc;
int src_nents, mapped_nents;
-@@ -1582,6 +1454,7 @@ static int ahash_update_first(struct aha
+@@ -1545,7 +1376,6 @@ static int ahash_update_first(struct aha
+ }
+
+ edesc->src_nents = src_nents;
+- edesc->dst_dma = 0;
+
+ ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
+ to_hash);
+@@ -1582,6 +1412,7 @@ static int ahash_update_first(struct aha
state->final = ahash_final_no_ctx;
scatterwalk_map_and_copy(next_buf, req->src, 0,
req->nbytes, 0);
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
-@@ -1688,7 +1561,6 @@ struct caam_hash_template {
+@@ -1688,7 +1519,6 @@ struct caam_hash_template {
unsigned int blocksize;
struct ahash_alg template_ahash;
u32 alg_type;
};
/* ahash descriptors */
-@@ -1714,7 +1586,6 @@ static struct caam_hash_template driver_
+@@ -1714,7 +1544,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA1,
}, {
.name = "sha224",
.driver_name = "sha224-caam",
-@@ -1736,7 +1607,6 @@ static struct caam_hash_template driver_
+@@ -1736,7 +1565,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA224,
}, {
.name = "sha256",
.driver_name = "sha256-caam",
-@@ -1758,7 +1628,6 @@ static struct caam_hash_template driver_
+@@ -1758,7 +1586,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA256,
}, {
.name = "sha384",
.driver_name = "sha384-caam",
-@@ -1780,7 +1649,6 @@ static struct caam_hash_template driver_
+@@ -1780,7 +1607,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA384,
}, {
.name = "sha512",
.driver_name = "sha512-caam",
-@@ -1802,7 +1670,6 @@ static struct caam_hash_template driver_
+@@ -1802,7 +1628,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA512,
}, {
.name = "md5",
.driver_name = "md5-caam",
-@@ -1824,14 +1691,12 @@ static struct caam_hash_template driver_
+@@ -1824,14 +1649,12 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_MD5,
struct ahash_alg ahash_alg;
};
-@@ -1853,6 +1718,7 @@ static int caam_hash_cra_init(struct cry
+@@ -1853,6 +1676,8 @@ static int caam_hash_cra_init(struct cry
HASH_MSG_LEN + SHA256_DIGEST_SIZE,
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+ dma_addr_t dma_addr;
++ struct caam_drv_private *priv;
/*
* Get a Job ring from Job Ring driver to ensure in-order
-@@ -1863,11 +1729,31 @@ static int caam_hash_cra_init(struct cry
+@@ -1863,11 +1688,34 @@ static int caam_hash_cra_init(struct cry
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(ctx->jrdev);
}
+
++ priv = dev_get_drvdata(ctx->jrdev->parent);
++ ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
++
+ dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
+ offsetof(struct caam_hash_ctx,
+ sh_desc_update_dma),
-+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+ dev_err(ctx->jrdev, "unable to map shared descriptors\n");
+ caam_jr_free(ctx->jrdev);
OP_ALG_ALGSEL_SHIFT];
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-@@ -1879,30 +1765,10 @@ static void caam_hash_cra_exit(struct cr
+@@ -1879,30 +1727,10 @@ static void caam_hash_cra_exit(struct cr
{
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
+ offsetof(struct caam_hash_ctx,
+ sh_desc_update_dma),
-+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
caam_jr_free(ctx->jrdev);
}
-@@ -1961,7 +1827,6 @@ caam_hash_alloc(struct caam_hash_templat
+@@ -1961,7 +1789,6 @@ caam_hash_alloc(struct caam_hash_templat
alg->cra_type = &crypto_ahash_type;
t_alg->alg_type = template->alg_type;
return t_alg;
}
+--- /dev/null
++++ b/drivers/crypto/caam/caamhash_desc.c
+@@ -0,0 +1,108 @@
++/*
++ * Shared descriptors for ahash algorithms
++ *
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the names of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "compat.h"
++#include "desc_constr.h"
++#include "caamhash_desc.h"
++
++/**
++ * cnstr_shdsc_ahash - ahash shared descriptor
++ * @desc: pointer to buffer used for descriptor construction
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case.
++ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
++ * SHA256, SHA384, SHA512}.
++ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINAL, UPDATE}
++ * @digestsize: algorithm's digest size
++ * @ctx_len: size of Context Register
++ * @import_ctx: true if previous Context Register needs to be restored
++ * must be true for ahash update and final
++ * must be false for ahash first and digest
++ * @era: SEC Era
++ */
++void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
++ int digestsize, int ctx_len, bool import_ctx, int era)
++{
++ u32 op = adata->algtype;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++ /* Append key if it has been set; ahash update excluded */
++ if (state != OP_ALG_AS_UPDATE && adata->keylen) {
++ u32 *skip_key_load;
++
++ /* Skip key loading if already shared */
++ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
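++ /*
++ * SEC Eras before 6 need the pre-computed split key inlined in
++ * the descriptor; Era 6+ derives it in-device via the DKP
++ * protocol.
++ */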
++ if (era < 6)
++ append_key_as_imm(desc, adata->key_virt,
++ adata->keylen_pad,
++ adata->keylen, CLASS_2 |
++ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ else
++ append_proto_dkp(desc, adata);
++
++ set_jump_tgt_here(desc, skip_key_load);
++
++ op |= OP_ALG_AAI_HMAC_PRECOMP;
++ }
++
++ /* If needed, import context from software */
++ if (import_ctx)
++ append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++
++ /* Class 2 operation */
++ append_operation(desc, op | state | OP_ALG_ENCRYPT);
++
++ /*
++ * Load from buf and/or src and write to req->result or state->context
++ * Calculate remaining bytes to read
++ */
++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++ /* Read remaining bytes */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
++ FIFOLD_TYPE_MSG | KEY_VLF);
++ /* Store class2 context bytes */
++ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++}
++EXPORT_SYMBOL(cnstr_shdsc_ahash);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
++MODULE_AUTHOR("NXP Semiconductors");
+--- /dev/null
++++ b/drivers/crypto/caam/caamhash_desc.h
+@@ -0,0 +1,49 @@
++/*
++ * Shared descriptors for ahash algorithms
++ *
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the names of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _CAAMHASH_DESC_H_
++#define _CAAMHASH_DESC_H_
++
++/* length of descriptors text */
++#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
++#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
++#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
++#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
++#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
++
++void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
++ int digestsize, int ctx_len, bool import_ctx, int era);
++
++#endif /* _CAAMHASH_DESC_H_ */
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -18,6 +18,10 @@
/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
-@@ -270,7 +271,7 @@ static int deinstantiate_rng(struct devi
+@@ -274,7 +275,7 @@ static int deinstantiate_rng(struct devi
/*
* If the corresponding bit is set, then it means the state
* handle was initialized by us, and thus it needs to be
*/
if ((1 << sh_idx) & state_handle_mask) {
/*
-@@ -303,20 +304,24 @@ static int caam_remove(struct platform_d
+@@ -307,20 +308,24 @@ static int caam_remove(struct platform_d
struct device *ctrldev;
struct caam_drv_private *ctrlpriv;
struct caam_ctrl __iomem *ctrl;
deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
/* Shut down debug views */
-@@ -331,8 +336,8 @@ static int caam_remove(struct platform_d
+@@ -335,8 +340,8 @@ static int caam_remove(struct platform_d
clk_disable_unprepare(ctrlpriv->caam_ipg);
clk_disable_unprepare(ctrlpriv->caam_mem);
clk_disable_unprepare(ctrlpriv->caam_aclk);
return 0;
}
-@@ -366,11 +371,8 @@ static void kick_trng(struct platform_de
+@@ -370,11 +375,8 @@ static void kick_trng(struct platform_de
*/
val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
>> RTSDCTL_ENT_DLY_SHIFT;
val = rd_reg32(&r4tst->rtsdctl);
val = (val & ~RTSDCTL_ENT_DLY_MASK) |
-@@ -382,15 +384,12 @@ static void kick_trng(struct platform_de
+@@ -386,15 +388,12 @@ static void kick_trng(struct platform_de
wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
/* read the control register */
val = rd_reg32(&r4tst->rtmctl);
}
/**
-@@ -411,28 +410,26 @@ int caam_get_era(void)
+@@ -415,28 +414,26 @@ int caam_get_era(void)
}
EXPORT_SYMBOL(caam_get_era);
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
-@@ -452,9 +449,10 @@ static int caam_probe(struct platform_de
+@@ -456,9 +453,10 @@ static int caam_probe(struct platform_de
dev = &pdev->dev;
dev_set_drvdata(dev, ctrlpriv);
/* Enable clocking */
clk = caam_drv_identify_clk(&pdev->dev, "ipg");
if (IS_ERR(clk)) {
-@@ -483,14 +481,16 @@ static int caam_probe(struct platform_de
+@@ -487,14 +485,16 @@ static int caam_probe(struct platform_de
}
ctrlpriv->caam_aclk = clk;
ret = clk_prepare_enable(ctrlpriv->caam_ipg);
if (ret < 0) {
-@@ -511,11 +511,13 @@ static int caam_probe(struct platform_de
+@@ -515,11 +515,13 @@ static int caam_probe(struct platform_de
goto disable_caam_mem;
}
}
/* Get configuration properties from device tree */
-@@ -542,13 +544,13 @@ static int caam_probe(struct platform_de
+@@ -546,13 +548,13 @@ static int caam_probe(struct platform_de
else
BLOCK_OFFSET = PG_SIZE_64K;
BLOCK_OFFSET * DECO_BLOCK_NUMBER
);
-@@ -557,12 +559,17 @@ static int caam_probe(struct platform_de
+@@ -561,12 +563,17 @@ static int caam_probe(struct platform_de
/*
* Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
/*
* Read the Compile Time parameters and SCFGR to determine
-@@ -590,64 +597,67 @@ static int caam_probe(struct platform_de
+@@ -594,64 +601,69 @@ static int caam_probe(struct platform_de
JRSTART_JR1_START | JRSTART_JR2_START |
JRSTART_JR3_START);
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
- else
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
--
++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
++ } else {
++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
++ }
++ if (ret) {
++ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
++ goto iounmap_ctrl;
++ }
+
- /*
- * Detect and enable JobRs
- * First, find out how many ring spec'ed, allocate references
- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
- of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
- rspec++;
-+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
-+ } else {
-+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-+ }
-+ if (ret) {
-+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
-+ goto iounmap_ctrl;
-+ }
++ ctrlpriv->era = caam_get_era();
- ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
- sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
}
/* If no QI and no rings specified, quit and go home */
-@@ -662,8 +672,10 @@ static int caam_probe(struct platform_de
+@@ -666,8 +678,10 @@ static int caam_probe(struct platform_de
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
/*
-@@ -731,77 +743,46 @@ static int caam_probe(struct platform_de
+@@ -734,78 +748,47 @@ static int caam_probe(struct platform_de
+
/* Report "alive" for the developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- caam_get_era());
+- caam_get_era());
- dev_info(dev, "job rings = %d, qi = %d\n",
- ctrlpriv->total_jobrs, ctrlpriv->qi_present);
++ ctrlpriv->era);
+ dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
+ ctrlpriv->total_jobrs, ctrlpriv->qi_present,
+ caam_dpaa2 ? "yes" : "no");
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_kek = debugfs_create_blob("kek",
S_IRUSR |
-@@ -809,7 +790,7 @@ static int caam_probe(struct platform_de
+@@ -813,7 +796,7 @@ static int caam_probe(struct platform_de
ctrlpriv->ctl,
&ctrlpriv->ctl_kek_wrap);
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
S_IRUSR |
-@@ -817,7 +798,7 @@ static int caam_probe(struct platform_de
+@@ -821,7 +804,7 @@ static int caam_probe(struct platform_de
ctrlpriv->ctl,
&ctrlpriv->ctl_tkek_wrap);
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
S_IRUSR |
-@@ -828,13 +809,17 @@ static int caam_probe(struct platform_de
+@@ -832,13 +815,17 @@ static int caam_probe(struct platform_de
return 0;
caam_remove:
disable_caam_aclk:
clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
-@@ -844,17 +829,6 @@ disable_caam_ipg:
+@@ -848,17 +835,6 @@ disable_caam_ipg:
return ret;
}
#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
/*
-@@ -1107,8 +1104,8 @@ struct sec4_sg_entry {
+@@ -449,6 +446,18 @@ struct sec4_sg_entry {
+ #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
+ #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
+ #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
+
+ /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
+ #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
+@@ -1098,6 +1107,22 @@ struct sec4_sg_entry {
+ /* MacSec protinfos */
+ #define OP_PCL_MACSEC 0x0001
+
++/* Derived Key Protocol (DKP) Protinfo */
++#define OP_PCL_DKP_SRC_SHIFT 14
++#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_DST_SHIFT 12
++#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_KEY_SHIFT 0
++#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
++
+ /* PKI unidirectional protocol protinfo bits */
+ #define OP_PCL_PKPROT_TEST 0x0008
+ #define OP_PCL_PKPROT_DECRYPT 0x0004
+@@ -1107,8 +1132,8 @@ struct sec4_sg_entry {
/* For non-protocol/alg-only op commands */
#define OP_ALG_TYPE_SHIFT 24
#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
#define OP_ALG_ALGSEL_SHIFT 16
#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
-@@ -1249,7 +1246,7 @@ struct sec4_sg_entry {
+@@ -1249,7 +1274,7 @@ struct sec4_sg_entry {
#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
/* PKHA mode copy-memory functions */
#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
#define OP_ALG_PKMODE_DST_REG_SHIFT 10
#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
-@@ -1445,10 +1442,11 @@ struct sec4_sg_entry {
+@@ -1445,10 +1470,11 @@ struct sec4_sg_entry {
#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
/* Destination selectors */
#define MATH_DEST_SHIFT 8
-@@ -1629,4 +1627,31 @@ struct sec4_sg_entry {
+@@ -1457,6 +1483,7 @@ struct sec4_sg_entry {
+ #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
+ #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
+ #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
++#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
+ #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
+ #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
+ #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+@@ -1629,4 +1656,31 @@ struct sec4_sg_entry {
/* Frame Descriptor Command for Replacement Job Descriptor */
#define FD_CMD_REPLACE_JOB_DESC 0x20000000
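Taken together, the DKP PCLIDs and the OP_PCL_DKP_* protinfo fields added above compose a single protocol OPERATION command word. A hedged illustration (the 32-byte key length is an arbitrary example value):

    /* DKP over SHA-256: key supplied immediately in the descriptor,
     * derived (split) key written back inline over it.
     */
    u32 op = OP_TYPE_UNI_PROTOCOL | OP_PCLID_DKP_SHA256 |
             OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
             (32 & OP_PCL_DKP_KEY_MASK);        /* input key length */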
}
-static inline void append_data(u32 *desc, void *data, int len)
-+static inline void append_data(u32 * const desc, void *data, int len)
++static inline void append_data(u32 * const desc, const void *data, int len)
{
u32 *offset = desc_end(desc);
}
-static inline void append_cmd_data(u32 *desc, void *data, int len,
-+static inline void append_cmd_data(u32 * const desc, void *data, int len,
++static inline void append_cmd_data(u32 * const desc, const void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
-+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
++static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
-+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
++static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
u32 options) \
{ \
PRINT_POS; \
-@@ -426,3 +434,66 @@ do { \
+@@ -426,3 +434,107 @@ do { \
APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
+ unsigned int keylen_pad;
+ union {
+ dma_addr_t key_dma;
-+ void *key_virt;
++ const void *key_virt;
+ };
+ bool key_inline;
+};
+ return (rem_bytes >= 0) ? 0 : -1;
+}
+
++/**
++ * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
++ * @desc: pointer to buffer used for descriptor construction
++ * @adata: pointer to authentication transform definitions.
++ * keylen should be the length of the initial key, while keylen_pad
++ * should be the length of the derived (split) key.
++ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
++ * SHA256, SHA384, SHA512}.
++ */
++static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
++{
++ u32 protid;
++
++ /*
++ * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
++ * to OP_PCLID_DKP_{MD5, SHA*}
++ */
++ protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
++ (0x20 << OP_ALG_ALGSEL_SHIFT);
++
++ if (adata->key_inline) {
++ int words;
++
++ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
++ OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
++ adata->keylen);
++ append_data(desc, adata->key_virt, adata->keylen);
++
++ /* Reserve space in descriptor buffer for the derived key */
++ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
++ ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
++ if (words)
++ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
++ } else {
++ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
++ OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
++ adata->keylen);
++ append_ptr(desc, adata->key_dma);
++ }
++}
++
+#endif /* DESC_CONSTR_H */
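A sketch of the call-site pattern this helper enables in the shared-descriptor builders: pre-era-6 devices keep loading a split key pre-computed by gen_split_key(), while era 6+ devices derive it in-descriptor via DKP. Variable names follow the caamalg_desc.c convention and are assumptions here:

    if (era < 6) {
            if (adata->key_inline)
                    append_key_as_imm(desc, adata->key_virt,
                                      adata->keylen_pad, adata->keylen,
                                      CLASS_2 | KEY_DEST_MDHA_SPLIT |
                                      KEY_ENC);
            else
                    append_key(desc, adata->key_dma, adata->keylen,
                               CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
    } else {
            append_proto_dkp(desc, adata);      /* split key derived by HW */
    }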
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.c
-@@ -0,0 +1,859 @@
+@@ -0,0 +1,858 @@
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
-+#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
-+#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
++#include <linux/fsl/mc.h>
+#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
+#include "dpseci.h"
+#include "dpseci_cmd.h"
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_open *cmd_params;
+ int err;
+
+ */
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
+ cmd_flags,
+int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
+ const struct dpseci_cfg *cfg, u32 *obj_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_create *cmd_params;
+ int i, err;
+
+int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
+ u32 object_id)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_destroy *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
+ */
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
+ cmd_flags,
+ */
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
+ cmd_flags,
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_is_enabled *rsp_params;
+ int err;
+
+ */
+int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
+ cmd_flags,
+int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 *en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_enable *cmd_params;
+ struct dpseci_rsp_get_irq_enable *rsp_params;
+ int err;
+int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u8 en)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_enable *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
+int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_mask *cmd_params;
+ int err;
+
+int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 mask)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_mask *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
+int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 *status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_status *cmd_params;
+ int err;
+
+int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 irq_index, u32 status)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_irq_status *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_attributes *rsp_params;
+ int err;
+
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ int err;
+
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ struct dpseci_rsp_get_tx_queue *rsp_params;
+ int err;
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_sec_attr *rsp_params;
+ int err;
+
+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_counters *counters)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_sec_counters *rsp_params;
+ int err;
+
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_api_version *rsp_params;
+ int err;
+
+int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
+ u8 options, struct opr_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_opr *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(
+int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
+ struct opr_cfg *cfg, struct opr_qry *qry)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_opr *cmd_params;
+ struct dpseci_rsp_get_opr *rsp_params;
+ int err;
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg)
+{
-+ struct mc_command cmd = { 0 };
++ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *rsp_params;
+ int err;
+
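Every dpseci_*() command above follows the same marshalling pattern against the MC firmware portal. A condensed, hypothetical sketch of that shape (the command id, helper name and response layout are illustrative, not real DPSECI definitions):

    static int dpseci_example_query(struct fsl_mc_io *mc_io, u32 cmd_flags,
                                    u16 token, u8 *enabled)
    {
            struct fsl_mc_command cmd = { 0 };
            int err;

            /* encode command id, flags and object token into the header */
            cmd.header = mc_encode_cmd_header(DPSECI_CMDID_EXAMPLE,
                                              cmd_flags, token);
            /* "set"-style commands would marshal into cmd.params[] here */

            err = mc_send_command(mc_io, &cmd);  /* synchronous portal I/O */
            if (err)
                    return err;

            /* "get"-style commands unmarshal the response from cmd.params[] */
            *enabled = le64_to_cpu(cmd.params[0]) & 0x1;
            return 0;
    }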
#endif /* CAAM_ERROR_H */
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
-@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
- struct device *dev;
- int ridx;
- struct caam_job_ring __iomem *rregs; /* JobR's register space */
-+ struct tasklet_struct irqtask;
- int irq; /* One per queue */
-
- /* Number of scatterlist crypt transforms active on the JobR */
-@@ -63,10 +64,9 @@ struct caam_drv_private_jr {
+@@ -64,10 +64,9 @@ struct caam_drv_private_jr {
* Driver-private storage for a single CAAM block instance
*/
struct caam_drv_private {
/* Physical-presence section */
struct caam_ctrl __iomem *ctrl; /* controller region */
-@@ -102,11 +102,6 @@ struct caam_drv_private {
+@@ -84,6 +83,7 @@ struct caam_drv_private {
+ u8 qi_present; /* Nonzero if QI present in device */
+ int secvio_irq; /* Security violation interrupt number */
+ int virt_en; /* Virtualization enabled in CAAM */
++ int era; /* CAAM Era (internal HW revision) */
+
+ #define RNG4_MAX_HANDLES 2
+ /* RNG4 block */
+@@ -103,11 +103,6 @@ struct caam_drv_private {
#ifdef CONFIG_DEBUG_FS
struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
#endif
-@@ -114,4 +109,22 @@ struct caam_drv_private {
+@@ -115,4 +110,22 @@ struct caam_drv_private {
void caam_jr_algapi_init(struct device *dev);
void caam_jr_algapi_remove(struct device *dev);
static int caam_reset_hw_jr(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
-@@ -73,6 +82,8 @@ static int caam_jr_shutdown(struct devic
-
- ret = caam_reset_hw_jr(dev);
-
-+ tasklet_kill(&jrp->irqtask);
-+
- /* Release interrupt */
- free_irq(jrp->irq, dev);
-
-@@ -116,6 +127,8 @@ static int caam_jr_remove(struct platfor
+@@ -118,6 +127,8 @@ static int caam_jr_remove(struct platfor
dev_err(jrdev, "Failed to shut down job ring\n");
irq_dispose_mapping(jrpriv->irq);
return ret;
}
-@@ -128,7 +141,7 @@ static irqreturn_t caam_jr_interrupt(int
-
- /*
- * Check the output ring for ready responses, kick
-- * the threaded irq if jobs done.
-+ * tasklet if jobs done.
- */
- irqstate = rd_reg32(&jrp->rregs->jrintstatus);
- if (!irqstate)
-@@ -150,13 +163,18 @@ static irqreturn_t caam_jr_interrupt(int
- /* Have valid interrupt at this point, just ACK and trigger */
- wr_reg32(&jrp->rregs->jrintstatus, irqstate);
-
-- return IRQ_WAKE_THREAD;
-+ preempt_disable();
-+ tasklet_schedule(&jrp->irqtask);
-+ preempt_enable();
-+
-+ return IRQ_HANDLED;
- }
-
--static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
-+/* Deferred service handler, run as interrupt-fired tasklet */
-+static void caam_jr_dequeue(unsigned long devarg)
- {
- int hw_idx, sw_idx, i, head, tail;
-- struct device *dev = st_dev;
-+ struct device *dev = (struct device *)devarg;
- struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
- void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
- u32 *userdesc, userstatus;
-@@ -230,8 +248,6 @@ static irqreturn_t caam_jr_threadirq(int
-
- /* reenable / unmask IRQs */
- clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
--
-- return IRQ_HANDLED;
- }
-
- /**
-@@ -275,6 +291,36 @@ struct device *caam_jr_alloc(void)
+@@ -281,6 +292,36 @@ struct device *caam_jr_alloc(void)
EXPORT_SYMBOL(caam_jr_alloc);
/**
* caam_jr_free() - Free the Job Ring
* @rdev - points to the dev that identifies the Job ring to
* be released.
-@@ -389,10 +435,11 @@ static int caam_jr_init(struct device *d
-
- jrp = dev_get_drvdata(dev);
-
-+ tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
-+
- /* Connect job ring interrupt handler. */
-- error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
-- caam_jr_threadirq, IRQF_SHARED,
-- dev_name(dev), dev);
-+ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
-+ dev_name(dev), dev);
- if (error) {
- dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
- jrp->ridx, jrp->irq);
-@@ -454,6 +501,7 @@ out_free_inpring:
- out_free_irq:
- free_irq(jrp->irq, dev);
- out_kill_deq:
-+ tasklet_kill(&jrp->irqtask);
- return error;
- }
-
-@@ -489,15 +537,28 @@ static int caam_jr_probe(struct platform
+@@ -497,15 +538,28 @@ static int caam_jr_probe(struct platform
return -ENOMEM;
}
/* Identify the interrupt */
jrpriv->irq = irq_of_parse_and_map(nprop, 0);
-@@ -517,10 +578,12 @@ static int caam_jr_probe(struct platform
+@@ -525,10 +579,12 @@ static int caam_jr_probe(struct platform
atomic_set(&jrpriv->tfm_count, 0);
+}
--- /dev/null
+++ b/drivers/crypto/caam/qi.c
-@@ -0,0 +1,797 @@
+@@ -0,0 +1,804 @@
+/*
+ * CAAM/SEC 4.x QI transport/backend driver
+ * Queue Interface backend functionality
+
+ fd.cmd = 0;
+ fd.format = qm_fd_compound;
-+ fd.cong_weight = req->fd_sgt[1].length;
++ fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
+ fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(qidev, fd.addr)) {
+ return qman_cb_dqrr_stop;
+
+ fd = &dqrr->fd;
-+ if (unlikely(fd->status))
-+ dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
++ if (unlikely(fd->status)) {
++ u32 ssrc = fd->status & JRSTA_SSRC_MASK;
++ u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
++
++ if (ssrc != JRSTA_SSRC_CCB_ERROR ||
++ err_id != JRSTA_CCBERR_ERRID_ICVCHK)
++ dev_err(qidev, "Error: %#x in CAAM response FD\n",
++ fd->status);
++ }
+
+ if (unlikely(fd->format != qm_fd_compound)) {
+ dev_err(qidev, "Non-compound FD from CAAM\n");
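The status filter above is deliberate: an ICV-check failure is the expected outcome of an AEAD decryption with a corrupted tag and is reported to the caller as -EBADMSG, so it is not worth a device-error log. A hypothetical predicate expressing the same test (masks as used in the hunk above):

    static bool caam_qi_icv_check_failed(u32 status)
    {
            return (status & JRSTA_SSRC_MASK) == JRSTA_SSRC_CCB_ERROR &&
                   (status & JRSTA_CCBERR_ERRID_MASK) ==
                           JRSTA_CCBERR_ERRID_ICVCHK;
    }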
- return sg_nents;
-}
+#endif /* _SG_SW_SEC4_H_ */
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1241,6 +1241,14 @@ static int ipsec_esp(struct talitos_edes
+ ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+ sg_count, areq->assoclen, tbl_off, elen);
+
++ /*
++ * On SEC 2.x+, the cipher-in length must include only the ciphertext,
++ * while the extent carries the ICV length.
++ */
++ if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
++ (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
++ desc->ptr[4].len = cpu_to_be16(cryptlen);
++
+ if (ret > 1) {
+ tbl_off += ret;
+ sync_needed = true;
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -516,7 +516,7 @@ err: