layerscape: update linux 4.9 patches to LSDK-18.06
diff --git a/target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch b/target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch
index 79103a273b3959db697bf827f8269b332c3ce0e5..56a3dc01c48a91dac3a7db1d563bc930bbb85957 100644
@@ -1,12 +1,12 @@
-From 0a5b97d1f524c1769b4059e3c7123b52755f7121 Mon Sep 17 00:00:00 2001
+From 2a0aa9bd187f6f5693982a8f79665585af772237 Mon Sep 17 00:00:00 2001
 From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 27 Sep 2017 15:02:01 +0800
-Subject: [PATCH] crypto: support layerscape
+Date: Thu, 5 Jul 2018 17:29:41 +0800
+Subject: [PATCH 16/32] crypto: support layerscape
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
 
-This is a integrated patch for layerscape sec support.
+This is an integrated patch for layerscape sec support.
 
 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
 Signed-off-by: Fabio Estevam <festevam@gmail.com>
@@ -27,63 +27,67 @@ Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
 Signed-off-by: Laura Abbott <labbott@redhat.com>
 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
-Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 ---
- crypto/Kconfig                                    |   30 +
- crypto/Makefile                                   |    4 +
- crypto/acompress.c                                |  169 +
- crypto/algboss.c                                  |   12 +-
- crypto/crypto_user.c                              |   19 +
- crypto/scompress.c                                |  356 ++
- crypto/tcrypt.c                                   |   17 +-
- crypto/testmgr.c                                  | 1701 ++++----
- crypto/testmgr.h                                  | 1125 +++---
- crypto/tls.c                                      |  607 +++
- drivers/crypto/caam/Kconfig                       |   72 +-
- drivers/crypto/caam/Makefile                      |   15 +-
- drivers/crypto/caam/caamalg.c                     | 2125 +++-------
- drivers/crypto/caam/caamalg_desc.c                | 1913 +++++++++
- drivers/crypto/caam/caamalg_desc.h                |  127 +
- drivers/crypto/caam/caamalg_qi.c                  | 2877 +++++++++++++
- drivers/crypto/caam/caamalg_qi2.c                 | 4428 +++++++++++++++++++++
- drivers/crypto/caam/caamalg_qi2.h                 |  265 ++
- drivers/crypto/caam/caamhash.c                    |  521 +--
- drivers/crypto/caam/caampkc.c                     |  471 ++-
- drivers/crypto/caam/caampkc.h                     |   58 +
- drivers/crypto/caam/caamrng.c                     |   16 +-
- drivers/crypto/caam/compat.h                      |    1 +
- drivers/crypto/caam/ctrl.c                        |  356 +-
- drivers/crypto/caam/ctrl.h                        |    2 +
- drivers/crypto/caam/desc.h                        |   52 +-
- drivers/crypto/caam/desc_constr.h                 |  139 +-
- drivers/crypto/caam/dpseci.c                      |  859 ++++
- drivers/crypto/caam/dpseci.h                      |  395 ++
- drivers/crypto/caam/dpseci_cmd.h                  |  261 ++
- drivers/crypto/caam/error.c                       |  127 +-
- drivers/crypto/caam/error.h                       |   10 +-
- drivers/crypto/caam/intern.h                      |   31 +-
- drivers/crypto/caam/jr.c                          |   55 +-
- drivers/crypto/caam/key_gen.c                     |   32 +-
- drivers/crypto/caam/key_gen.h                     |   36 +-
- drivers/crypto/caam/pdb.h                         |   62 +
- drivers/crypto/caam/pkc_desc.c                    |   36 +
- drivers/crypto/caam/qi.c                          |  797 ++++
- drivers/crypto/caam/qi.h                          |  204 +
- drivers/crypto/caam/regs.h                        |   63 +-
- drivers/crypto/caam/sg_sw_qm.h                    |  126 +
- drivers/crypto/caam/sg_sw_qm2.h                   |   81 +
- drivers/crypto/caam/sg_sw_sec4.h                  |   60 +-
- drivers/net/wireless/rsi/rsi_91x_usb.c            |    2 +-
- drivers/staging/wilc1000/linux_wlan.c             |    2 +-
- drivers/staging/wilc1000/wilc_wfi_cfgoperations.c |    2 +-
- include/crypto/acompress.h                        |  269 ++
- include/crypto/internal/acompress.h               |   81 +
- include/crypto/internal/scompress.h               |  136 +
- include/linux/crypto.h                            |    3 +
- include/uapi/linux/cryptouser.h                   |    5 +
- scripts/spelling.txt                              |    3 +
- sound/soc/amd/acp-pcm-dma.c                       |    2 +-
- 54 files changed, 17263 insertions(+), 3955 deletions(-)
+ crypto/Kconfig                                |   30 +
+ crypto/Makefile                               |    4 +
+ crypto/acompress.c                            |  169 +
+ crypto/algboss.c                              |   12 +-
+ crypto/crypto_user.c                          |   19 +
+ crypto/scompress.c                            |  356 +
+ crypto/tcrypt.c                               |   17 +-
+ crypto/testmgr.c                              | 1708 ++---
+ crypto/testmgr.h                              | 1125 ++--
+ crypto/tls.c                                  |  607 ++
+ drivers/crypto/caam/Kconfig                   |   77 +-
+ drivers/crypto/caam/Makefile                  |   16 +-
+ drivers/crypto/caam/caamalg.c                 | 2185 ++----
+ drivers/crypto/caam/caamalg_desc.c            | 1961 ++++++
+ drivers/crypto/caam/caamalg_desc.h            |  127 +
+ drivers/crypto/caam/caamalg_qi.c              | 3321 +++++++++
+ drivers/crypto/caam/caamalg_qi2.c             | 5938 +++++++++++++++++
+ drivers/crypto/caam/caamalg_qi2.h             |  283 +
+ drivers/crypto/caam/caamhash.c                |  555 +-
+ drivers/crypto/caam/caamhash_desc.c           |  108 +
+ drivers/crypto/caam/caamhash_desc.h           |   49 +
+ drivers/crypto/caam/caampkc.c                 |  471 +-
+ drivers/crypto/caam/caampkc.h                 |   58 +
+ drivers/crypto/caam/caamrng.c                 |   16 +-
+ drivers/crypto/caam/compat.h                  |    1 +
+ drivers/crypto/caam/ctrl.c                    |  358 +-
+ drivers/crypto/caam/ctrl.h                    |    2 +
+ drivers/crypto/caam/desc.h                    |   84 +-
+ drivers/crypto/caam/desc_constr.h             |  180 +-
+ drivers/crypto/caam/dpseci.c                  |  858 +++
+ drivers/crypto/caam/dpseci.h                  |  395 ++
+ drivers/crypto/caam/dpseci_cmd.h              |  261 +
+ drivers/crypto/caam/error.c                   |  127 +-
+ drivers/crypto/caam/error.h                   |   10 +-
+ drivers/crypto/caam/intern.h                  |   31 +-
+ drivers/crypto/caam/jr.c                      |   72 +-
+ drivers/crypto/caam/jr.h                      |    2 +
+ drivers/crypto/caam/key_gen.c                 |   32 +-
+ drivers/crypto/caam/key_gen.h                 |   36 +-
+ drivers/crypto/caam/pdb.h                     |   62 +
+ drivers/crypto/caam/pkc_desc.c                |   36 +
+ drivers/crypto/caam/qi.c                      |  804 +++
+ drivers/crypto/caam/qi.h                      |  204 +
+ drivers/crypto/caam/regs.h                    |   63 +-
+ drivers/crypto/caam/sg_sw_qm.h                |  126 +
+ drivers/crypto/caam/sg_sw_qm2.h               |   81 +
+ drivers/crypto/caam/sg_sw_sec4.h              |   60 +-
+ drivers/crypto/talitos.c                      |    8 +
+ drivers/net/wireless/rsi/rsi_91x_usb.c        |    2 +-
+ drivers/staging/wilc1000/linux_wlan.c         |    2 +-
+ .../staging/wilc1000/wilc_wfi_cfgoperations.c |    2 +-
+ include/crypto/acompress.h                    |  269 +
+ include/crypto/internal/acompress.h           |   81 +
+ include/crypto/internal/scompress.h           |  136 +
+ include/linux/crypto.h                        |    3 +
+ include/uapi/linux/cryptouser.h               |    5 +
+ scripts/spelling.txt                          |    3 +
+ sound/soc/amd/acp-pcm-dma.c                   |    2 +-
+ 58 files changed, 19620 insertions(+), 3990 deletions(-)
  create mode 100644 crypto/acompress.c
  create mode 100644 crypto/scompress.c
  create mode 100644 crypto/tls.c
@@ -92,6 +96,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  create mode 100644 drivers/crypto/caam/caamalg_qi.c
  create mode 100644 drivers/crypto/caam/caamalg_qi2.c
  create mode 100644 drivers/crypto/caam/caamalg_qi2.h
+ create mode 100644 drivers/crypto/caam/caamhash_desc.c
+ create mode 100644 drivers/crypto/caam/caamhash_desc.h
  create mode 100644 drivers/crypto/caam/dpseci.c
  create mode 100644 drivers/crypto/caam/dpseci.h
  create mode 100644 drivers/crypto/caam/dpseci_cmd.h
@@ -776,7 +782,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  };
  
  struct tcrypt_result {
-@@ -1329,6 +1329,10 @@ static int do_test(const char *alg, u32
+@@ -1333,6 +1333,10 @@ static int do_test(const char *alg, u32
                ret += tcrypt_test("hmac(sha3-512)");
                break;
  
@@ -787,7 +793,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        case 150:
                ret += tcrypt_test("ansi_cprng");
                break;
-@@ -1390,6 +1394,9 @@ static int do_test(const char *alg, u32
+@@ -1394,6 +1398,9 @@ static int do_test(const char *alg, u32
        case 190:
                ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
                break;
@@ -797,7 +803,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        case 200:
                test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
                                speed_template_16_24_32);
-@@ -1404,9 +1411,9 @@ static int do_test(const char *alg, u32
+@@ -1408,9 +1415,9 @@ static int do_test(const char *alg, u32
                test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
                                speed_template_32_40_48);
                test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
@@ -809,7 +815,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
                                speed_template_16_24_32);
                test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
-@@ -1837,9 +1844,9 @@ static int do_test(const char *alg, u32
+@@ -1841,9 +1848,9 @@ static int do_test(const char *alg, u32
                test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
                                   speed_template_32_40_48);
                test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
@@ -1256,7 +1262,31 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                           const bool diff_dst, const int align_offset)
  {
        const char *algo =
-@@ -1330,7 +1571,8 @@ out_nobuf:
+@@ -1079,12 +1320,16 @@ static int __test_skcipher(struct crypto
+       const char *e, *d;
+       struct tcrypt_result result;
+       void *data;
+-      char iv[MAX_IVLEN];
++      char *iv;
+       char *xbuf[XBUFSIZE];
+       char *xoutbuf[XBUFSIZE];
+       int ret = -ENOMEM;
+       unsigned int ivsize = crypto_skcipher_ivsize(tfm);
++      iv = kmalloc(MAX_IVLEN, GFP_KERNEL);
++      if (!iv)
++              return ret;
++
+       if (testmgr_alloc_buf(xbuf))
+               goto out_nobuf;
+@@ -1325,12 +1570,14 @@ out:
+               testmgr_free_buf(xoutbuf);
+ out_nooutbuf:
+       testmgr_free_buf(xbuf);
++      kfree(iv);
+ out_nobuf:
+       return ret;
  }
  
  static int test_skcipher(struct crypto_skcipher *tfm, int enc,
@@ -1266,7 +1296,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  {
        unsigned int alignmask;
        int ret;
-@@ -1362,8 +1604,10 @@ static int test_skcipher(struct crypto_s
+@@ -1362,8 +1609,10 @@ static int test_skcipher(struct crypto_s
        return 0;
  }
  
@@ -1279,7 +1309,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  {
        const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
        unsigned int i;
-@@ -1442,7 +1686,154 @@ out:
+@@ -1442,7 +1691,154 @@ out:
        return ret;
  }
  
@@ -1435,7 +1465,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                      unsigned int tcount)
  {
        const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
-@@ -1509,7 +1900,7 @@ static int alg_test_aead(const struct al
+@@ -1509,7 +1905,7 @@ static int alg_test_aead(const struct al
        struct crypto_aead *tfm;
        int err = 0;
  
@@ -1444,7 +1474,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        if (IS_ERR(tfm)) {
                printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
                       "%ld\n", driver, PTR_ERR(tfm));
-@@ -1538,7 +1929,7 @@ static int alg_test_cipher(const struct
+@@ -1538,7 +1934,7 @@ static int alg_test_cipher(const struct
        struct crypto_cipher *tfm;
        int err = 0;
  
@@ -1453,7 +1483,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        if (IS_ERR(tfm)) {
                printk(KERN_ERR "alg: cipher: Failed to load transform for "
                       "%s: %ld\n", driver, PTR_ERR(tfm));
-@@ -1567,7 +1958,7 @@ static int alg_test_skcipher(const struc
+@@ -1567,7 +1963,7 @@ static int alg_test_skcipher(const struc
        struct crypto_skcipher *tfm;
        int err = 0;
  
@@ -1462,7 +1492,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        if (IS_ERR(tfm)) {
                printk(KERN_ERR "alg: skcipher: Failed to load transform for "
                       "%s: %ld\n", driver, PTR_ERR(tfm));
-@@ -1593,22 +1984,38 @@ out:
+@@ -1593,22 +1989,38 @@ out:
  static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
                         u32 type, u32 mask)
  {
@@ -1513,7 +1543,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        return err;
  }
  
-@@ -1618,7 +2025,7 @@ static int alg_test_hash(const struct al
+@@ -1618,7 +2030,7 @@ static int alg_test_hash(const struct al
        struct crypto_ahash *tfm;
        int err;
  
@@ -1522,7 +1552,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        if (IS_ERR(tfm)) {
                printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
                       "%ld\n", driver, PTR_ERR(tfm));
-@@ -1646,7 +2053,7 @@ static int alg_test_crc32c(const struct
+@@ -1646,7 +2058,7 @@ static int alg_test_crc32c(const struct
        if (err)
                goto out;
  
@@ -1531,7 +1561,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        if (IS_ERR(tfm)) {
                printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
                       "%ld\n", driver, PTR_ERR(tfm));
-@@ -1688,7 +2095,7 @@ static int alg_test_cprng(const struct a
+@@ -1688,7 +2100,7 @@ static int alg_test_cprng(const struct a
        struct crypto_rng *rng;
        int err;
  
@@ -1540,7 +1570,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        if (IS_ERR(rng)) {
                printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
                       "%ld\n", driver, PTR_ERR(rng));
-@@ -1703,7 +2110,7 @@ static int alg_test_cprng(const struct a
+@@ -1703,7 +2115,7 @@ static int alg_test_cprng(const struct a
  }
  
  
@@ -1549,7 +1579,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                          const char *driver, u32 type, u32 mask)
  {
        int ret = -EAGAIN;
-@@ -1715,7 +2122,7 @@ static int drbg_cavs_test(struct drbg_te
+@@ -1715,7 +2127,7 @@ static int drbg_cavs_test(struct drbg_te
        if (!buf)
                return -ENOMEM;
  
@@ -1558,7 +1588,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        if (IS_ERR(drng)) {
                printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
                       "%s\n", driver);
-@@ -1777,7 +2184,7 @@ static int alg_test_drbg(const struct al
+@@ -1777,7 +2189,7 @@ static int alg_test_drbg(const struct al
        int err = 0;
        int pr = 0;
        int i = 0;
@@ -1567,7 +1597,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        unsigned int tcount = desc->suite.drbg.count;
  
        if (0 == memcmp(driver, "drbg_pr_", 8))
-@@ -1796,7 +2203,7 @@ static int alg_test_drbg(const struct al
+@@ -1796,7 +2208,7 @@ static int alg_test_drbg(const struct al
  
  }
  
@@ -1576,7 +1606,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                       const char *alg)
  {
        struct kpp_request *req;
-@@ -1888,7 +2295,7 @@ free_req:
+@@ -1888,7 +2300,7 @@ free_req:
  }
  
  static int test_kpp(struct crypto_kpp *tfm, const char *alg,
@@ -1585,7 +1615,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  {
        int ret, i;
  
-@@ -1909,7 +2316,7 @@ static int alg_test_kpp(const struct alg
+@@ -1909,7 +2321,7 @@ static int alg_test_kpp(const struct alg
        struct crypto_kpp *tfm;
        int err = 0;
  
@@ -1594,7 +1624,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        if (IS_ERR(tfm)) {
                pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
                       driver, PTR_ERR(tfm));
-@@ -1924,7 +2331,7 @@ static int alg_test_kpp(const struct alg
+@@ -1924,7 +2336,7 @@ static int alg_test_kpp(const struct alg
  }
  
  static int test_akcipher_one(struct crypto_akcipher *tfm,
@@ -1603,7 +1633,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  {
        char *xbuf[XBUFSIZE];
        struct akcipher_request *req;
-@@ -2044,7 +2451,8 @@ free_xbuf:
+@@ -2044,7 +2456,8 @@ free_xbuf:
  }
  
  static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
@@ -1613,7 +1643,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  {
        const char *algo =
                crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
-@@ -2068,7 +2476,7 @@ static int alg_test_akcipher(const struc
+@@ -2068,7 +2481,7 @@ static int alg_test_akcipher(const struc
        struct crypto_akcipher *tfm;
        int err = 0;
  
@@ -1622,7 +1652,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        if (IS_ERR(tfm)) {
                pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
                       driver, PTR_ERR(tfm));
-@@ -2088,112 +2496,23 @@ static int alg_test_null(const struct al
+@@ -2088,112 +2501,23 @@ static int alg_test_null(const struct al
        return 0;
  }
  
@@ -1740,7 +1770,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2201,12 +2520,7 @@ static const struct alg_test_desc alg_te
+@@ -2201,12 +2525,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
@@ -1754,7 +1784,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2214,12 +2528,7 @@ static const struct alg_test_desc alg_te
+@@ -2214,12 +2533,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
@@ -1768,7 +1798,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2228,12 +2537,7 @@ static const struct alg_test_desc alg_te
+@@ -2228,12 +2542,7 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .aead = {
@@ -1782,7 +1812,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2245,18 +2549,8 @@ static const struct alg_test_desc alg_te
+@@ -2245,18 +2554,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
@@ -1803,7 +1833,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2268,12 +2562,7 @@ static const struct alg_test_desc alg_te
+@@ -2268,12 +2567,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
@@ -1817,7 +1847,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2282,12 +2571,7 @@ static const struct alg_test_desc alg_te
+@@ -2282,12 +2576,7 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .aead = {
@@ -1831,7 +1861,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2296,12 +2580,7 @@ static const struct alg_test_desc alg_te
+@@ -2296,12 +2585,7 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .aead = {
@@ -1845,7 +1875,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2309,12 +2588,7 @@ static const struct alg_test_desc alg_te
+@@ -2309,12 +2593,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
@@ -1859,7 +1889,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2323,12 +2597,7 @@ static const struct alg_test_desc alg_te
+@@ -2323,12 +2602,7 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .aead = {
@@ -1873,7 +1903,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2344,12 +2613,7 @@ static const struct alg_test_desc alg_te
+@@ -2344,12 +2618,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
@@ -1887,7 +1917,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2358,12 +2622,7 @@ static const struct alg_test_desc alg_te
+@@ -2358,12 +2627,7 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .aead = {
@@ -1901,7 +1931,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2380,12 +2639,7 @@ static const struct alg_test_desc alg_te
+@@ -2380,12 +2644,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
@@ -1915,7 +1945,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2393,12 +2647,7 @@ static const struct alg_test_desc alg_te
+@@ -2393,12 +2652,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
@@ -1929,7 +1959,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2407,12 +2656,7 @@ static const struct alg_test_desc alg_te
+@@ -2407,12 +2661,7 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .aead = {
@@ -1943,7 +1973,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2429,14 +2673,8 @@ static const struct alg_test_desc alg_te
+@@ -2429,14 +2678,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .cipher = {
@@ -1960,7 +1990,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2444,14 +2682,8 @@ static const struct alg_test_desc alg_te
+@@ -2444,14 +2687,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -1977,7 +2007,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2459,14 +2691,8 @@ static const struct alg_test_desc alg_te
+@@ -2459,14 +2696,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -1994,7 +2024,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2474,14 +2700,8 @@ static const struct alg_test_desc alg_te
+@@ -2474,14 +2705,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2011,7 +2041,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2489,14 +2709,8 @@ static const struct alg_test_desc alg_te
+@@ -2489,14 +2714,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2028,7 +2058,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2504,14 +2718,8 @@ static const struct alg_test_desc alg_te
+@@ -2504,14 +2723,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2045,7 +2075,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2519,14 +2727,8 @@ static const struct alg_test_desc alg_te
+@@ -2519,14 +2732,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2062,7 +2092,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2535,14 +2737,8 @@ static const struct alg_test_desc alg_te
+@@ -2535,14 +2742,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .cipher = {
@@ -2079,7 +2109,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2550,14 +2746,8 @@ static const struct alg_test_desc alg_te
+@@ -2550,14 +2751,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2096,7 +2126,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2565,30 +2755,25 @@ static const struct alg_test_desc alg_te
+@@ -2565,30 +2760,25 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2138,7 +2168,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2596,14 +2781,8 @@ static const struct alg_test_desc alg_te
+@@ -2596,14 +2786,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2155,7 +2185,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2611,20 +2790,14 @@ static const struct alg_test_desc alg_te
+@@ -2611,20 +2795,14 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .test = alg_test_hash,
                .suite = {
@@ -2178,7 +2208,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        }, {
                .alg = "compress_null",
-@@ -2633,94 +2806,30 @@ static const struct alg_test_desc alg_te
+@@ -2633,94 +2811,30 @@ static const struct alg_test_desc alg_te
                .alg = "crc32",
                .test = alg_test_hash,
                .suite = {
@@ -2278,7 +2308,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2728,14 +2837,8 @@ static const struct alg_test_desc alg_te
+@@ -2728,14 +2842,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2295,7 +2325,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2743,14 +2846,8 @@ static const struct alg_test_desc alg_te
+@@ -2743,14 +2851,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2312,7 +2342,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2758,14 +2855,8 @@ static const struct alg_test_desc alg_te
+@@ -2758,14 +2860,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2329,7 +2359,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2773,14 +2864,8 @@ static const struct alg_test_desc alg_te
+@@ -2773,14 +2869,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2346,7 +2376,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2788,29 +2873,18 @@ static const struct alg_test_desc alg_te
+@@ -2788,29 +2878,18 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2381,7 +2411,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2818,14 +2892,8 @@ static const struct alg_test_desc alg_te
+@@ -2818,14 +2897,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2398,7 +2428,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2833,14 +2901,8 @@ static const struct alg_test_desc alg_te
+@@ -2833,14 +2906,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2415,7 +2445,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2848,14 +2910,8 @@ static const struct alg_test_desc alg_te
+@@ -2848,14 +2915,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2432,7 +2462,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2864,14 +2920,8 @@ static const struct alg_test_desc alg_te
+@@ -2864,14 +2925,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .comp = {
@@ -2449,7 +2479,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -2879,10 +2929,7 @@ static const struct alg_test_desc alg_te
+@@ -2879,10 +2934,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_kpp,
                .fips_allowed = 1,
                .suite = {
@@ -2461,7 +2491,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        }, {
                .alg = "digest_null",
-@@ -2892,30 +2939,21 @@ static const struct alg_test_desc alg_te
+@@ -2892,30 +2944,21 @@ static const struct alg_test_desc alg_te
                .test = alg_test_drbg,
                .fips_allowed = 1,
                .suite = {
@@ -2495,7 +2525,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        }, {
                /*
-@@ -2930,11 +2968,7 @@ static const struct alg_test_desc alg_te
+@@ -2930,11 +2973,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_drbg,
                .fips_allowed = 1,
                .suite = {
@@ -2508,7 +2538,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        }, {
                /* covered by drbg_nopr_hmac_sha256 test */
-@@ -2954,10 +2988,7 @@ static const struct alg_test_desc alg_te
+@@ -2954,10 +2993,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_drbg,
                .fips_allowed = 1,
                .suite = {
@@ -2520,7 +2550,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        }, {
                /* covered by drbg_nopr_sha256 test */
-@@ -2973,10 +3004,7 @@ static const struct alg_test_desc alg_te
+@@ -2973,10 +3009,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_drbg,
                .fips_allowed = 1,
                .suite = {
@@ -2532,7 +2562,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        }, {
                /* covered by drbg_pr_ctr_aes128 test */
-@@ -2996,10 +3024,7 @@ static const struct alg_test_desc alg_te
+@@ -2996,10 +3029,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_drbg,
                .fips_allowed = 1,
                .suite = {
@@ -2544,7 +2574,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        }, {
                /* covered by drbg_pr_hmac_sha256 test */
-@@ -3019,10 +3044,7 @@ static const struct alg_test_desc alg_te
+@@ -3019,10 +3049,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_drbg,
                .fips_allowed = 1,
                .suite = {
@@ -2556,7 +2586,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        }, {
                /* covered by drbg_pr_sha256 test */
-@@ -3034,23 +3056,13 @@ static const struct alg_test_desc alg_te
+@@ -3034,23 +3061,13 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .test = alg_test_null,
        }, {
@@ -2582,7 +2612,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3058,14 +3070,8 @@ static const struct alg_test_desc alg_te
+@@ -3058,14 +3075,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2599,7 +2629,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3073,14 +3079,8 @@ static const struct alg_test_desc alg_te
+@@ -3073,14 +3084,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2616,7 +2646,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3088,14 +3088,8 @@ static const struct alg_test_desc alg_te
+@@ -3088,14 +3093,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2633,7 +2663,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3103,14 +3097,8 @@ static const struct alg_test_desc alg_te
+@@ -3103,14 +3102,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2650,7 +2680,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3118,14 +3106,8 @@ static const struct alg_test_desc alg_te
+@@ -3118,14 +3111,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2667,7 +2697,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3133,14 +3115,8 @@ static const struct alg_test_desc alg_te
+@@ -3133,14 +3120,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2684,7 +2714,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3151,14 +3127,8 @@ static const struct alg_test_desc alg_te
+@@ -3151,14 +3132,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2701,7 +2731,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3167,14 +3137,8 @@ static const struct alg_test_desc alg_te
+@@ -3167,14 +3142,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .cipher = {
@@ -2718,7 +2748,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3197,14 +3161,8 @@ static const struct alg_test_desc alg_te
+@@ -3197,14 +3166,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2735,7 +2765,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3212,14 +3170,8 @@ static const struct alg_test_desc alg_te
+@@ -3212,14 +3175,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2752,7 +2782,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3227,14 +3179,8 @@ static const struct alg_test_desc alg_te
+@@ -3227,14 +3184,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2769,7 +2799,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3242,14 +3188,8 @@ static const struct alg_test_desc alg_te
+@@ -3242,14 +3193,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2786,7 +2816,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3257,14 +3197,8 @@ static const struct alg_test_desc alg_te
+@@ -3257,14 +3202,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2803,7 +2833,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3272,14 +3206,8 @@ static const struct alg_test_desc alg_te
+@@ -3272,14 +3211,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2820,7 +2850,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3287,14 +3215,8 @@ static const struct alg_test_desc alg_te
+@@ -3287,14 +3220,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2837,7 +2867,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3302,14 +3224,8 @@ static const struct alg_test_desc alg_te
+@@ -3302,14 +3229,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -2854,7 +2884,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3317,10 +3233,7 @@ static const struct alg_test_desc alg_te
+@@ -3317,10 +3238,7 @@ static const struct alg_test_desc alg_te
                .test = alg_test_kpp,
                .fips_allowed = 1,
                .suite = {
@@ -2866,7 +2896,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        }, {
                .alg = "gcm(aes)",
-@@ -3328,14 +3241,8 @@ static const struct alg_test_desc alg_te
+@@ -3328,14 +3246,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .aead = {
@@ -2883,7 +2913,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3343,136 +3250,94 @@ static const struct alg_test_desc alg_te
+@@ -3343,136 +3255,94 @@ static const struct alg_test_desc alg_te
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
@@ -3034,7 +3064,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        }, {
                .alg = "jitterentropy_rng",
-@@ -3484,14 +3349,8 @@ static const struct alg_test_desc alg_te
+@@ -3484,14 +3354,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .cipher = {
@@ -3051,7 +3081,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3499,14 +3358,8 @@ static const struct alg_test_desc alg_te
+@@ -3499,14 +3363,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -3068,7 +3098,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3514,14 +3367,8 @@ static const struct alg_test_desc alg_te
+@@ -3514,14 +3372,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -3085,7 +3115,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3529,14 +3376,8 @@ static const struct alg_test_desc alg_te
+@@ -3529,14 +3381,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -3102,7 +3132,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3544,14 +3385,8 @@ static const struct alg_test_desc alg_te
+@@ -3544,14 +3390,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -3119,7 +3149,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3559,14 +3394,8 @@ static const struct alg_test_desc alg_te
+@@ -3559,14 +3399,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -3136,7 +3166,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3575,14 +3404,8 @@ static const struct alg_test_desc alg_te
+@@ -3575,14 +3409,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .comp = {
@@ -3153,7 +3183,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3591,14 +3414,8 @@ static const struct alg_test_desc alg_te
+@@ -3591,14 +3419,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .comp = {
@@ -3170,7 +3200,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3607,42 +3424,27 @@ static const struct alg_test_desc alg_te
+@@ -3607,42 +3429,27 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .comp = {
@@ -3218,7 +3248,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        }, {
                .alg = "ofb(aes)",
-@@ -3650,14 +3452,8 @@ static const struct alg_test_desc alg_te
+@@ -3650,14 +3457,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .cipher = {
@@ -3235,7 +3265,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3665,24 +3461,15 @@ static const struct alg_test_desc alg_te
+@@ -3665,24 +3466,15 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -3263,7 +3293,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        }, {
                .alg = "rfc3686(ctr(aes))",
-@@ -3690,14 +3477,8 @@ static const struct alg_test_desc alg_te
+@@ -3690,14 +3482,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .cipher = {
@@ -3280,7 +3310,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3706,14 +3487,8 @@ static const struct alg_test_desc alg_te
+@@ -3706,14 +3492,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .aead = {
@@ -3297,7 +3327,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3722,14 +3497,8 @@ static const struct alg_test_desc alg_te
+@@ -3722,14 +3502,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .aead = {
@@ -3314,7 +3344,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3737,14 +3506,8 @@ static const struct alg_test_desc alg_te
+@@ -3737,14 +3511,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
@@ -3331,7 +3361,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3752,14 +3515,8 @@ static const struct alg_test_desc alg_te
+@@ -3752,14 +3520,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
@@ -3348,7 +3378,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3767,71 +3524,47 @@ static const struct alg_test_desc alg_te
+@@ -3767,71 +3529,47 @@ static const struct alg_test_desc alg_te
                .test = alg_test_aead,
                .suite = {
                        .aead = {
@@ -3428,7 +3458,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -3839,162 +3572,120 @@ static const struct alg_test_desc alg_te
+@@ -3839,162 +3577,120 @@ static const struct alg_test_desc alg_te
                .test = alg_test_hash,
                .fips_allowed = 1,
                .suite = {
@@ -3616,7 +3646,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        }, {
                .alg = "xts(aes)",
-@@ -4002,14 +3693,8 @@ static const struct alg_test_desc alg_te
+@@ -4002,14 +3698,8 @@ static const struct alg_test_desc alg_te
                .fips_allowed = 1,
                .suite = {
                        .cipher = {
@@ -3633,7 +3663,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -4017,14 +3702,8 @@ static const struct alg_test_desc alg_te
+@@ -4017,14 +3707,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -3650,7 +3680,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -4032,14 +3711,8 @@ static const struct alg_test_desc alg_te
+@@ -4032,14 +3716,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -3667,7 +3697,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -4047,14 +3720,8 @@ static const struct alg_test_desc alg_te
+@@ -4047,14 +3725,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -3684,7 +3714,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        }
                }
        }, {
-@@ -4062,14 +3729,8 @@ static const struct alg_test_desc alg_te
+@@ -4062,14 +3734,8 @@ static const struct alg_test_desc alg_te
                .test = alg_test_skcipher,
                .suite = {
                        .cipher = {
@@ -6986,7 +7016,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        default y
        select CRYPTO_RNG
        select HW_RANDOM
-@@ -124,13 +149,26 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
+@@ -124,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
          To compile this as a module, choose M here: the module
          will be called caamrng.
  
@@ -7010,6 +7040,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      select CRYPTO_BLKCIPHER
 +      select CRYPTO_AUTHENC
 +      select CRYPTO_AEAD
++      select CRYPTO_HASH
 +      ---help---
 +        CAAM driver for QorIQ Data Path Acceleration Architecture 2.
 +        It handles DPSECI DPAA2 objects that sit on the Management Complex
@@ -7022,9 +7053,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
 +                    CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
 +                    CRYPTO_DEV_FSL_DPAA2_CAAM)
++
++config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
++      def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
++                    CRYPTO_DEV_FSL_DPAA2_CAAM)
 --- a/drivers/crypto/caam/Makefile
 +++ b/drivers/crypto/caam/Makefile
-@@ -5,13 +5,26 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
+@@ -5,13 +5,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
        ccflags-y := -DDEBUG
  endif
  
@@ -7037,6 +7072,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
  obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
  
@@ -7165,7 +7201,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        bool rfc3686;
        bool geniv;
  };
-@@ -163,302 +96,67 @@ struct caam_aead_alg {
+@@ -163,302 +96,71 @@ struct caam_aead_alg {
        bool registered;
  };
  
@@ -7235,6 +7271,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      unsigned int enckeylen;
 -      unsigned int split_key_len;
 -      unsigned int split_key_pad_len;
++      enum dma_data_direction dir;
 +      struct device *jrdev;
 +      struct alginfo adata;
 +      struct alginfo cdata;
@@ -7307,6 +7344,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        struct device *jrdev = ctx->jrdev;
 -      bool keys_fit_inline = false;
 -      u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
++      struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
        u32 *desc;
 +      int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
 +                      ctx->adata.keylen_pad;
@@ -7393,9 +7431,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -                     DUMP_PREFIX_ADDRESS, 16, 4, desc,
 -                     desc_bytes(desc), 1);
 -#endif
-+      cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
++      cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
++                                  ctrlpriv->era);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
        /*
         * Job Descriptor and Shared Descriptors
@@ -7489,18 +7528,20 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -                     desc_bytes(desc), 1);
 -#endif
 +      desc = ctx->sh_desc_dec;
-+      cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
++      cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
++                                  ctrlpriv->era);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
        return 0;
  }
-@@ -470,11 +168,11 @@ static int aead_set_sh_desc(struct crypt
+@@ -470,11 +172,12 @@ static int aead_set_sh_desc(struct crypt
        unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
 -      bool keys_fit_inline;
 -      u32 geniv, moveiv;
++      struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
        u32 ctx1_iv_off = 0;
 -      u32 *desc;
 -      const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
@@ -7511,7 +7552,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                               OP_ALG_AAI_CTR_MOD128);
        const bool is_rfc3686 = alg->caam.rfc3686;
  
-@@ -482,7 +180,7 @@ static int aead_set_sh_desc(struct crypt
+@@ -482,7 +185,7 @@ static int aead_set_sh_desc(struct crypt
                return 0;
  
        /* NULL encryption / decryption */
@@ -7520,7 +7561,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                return aead_null_set_sh_desc(aead);
  
        /*
-@@ -497,8 +195,14 @@ static int aead_set_sh_desc(struct crypt
+@@ -497,8 +200,14 @@ static int aead_set_sh_desc(struct crypt
         * RFC3686 specific:
         *      CONTEXT1[255:128] = {NONCE, IV, COUNTER}
         */
@@ -7536,7 +7577,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        if (alg->caam.geniv)
                goto skip_enc;
-@@ -507,146 +211,64 @@ static int aead_set_sh_desc(struct crypt
+@@ -507,146 +216,64 @@ static int aead_set_sh_desc(struct crypt
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
@@ -7556,33 +7597,31 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      /* Class 2 operation */
 -      append_operation(desc, ctx->class2_alg_type |
 -                       OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+-      /* Read and write assoclen bytes */
+-      append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+-      append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 +      if (desc_inline_query(DESC_AEAD_ENC_LEN +
 +                            (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
 +                            AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
 +                            ARRAY_SIZE(data_len)) < 0)
 +              return -EINVAL;
  
--      /* Read and write assoclen bytes */
--      append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
--      append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+-      /* Skip assoc data */
+-      append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
 +      if (inl_mask & 1)
 +              ctx->adata.key_virt = ctx->key;
 +      else
 +              ctx->adata.key_dma = ctx->key_dma;
  
--      /* Skip assoc data */
--      append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+-      /* read assoc before reading payload */
+-      append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+-                                    FIFOLDST_VLF);
 +      if (inl_mask & 2)
 +              ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
 +      else
 +              ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
  
--      /* read assoc before reading payload */
--      append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
--                                    FIFOLDST_VLF);
-+      ctx->adata.key_inline = !!(inl_mask & 1);
-+      ctx->cdata.key_inline = !!(inl_mask & 2);
 -      /* Load Counter into CONTEXT1 reg */
 -      if (is_rfc3686)
 -              append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
@@ -7602,7 +7641,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      /* Write ICV */
 -      append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
 -                       LDST_SRCDST_BYTE_CONTEXT);
--
++      ctx->adata.key_inline = !!(inl_mask & 1);
++      ctx->cdata.key_inline = !!(inl_mask & 2);
 -      ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 -                                            desc_bytes(desc),
 -                                            DMA_TO_DEVICE);
@@ -7619,9 +7660,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      desc = ctx->sh_desc_enc;
 +      cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
 +                             ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
-+                             false);
++                             false, ctrlpriv->era);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
  skip_enc:
        /*
@@ -7719,13 +7760,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      desc = ctx->sh_desc_dec;
 +      cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
 +                             ctx->authsize, alg->caam.geniv, is_rfc3686,
-+                             nonce, ctx1_iv_off, false);
++                             nonce, ctx1_iv_off, false, ctrlpriv->era);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
        if (!alg->caam.geniv)
                goto skip_givenc;
-@@ -655,107 +277,32 @@ skip_enc:
+@@ -655,107 +282,32 @@ skip_enc:
         * Job Descriptor and Shared Descriptors
         * must all fit into the 64-word Descriptor h/w Buffer
         */
@@ -7849,13 +7890,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      desc = ctx->sh_desc_enc;
 +      cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
 +                                ctx->authsize, is_rfc3686, nonce,
-+                                ctx1_iv_off, false);
++                                ctx1_iv_off, false, ctrlpriv->era);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
  skip_givenc:
        return 0;
-@@ -776,12 +323,12 @@ static int gcm_set_sh_desc(struct crypto
+@@ -776,12 +328,12 @@ static int gcm_set_sh_desc(struct crypto
  {
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
@@ -7872,7 +7913,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                return 0;
  
        /*
-@@ -789,175 +336,35 @@ static int gcm_set_sh_desc(struct crypto
+@@ -789,175 +341,35 @@ static int gcm_set_sh_desc(struct crypto
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
@@ -7973,7 +8014,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -#endif
 +      cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
        /*
         * Job Descriptor and Shared Descriptors
@@ -8064,11 +8105,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -#endif
 +      cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
        return 0;
  }
-@@ -976,11 +383,12 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -976,11 +388,12 @@ static int rfc4106_set_sh_desc(struct cr
  {
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
@@ -8084,7 +8125,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                return 0;
  
        /*
-@@ -988,148 +396,37 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -988,148 +401,37 @@ static int rfc4106_set_sh_desc(struct cr
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
@@ -8166,7 +8207,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
 +                                false);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
        /*
         * Job Descriptor and Shared Descriptors
@@ -8251,11 +8292,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
 +                                false);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
        return 0;
  }
-@@ -1149,12 +446,12 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -1149,12 +451,12 @@ static int rfc4543_set_sh_desc(struct cr
  {
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
@@ -8272,7 +8313,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                return 0;
  
        /*
-@@ -1162,151 +459,37 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -1162,151 +464,37 @@ static int rfc4543_set_sh_desc(struct cr
         * Job Descriptor and Shared Descriptor
         * must fit into the 64-word Descriptor h/w Buffer
         */
@@ -8353,7 +8394,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
 +                                false);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
        /*
         * Job Descriptor and Shared Descriptors
@@ -8442,11 +8483,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
 +                                false);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
        return 0;
  }
-@@ -1322,19 +505,9 @@ static int rfc4543_setauthsize(struct cr
+@@ -1322,74 +510,67 @@ static int rfc4543_setauthsize(struct cr
        return 0;
  }
  
@@ -8465,8 +8506,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
++      struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
        struct crypto_authenc_keys keys;
-@@ -1343,53 +516,32 @@ static int aead_setkey(struct crypto_aea
+       int ret = 0;
+ 
        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;
  
@@ -8489,6 +8532,27 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  #endif
  
 -      ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
++      /*
++       * If DKP is supported, use it in the shared descriptor to generate
++       * the split key.
++       */
++      if (ctrlpriv->era >= 6) {
++              ctx->adata.keylen = keys.authkeylen;
++              ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++                                                    OP_ALG_ALGSEL_MASK);
++
++              if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
++                      goto badkey;
++
++              memcpy(ctx->key, keys.authkey, keys.authkeylen);
++              memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
++                     keys.enckeylen);
++              dma_sync_single_for_device(jrdev, ctx->key_dma,
++                                         ctx->adata.keylen_pad +
++                                         keys.enckeylen, ctx->dir);
++              goto skip_split_key;
++      }
++
 +      ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
 +                          keys.authkeylen, CAAM_MAX_KEY_SIZE -
 +                          keys.enckeylen);
@@ -8507,14 +8571,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      }
 +      memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
 +      dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-+                                 keys.enckeylen, DMA_TO_DEVICE);
++                                 keys.enckeylen, ctx->dir);
  #ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
 -                     ctx->split_key_pad_len + keys.enckeylen, 1);
 +                     ctx->adata.keylen_pad + keys.enckeylen, 1);
  #endif
--
 -      ctx->enckeylen = keys.enckeylen;
 -
 -      ret = aead_set_sh_desc(aead);
@@ -8524,12 +8588,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      }
 -
 -      return ret;
++skip_split_key:
 +      ctx->cdata.keylen = keys.enckeylen;
 +      return aead_set_sh_desc(aead);
  badkey:
        crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
-@@ -1400,7 +552,6 @@ static int gcm_setkey(struct crypto_aead
+@@ -1400,7 +581,6 @@ static int gcm_setkey(struct crypto_aead
  {
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
@@ -8537,7 +8602,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
  #ifdef DEBUG
        print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
-@@ -1408,21 +559,10 @@ static int gcm_setkey(struct crypto_aead
+@@ -1408,21 +588,10 @@ static int gcm_setkey(struct crypto_aead
  #endif
  
        memcpy(ctx->key, key, keylen);
@@ -8548,21 +8613,21 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -              return -ENOMEM;
 -      }
 -      ctx->enckeylen = keylen;
--
++      dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
++      ctx->cdata.keylen = keylen;
 -      ret = gcm_set_sh_desc(aead);
 -      if (ret) {
 -              dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
 -                               DMA_TO_DEVICE);
 -      }
-+      dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-+      ctx->cdata.keylen = keylen;
+-
 -      return ret;
 +      return gcm_set_sh_desc(aead);
  }
  
  static int rfc4106_setkey(struct crypto_aead *aead,
-@@ -1430,7 +570,6 @@ static int rfc4106_setkey(struct crypto_
+@@ -1430,7 +599,6 @@ static int rfc4106_setkey(struct crypto_
  {
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
@@ -8570,7 +8635,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        if (keylen < 4)
                return -EINVAL;
-@@ -1446,22 +585,10 @@ static int rfc4106_setkey(struct crypto_
+@@ -1446,22 +614,10 @@ static int rfc4106_setkey(struct crypto_
         * The last four bytes of the key material are used as the salt value
         * in the nonce. Update the AES key length.
         */
@@ -8592,12 +8657,12 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      return ret;
 +      ctx->cdata.keylen = keylen - 4;
 +      dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
-+                                 DMA_TO_DEVICE);
++                                 ctx->dir);
 +      return rfc4106_set_sh_desc(aead);
  }
  
  static int rfc4543_setkey(struct crypto_aead *aead,
-@@ -1469,7 +596,6 @@ static int rfc4543_setkey(struct crypto_
+@@ -1469,7 +625,6 @@ static int rfc4543_setkey(struct crypto_
  {
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
@@ -8605,7 +8670,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        if (keylen < 4)
                return -EINVAL;
-@@ -1485,43 +611,28 @@ static int rfc4543_setkey(struct crypto_
+@@ -1485,43 +640,28 @@ static int rfc4543_setkey(struct crypto_
         * The last four bytes of the key material are used as the salt value
         * in the nonce. Update the AES key length.
         */
@@ -8627,7 +8692,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      return ret;
 +      ctx->cdata.keylen = keylen - 4;
 +      dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
-+                                 DMA_TO_DEVICE);
++                                 ctx->dir);
 +      return rfc4543_set_sh_desc(aead);
  }
  
@@ -8656,7 +8721,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  #ifdef DEBUG
        print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-@@ -1544,215 +655,33 @@ static int ablkcipher_setkey(struct cryp
+@@ -1544,215 +684,33 @@ static int ablkcipher_setkey(struct cryp
                keylen -= CTR_RFC3686_NONCE_SIZE;
        }
  
@@ -8703,11 +8768,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      /* Load iv */
 -      append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
 -                      LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-+      cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
-+                                   ctx1_iv_off);
-+      dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
+-
 -      /* Load counter into CONTEXT1 reg */
 -      if (is_rfc3686)
 -              append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
@@ -8721,7 +8782,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -
 -      /* Perform operation */
 -      ablkcipher_append_src_dst(desc);
--
++      cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
++                                   ctx1_iv_off);
++      dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++                                 desc_bytes(desc), ctx->dir);
 -      ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 -                                            desc_bytes(desc),
 -                                            DMA_TO_DEVICE);
@@ -8740,7 +8805,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
 +                                   ctx1_iv_off);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
 -      init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
 -      /* Skip if already shared */
@@ -8807,7 +8872,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
 +                                      ctx1_iv_off);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
 -      init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
 -      /* Skip if already shared */
@@ -8889,7 +8954,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  }
  
  static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-@@ -1760,8 +689,7 @@ static int xts_ablkcipher_setkey(struct
+@@ -1760,8 +718,7 @@ static int xts_ablkcipher_setkey(struct
  {
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        struct device *jrdev = ctx->jrdev;
@@ -8899,7 +8964,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
                crypto_ablkcipher_set_flags(ablkcipher,
-@@ -1771,126 +699,38 @@ static int xts_ablkcipher_setkey(struct
+@@ -1771,126 +728,38 @@ static int xts_ablkcipher_setkey(struct
        }
  
        memcpy(ctx->key, key, keylen);
@@ -8961,7 +9026,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -#endif
 +      cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
        /* xts_ablkcipher_decrypt shared descriptor */
        desc = ctx->sh_desc_dec;
@@ -9012,7 +9077,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -#endif
 +      cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  
        return 0;
  }
@@ -9039,7 +9104,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        int sec4_sg_bytes;
        dma_addr_t sec4_sg_dma;
        struct sec4_sg_entry *sec4_sg;
-@@ -1899,12 +739,12 @@ struct aead_edesc {
+@@ -1899,12 +768,12 @@ struct aead_edesc {
  
  /*
   * ablkcipher_edesc - s/w-extended ablkcipher descriptor
@@ -9055,7 +9120,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
   * @hw_desc: the h/w job descriptor followed by any referenced link tables
   */
  struct ablkcipher_edesc {
-@@ -1924,10 +764,11 @@ static void caam_unmap(struct device *de
+@@ -1924,10 +793,11 @@ static void caam_unmap(struct device *de
                       int sec4_sg_bytes)
  {
        if (dst != src) {
@@ -9070,7 +9135,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        }
  
        if (iv_dma)
-@@ -2021,8 +862,7 @@ static void ablkcipher_encrypt_done(stru
+@@ -2021,8 +891,7 @@ static void ablkcipher_encrypt_done(stru
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  #endif
  
@@ -9080,7 +9145,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        if (err)
                caam_jr_strstatus(jrdev, err);
-@@ -2031,10 +871,10 @@ static void ablkcipher_encrypt_done(stru
+@@ -2031,10 +900,10 @@ static void ablkcipher_encrypt_done(stru
        print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       edesc->src_nents > 1 ? 100 : ivsize, 1);
@@ -9094,7 +9159,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        ablkcipher_unmap(jrdev, edesc, req);
  
-@@ -2062,8 +902,7 @@ static void ablkcipher_decrypt_done(stru
+@@ -2062,8 +931,7 @@ static void ablkcipher_decrypt_done(stru
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  #endif
  
@@ -9104,7 +9169,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        if (err)
                caam_jr_strstatus(jrdev, err);
  
-@@ -2071,10 +910,10 @@ static void ablkcipher_decrypt_done(stru
+@@ -2071,10 +939,10 @@ static void ablkcipher_decrypt_done(stru
        print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
                       ivsize, 1);
@@ -9118,7 +9183,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        ablkcipher_unmap(jrdev, edesc, req);
  
-@@ -2114,7 +953,7 @@ static void init_aead_job(struct aead_re
+@@ -2114,7 +982,7 @@ static void init_aead_job(struct aead_re
        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
  
        if (all_contig) {
@@ -9127,7 +9192,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                in_options = 0;
        } else {
                src_dma = edesc->sec4_sg_dma;
-@@ -2129,7 +968,7 @@ static void init_aead_job(struct aead_re
+@@ -2129,7 +997,7 @@ static void init_aead_job(struct aead_re
        out_options = in_options;
  
        if (unlikely(req->src != req->dst)) {
@@ -9136,7 +9201,25 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        dst_dma = sg_dma_address(req->dst);
                } else {
                        dst_dma = edesc->sec4_sg_dma +
-@@ -2175,7 +1014,7 @@ static void init_gcm_job(struct aead_req
+@@ -2147,9 +1015,6 @@ static void init_aead_job(struct aead_re
+               append_seq_out_ptr(desc, dst_dma,
+                                  req->assoclen + req->cryptlen - authsize,
+                                  out_options);
+-
+-      /* REG3 = assoclen */
+-      append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+ }
+ 
+ static void init_gcm_job(struct aead_request *req,
+@@ -2164,6 +1029,7 @@ static void init_gcm_job(struct aead_req
+       unsigned int last;
+ 
+       init_aead_job(req, edesc, all_contig, encrypt);
++      append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+ 
+       /* BUG This should not be specific to generic GCM. */
+       last = 0;
+@@ -2175,7 +1041,7 @@ static void init_gcm_job(struct aead_req
                         FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
        /* Append Salt */
        if (!generic_gcm)
@@ -9145,16 +9228,33 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        /* Append IV */
        append_data(desc, req->iv, ivsize);
        /* End of blank commands */
-@@ -2190,7 +1029,7 @@ static void init_authenc_job(struct aead
+@@ -2190,7 +1056,8 @@ static void init_authenc_job(struct aead
                                                 struct caam_aead_alg, aead);
        unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 -      const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
++      struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
 +      const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
                               OP_ALG_AAI_CTR_MOD128);
        const bool is_rfc3686 = alg->caam.rfc3686;
        u32 *desc = edesc->hw_desc;
-@@ -2236,16 +1075,15 @@ static void init_ablkcipher_job(u32 *sh_
+@@ -2213,6 +1080,15 @@ static void init_authenc_job(struct aead
+       init_aead_job(req, edesc, all_contig, encrypt);
++      /*
++       * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
++       * having DPOVRD as destination.
++       */
++      if (ctrlpriv->era < 3)
++              append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
++      else
++              append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
++
+       if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
+               append_load_as_imm(desc, req->iv, ivsize,
+                                  LDST_CLASS_1_CCB |
+@@ -2236,16 +1112,15 @@ static void init_ablkcipher_job(u32 *sh_
        int len, sec4_sg_index = 0;
  
  #ifdef DEBUG
@@ -9176,7 +9276,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        len = desc_len(sh_desc);
        init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-@@ -2261,7 +1099,7 @@ static void init_ablkcipher_job(u32 *sh_
+@@ -2261,7 +1136,7 @@ static void init_ablkcipher_job(u32 *sh_
        append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
  
        if (likely(req->src == req->dst)) {
@@ -9185,7 +9285,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        dst_dma = sg_dma_address(req->src);
                } else {
                        dst_dma = edesc->sec4_sg_dma +
-@@ -2269,7 +1107,7 @@ static void init_ablkcipher_job(u32 *sh_
+@@ -2269,7 +1144,7 @@ static void init_ablkcipher_job(u32 *sh_
                        out_options = LDST_SGF;
                }
        } else {
@@ -9194,7 +9294,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        dst_dma = sg_dma_address(req->dst);
                } else {
                        dst_dma = edesc->sec4_sg_dma +
-@@ -2296,20 +1134,18 @@ static void init_ablkcipher_giv_job(u32
+@@ -2296,20 +1171,18 @@ static void init_ablkcipher_giv_job(u32
        int len, sec4_sg_index = 0;
  
  #ifdef DEBUG
@@ -9219,7 +9319,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                src_dma = sg_dma_address(req->src);
                in_options = 0;
        } else {
-@@ -2340,87 +1176,100 @@ static struct aead_edesc *aead_edesc_all
+@@ -2340,87 +1213,100 @@ static struct aead_edesc *aead_edesc_all
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
@@ -9375,7 +9475,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                                   edesc->sec4_sg + sec4_sg_index, 0);
        }
  
-@@ -2573,13 +1422,9 @@ static int aead_decrypt(struct aead_requ
+@@ -2573,13 +1459,9 @@ static int aead_decrypt(struct aead_requ
        u32 *desc;
        int ret = 0;
  
@@ -9392,7 +9492,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
-@@ -2619,51 +1464,80 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2619,51 +1501,80 @@ static struct ablkcipher_edesc *ablkciph
        struct device *jrdev = ctx->jrdev;
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
@@ -9496,7 +9596,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                return ERR_PTR(-ENOMEM);
        }
  
-@@ -2673,23 +1547,24 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2673,23 +1584,24 @@ static struct ablkcipher_edesc *ablkciph
        edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
                         desc_bytes;
  
@@ -9528,7 +9628,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                return ERR_PTR(-ENOMEM);
        }
  
-@@ -2701,7 +1576,7 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2701,7 +1613,7 @@ static struct ablkcipher_edesc *ablkciph
                       sec4_sg_bytes, 1);
  #endif
  
@@ -9537,7 +9637,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        return edesc;
  }
  
-@@ -2792,30 +1667,54 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2792,30 +1704,54 @@ static struct ablkcipher_edesc *ablkciph
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        struct device *jrdev = ctx->jrdev;
@@ -9554,10 +9654,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      bool out_contig;
        int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 -      int sec4_sg_index;
--
--      src_nents = sg_count(req->src, req->nbytes);
 +      int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
  
+-      src_nents = sg_count(req->src, req->nbytes);
+-
 -      if (unlikely(req->dst != req->src))
 -              dst_nents = sg_count(req->dst, req->nbytes);
 +      src_nents = sg_nents_for_len(req->src, req->nbytes);
@@ -9608,7 +9708,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        }
  
        /*
-@@ -2825,21 +1724,29 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2825,21 +1761,29 @@ static struct ablkcipher_edesc *ablkciph
        iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, iv_dma)) {
                dev_err(jrdev, "unable to map IV\n");
@@ -9644,7 +9744,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                return ERR_PTR(-ENOMEM);
        }
  
-@@ -2849,24 +1756,24 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2849,24 +1793,24 @@ static struct ablkcipher_edesc *ablkciph
        edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
                         desc_bytes;
  
@@ -9679,7 +9779,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                return ERR_PTR(-ENOMEM);
        }
        edesc->iv_dma = iv_dma;
-@@ -2878,7 +1785,7 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2878,7 +1822,7 @@ static struct ablkcipher_edesc *ablkciph
                       sec4_sg_bytes, 1);
  #endif
  
@@ -9688,7 +9788,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        return edesc;
  }
  
-@@ -2889,7 +1796,7 @@ static int ablkcipher_givencrypt(struct
+@@ -2889,7 +1833,7 @@ static int ablkcipher_givencrypt(struct
        struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
        struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
        struct device *jrdev = ctx->jrdev;
@@ -9697,7 +9797,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        u32 *desc;
        int ret = 0;
  
-@@ -2933,7 +1840,6 @@ struct caam_alg_template {
+@@ -2933,7 +1877,6 @@ struct caam_alg_template {
        } template_u;
        u32 class1_alg_type;
        u32 class2_alg_type;
@@ -9705,7 +9805,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  };
  
  static struct caam_alg_template driver_algs[] = {
-@@ -3118,7 +2024,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3118,7 +2061,6 @@ static struct caam_aead_alg driver_aeads
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9713,7 +9813,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3140,7 +2045,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3140,7 +2082,6 @@ static struct caam_aead_alg driver_aeads
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9721,7 +9821,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3162,7 +2066,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3162,7 +2103,6 @@ static struct caam_aead_alg driver_aeads
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9729,7 +9829,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3184,7 +2087,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3184,7 +2124,6 @@ static struct caam_aead_alg driver_aeads
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9737,7 +9837,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3206,7 +2108,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3206,7 +2145,6 @@ static struct caam_aead_alg driver_aeads
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9745,7 +9845,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3228,7 +2129,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3228,7 +2166,6 @@ static struct caam_aead_alg driver_aeads
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9753,7 +9853,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3250,7 +2150,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3250,7 +2187,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9761,7 +9861,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3273,7 +2172,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3273,7 +2209,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9769,7 +9869,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3296,7 +2194,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3296,7 +2231,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9777,7 +9877,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3319,7 +2216,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3319,7 +2253,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9785,7 +9885,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3342,7 +2238,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3342,7 +2275,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9793,7 +9893,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3365,7 +2260,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3365,7 +2297,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9801,7 +9901,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3388,7 +2282,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3388,7 +2319,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9809,7 +9909,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3411,7 +2304,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3411,7 +2341,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9817,7 +9917,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3434,7 +2326,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3434,7 +2363,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9825,7 +9925,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3457,7 +2348,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3457,7 +2385,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9833,7 +9933,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3480,7 +2370,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3480,7 +2407,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9841,7 +9941,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3503,7 +2392,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3503,7 +2429,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9849,7 +9949,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3526,7 +2414,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3526,7 +2451,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9857,7 +9957,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                }
        },
        {
-@@ -3549,7 +2436,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3549,7 +2473,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9865,7 +9965,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                }
        },
-@@ -3573,7 +2459,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3573,7 +2496,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9873,7 +9973,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3597,7 +2482,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3597,7 +2519,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9881,7 +9981,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3621,7 +2505,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3621,7 +2542,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9889,7 +9989,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3645,7 +2528,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3645,7 +2565,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9897,7 +9997,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3669,7 +2551,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3669,7 +2588,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9905,7 +10005,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3693,7 +2574,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3693,7 +2611,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9913,7 +10013,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3717,7 +2597,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3717,7 +2634,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9921,7 +10021,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3741,7 +2620,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3741,7 +2657,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9929,7 +10029,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3765,7 +2643,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3765,7 +2680,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9937,7 +10037,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3789,7 +2666,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3789,7 +2703,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9945,7 +10045,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3812,7 +2688,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3812,7 +2725,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9953,7 +10053,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3835,7 +2710,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3835,7 +2747,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9961,7 +10061,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3858,7 +2732,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3858,7 +2769,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9969,7 +10069,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3881,7 +2754,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3881,7 +2791,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9977,7 +10077,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3904,7 +2776,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3904,7 +2813,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9985,7 +10085,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3927,7 +2798,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3927,7 +2835,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -9993,7 +10093,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3950,7 +2820,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3950,7 +2857,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10001,7 +10101,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -3973,7 +2842,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3973,7 +2879,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10009,7 +10109,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -3996,7 +2864,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3996,7 +2901,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10017,7 +10117,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -4019,7 +2886,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4019,7 +2923,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10025,7 +10125,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -4042,7 +2908,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4042,7 +2945,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10033,7 +10133,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                },
        },
        {
-@@ -4065,7 +2930,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4065,7 +2967,6 @@ static struct caam_aead_alg driver_aeads
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10041,7 +10141,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .geniv = true,
                },
        },
-@@ -4090,7 +2954,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4090,7 +2991,6 @@ static struct caam_aead_alg driver_aeads
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10049,7 +10149,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .rfc3686 = true,
                },
        },
-@@ -4115,7 +2978,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4115,7 +3015,6 @@ static struct caam_aead_alg driver_aeads
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10057,7 +10157,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .rfc3686 = true,
                        .geniv = true,
                },
-@@ -4141,7 +3003,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4141,7 +3040,6 @@ static struct caam_aead_alg driver_aeads
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10065,7 +10165,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .rfc3686 = true,
                },
        },
-@@ -4166,7 +3027,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4166,7 +3064,6 @@ static struct caam_aead_alg driver_aeads
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10073,7 +10173,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .rfc3686 = true,
                        .geniv = true,
                },
-@@ -4192,7 +3052,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4192,7 +3089,6 @@ static struct caam_aead_alg driver_aeads
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10081,7 +10181,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .rfc3686 = true,
                },
        },
-@@ -4217,7 +3076,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4217,7 +3113,6 @@ static struct caam_aead_alg driver_aeads
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10089,7 +10189,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .rfc3686 = true,
                        .geniv = true,
                },
-@@ -4243,7 +3101,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4243,7 +3138,6 @@ static struct caam_aead_alg driver_aeads
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10097,7 +10197,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .rfc3686 = true,
                },
        },
-@@ -4268,7 +3125,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4268,7 +3162,6 @@ static struct caam_aead_alg driver_aeads
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10105,7 +10205,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .rfc3686 = true,
                        .geniv = true,
                },
-@@ -4294,7 +3150,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4294,7 +3187,6 @@ static struct caam_aead_alg driver_aeads
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10113,7 +10213,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .rfc3686 = true,
                },
        },
-@@ -4319,7 +3174,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4319,7 +3211,6 @@ static struct caam_aead_alg driver_aeads
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10121,7 +10221,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .rfc3686 = true,
                        .geniv = true,
                },
-@@ -4345,7 +3199,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4345,7 +3236,6 @@ static struct caam_aead_alg driver_aeads
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10129,7 +10229,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .rfc3686 = true,
                },
        },
-@@ -4370,7 +3223,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4370,7 +3260,6 @@ static struct caam_aead_alg driver_aeads
                                           OP_ALG_AAI_CTR_MOD128,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
@@ -10137,11 +10237,16 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                        .rfc3686 = true,
                        .geniv = true,
                },
-@@ -4385,16 +3237,34 @@ struct caam_crypto_alg {
+@@ -4383,18 +3272,44 @@ struct caam_crypto_alg {
+       struct caam_alg_entry caam;
+ };
  
- static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
+-static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
++static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
++                         bool uses_dkp)
  {
 +      dma_addr_t dma_addr;
++      struct caam_drv_private *priv;
 +
        ctx->jrdev = caam_jr_alloc();
        if (IS_ERR(ctx->jrdev)) {
@@ -10149,10 +10254,16 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                return PTR_ERR(ctx->jrdev);
        }
  
++      priv = dev_get_drvdata(ctx->jrdev->parent);
++      if (priv->era >= 6 && uses_dkp)
++              ctx->dir = DMA_BIDIRECTIONAL;
++      else
++              ctx->dir = DMA_TO_DEVICE;
++
 +      dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
 +                                      offsetof(struct caam_ctx,
 +                                               sh_desc_enc_dma),
-+                                      DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++                                      ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
 +      if (dma_mapping_error(ctx->jrdev, dma_addr)) {
 +              dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
 +              caam_jr_free(ctx->jrdev);
@@ -10175,7 +10286,23 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        return 0;
  }
-@@ -4421,25 +3291,9 @@ static int caam_aead_init(struct crypto_
+@@ -4406,7 +3321,7 @@ static int caam_cra_init(struct crypto_t
+                container_of(alg, struct caam_crypto_alg, crypto_alg);
+       struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+ 
+-      return caam_init_common(ctx, &caam_alg->caam);
++      return caam_init_common(ctx, &caam_alg->caam, false);
+ }
+ 
+ static int caam_aead_init(struct crypto_aead *tfm)
+@@ -4416,30 +3331,15 @@ static int caam_aead_init(struct crypto_
+                container_of(alg, struct caam_aead_alg, aead);
+       struct caam_ctx *ctx = crypto_aead_ctx(tfm);
+ 
+-      return caam_init_common(ctx, &caam_alg->caam);
++      return caam_init_common(ctx, &caam_alg->caam,
++                              alg->setkey == aead_setkey);
+ }
  
  static void caam_exit_common(struct caam_ctx *ctx)
  {
@@ -10200,11 +10327,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -
 +      dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
 +                             offsetof(struct caam_ctx, sh_desc_enc_dma),
-+                             DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++                             ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
        caam_jr_free(ctx->jrdev);
  }
  
-@@ -4515,7 +3369,6 @@ static struct caam_crypto_alg *caam_alg_
+@@ -4515,7 +3415,6 @@ static struct caam_crypto_alg *caam_alg_
  
        t_alg->caam.class1_alg_type = template->class1_alg_type;
        t_alg->caam.class2_alg_type = template->class2_alg_type;
@@ -10214,7 +10341,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  }
 --- /dev/null
 +++ b/drivers/crypto/caam/caamalg_desc.c
-@@ -0,0 +1,1913 @@
+@@ -0,0 +1,1961 @@
 +/*
 + * Shared descriptors for aead, ablkcipher algorithms
 + *
@@ -10262,16 +10389,16 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
 + *                               (non-protocol) with no (null) encryption.
 + * @desc: pointer to buffer used for descriptor construction
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ *         split key is to be used, the size of the split key itself is
-+ *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ *         A split key is required for SEC Era < 6; the size of the split key
++ *         is specified in this case. Valid algorithm values - one of
++ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ *         with OP_ALG_AAI_HMAC_PRECOMP.
 + * @icvsize: integrity check value (ICV) size (truncated or full)
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
 + */
 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
-+                               unsigned int icvsize)
++                               unsigned int icvsize, int era)
 +{
 +      u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
 +
@@ -10280,13 +10407,18 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      /* Skip if already shared */
 +      key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 +                                 JUMP_COND_SHRD);
-+      if (adata->key_inline)
-+              append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-+                                adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
-+                                KEY_ENC);
-+      else
-+              append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+                         KEY_DEST_MDHA_SPLIT | KEY_ENC);
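++      /*
++       * Era < 6 devices load a pre-computed MDHA split key; Era 6+ devices
++       * derive it on the fly from the raw HMAC key using the DKP protocol
++       * command appended here.
++       */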
++      if (era < 6) {
++              if (adata->key_inline)
++                      append_key_as_imm(desc, adata->key_virt,
++                                        adata->keylen_pad, adata->keylen,
++                                        CLASS_2 | KEY_DEST_MDHA_SPLIT |
++                                        KEY_ENC);
++              else
++                      append_key(desc, adata->key_dma, adata->keylen,
++                                 CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
++      } else {
++              append_proto_dkp(desc, adata);
++      }
 +      set_jump_tgt_here(desc, key_jump_cmd);
 +
 +      /* assoclen + cryptlen = seqinlen */
@@ -10338,16 +10470,16 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
 + *                               (non-protocol) with no (null) decryption.
 + * @desc: pointer to buffer used for descriptor construction
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ *         split key is to be used, the size of the split key itself is
-+ *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ *         A split key is required for SEC Era < 6; the size of the split key
++ *         is specified in this case. Valid algorithm values - one of
++ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ *         with OP_ALG_AAI_HMAC_PRECOMP.
 + * @icvsize: integrity check value (ICV) size (truncated or full)
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
 + */
 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
-+                               unsigned int icvsize)
++                               unsigned int icvsize, int era)
 +{
 +      u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
 +
@@ -10356,13 +10488,18 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      /* Skip if already shared */
 +      key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 +                                 JUMP_COND_SHRD);
-+      if (adata->key_inline)
-+              append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-+                                adata->keylen, CLASS_2 |
-+                                KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+      else
-+              append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+                         KEY_DEST_MDHA_SPLIT | KEY_ENC);
++      if (era < 6) {
++              if (adata->key_inline)
++                      append_key_as_imm(desc, adata->key_virt,
++                                        adata->keylen_pad, adata->keylen,
++                                        CLASS_2 | KEY_DEST_MDHA_SPLIT |
++                                        KEY_ENC);
++              else
++                      append_key(desc, adata->key_dma, adata->keylen,
++                                 CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
++      } else {
++              append_proto_dkp(desc, adata);
++      }
 +      set_jump_tgt_here(desc, key_jump_cmd);
 +
 +      /* Class 2 operation */
@@ -10421,7 +10558,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +static void init_sh_desc_key_aead(u32 * const desc,
 +                                struct alginfo * const cdata,
 +                                struct alginfo * const adata,
-+                                const bool is_rfc3686, u32 *nonce)
++                                const bool is_rfc3686, u32 *nonce, int era)
 +{
 +      u32 *key_jump_cmd;
 +      unsigned int enckeylen = cdata->keylen;
@@ -10441,13 +10578,18 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      if (is_rfc3686)
 +              enckeylen -= CTR_RFC3686_NONCE_SIZE;
 +
-+      if (adata->key_inline)
-+              append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-+                                adata->keylen, CLASS_2 |
-+                                KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+      else
-+              append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+                         KEY_DEST_MDHA_SPLIT | KEY_ENC);
++      if (era < 6) {
++              if (adata->key_inline)
++                      append_key_as_imm(desc, adata->key_virt,
++                                        adata->keylen_pad, adata->keylen,
++                                        CLASS_2 | KEY_DEST_MDHA_SPLIT |
++                                        KEY_ENC);
++              else
++                      append_key(desc, adata->key_dma, adata->keylen,
++                                 CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
++      } else {
++              append_proto_dkp(desc, adata);
++      }
 +
 +      if (cdata->key_inline)
 +              append_key_as_imm(desc, cdata->key_virt, enckeylen,
@@ -10478,26 +10620,27 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + * @cdata: pointer to block cipher transform definitions
 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ *         split key is to be used, the size of the split key itself is
-+ *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ *         A split key is required for SEC Era < 6; the size of the split key
++ *         is specified in this case. Valid algorithm values - one of
++ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ *         with OP_ALG_AAI_HMAC_PRECOMP.
 + * @ivsize: initialization vector size
 + * @icvsize: integrity check value (ICV) size (truncated or full)
 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
 + * @nonce: pointer to rfc3686 nonce
 + * @ctx1_iv_off: IV offset in CONTEXT1 register
 + * @is_qi: true when called from caam/qi
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
 + */
 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
 +                          struct alginfo *adata, unsigned int ivsize,
 +                          unsigned int icvsize, const bool is_rfc3686,
-+                          u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
++                          u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
++                          int era)
 +{
 +      /* Note: Context registers are saved. */
-+      init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
++      init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
 +
 +      /* Class 2 operation */
 +      append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -10523,8 +10666,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      }
 +
 +      /* Read and write assoclen bytes */
-+      append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-+      append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
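++      /*
++       * On Era 3+ ring hardware assoclen can be taken from the DPOVRD
++       * register instead of MATH register 3; the QI backend still passes
++       * it through MATH3.
++       */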
++      if (is_qi || era < 3) {
++              append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++              append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++      } else {
++              append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++              append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++      }
 +
 +      /* Skip assoc data */
 +      append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -10567,27 +10715,27 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + * @cdata: pointer to block cipher transform definitions
 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ *         split key is to be used, the size of the split key itself is
-+ *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ *         A split key is required for SEC Era < 6; the size of the split key
++ *         is specified in this case. Valid algorithm values - one of
++ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ *         with OP_ALG_AAI_HMAC_PRECOMP.
 + * @ivsize: initialization vector size
 + * @icvsize: integrity check value (ICV) size (truncated or full)
 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
 + * @nonce: pointer to rfc3686 nonce
 + * @ctx1_iv_off: IV offset in CONTEXT1 register
 + * @is_qi: true when called from caam/qi
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
 + */
 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
 +                          struct alginfo *adata, unsigned int ivsize,
 +                          unsigned int icvsize, const bool geniv,
 +                          const bool is_rfc3686, u32 *nonce,
-+                          const u32 ctx1_iv_off, const bool is_qi)
++                          const u32 ctx1_iv_off, const bool is_qi, int era)
 +{
 +      /* Note: Context registers are saved. */
-+      init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
++      init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
 +
 +      /* Class 2 operation */
 +      append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
@@ -10614,11 +10762,23 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      }
 +
 +      /* Read and write assoclen bytes */
-+      append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-+      if (geniv)
-+              append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
-+      else
-+              append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++      if (is_qi || era < 3) {
++              append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++              if (geniv)
++                      append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
++                                              ivsize);
++              else
++                      append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
++                                      CAAM_CMD_SZ);
++      } else {
++              append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++              if (geniv)
++                      append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
++                                              ivsize);
++              else
++                      append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
++                                      CAAM_CMD_SZ);
++      }
 +
 +      /* Skip assoc data */
 +      append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -10673,29 +10833,29 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + * @cdata: pointer to block cipher transform definitions
 + *         Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
 + *         with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ *         split key is to be used, the size of the split key itself is
-+ *         specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ *         SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ *         A split key is required for SEC Era < 6; the size of the split key
++ *         is specified in this case. Valid algorithm values - one of
++ *         OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ *         with OP_ALG_AAI_HMAC_PRECOMP.
 + * @ivsize: initialization vector size
 + * @icvsize: integrity check value (ICV) size (truncated or full)
 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
 + * @nonce: pointer to rfc3686 nonce
 + * @ctx1_iv_off: IV offset in CONTEXT1 register
 + * @is_qi: true when called from caam/qi
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
 + */
 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
 +                             struct alginfo *adata, unsigned int ivsize,
 +                             unsigned int icvsize, const bool is_rfc3686,
 +                             u32 *nonce, const u32 ctx1_iv_off,
-+                             const bool is_qi)
++                             const bool is_qi, int era)
 +{
 +      u32 geniv, moveiv;
 +
 +      /* Note: Context registers are saved. */
-+      init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
++      init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
 +
 +      if (is_qi) {
 +              u32 *wait_load_cmd;
@@ -10745,8 +10905,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                       OP_ALG_ENCRYPT);
 +
 +      /* Read and write assoclen bytes */
-+      append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-+      append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++      if (is_qi || era < 3) {
++              append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++              append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++      } else {
++              append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++              append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++      }
 +
 +      /* Skip assoc data */
 +      append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
@@ -10805,19 +10970,20 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + * @cdata: pointer to block cipher transform definitions
 + *         Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
 + *         with OP_ALG_AAI_CBC
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ *         split key is to be used, the size of the split key itself is
-+ *         specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ANDed with
-+ *         OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ *         A split key is required for SEC Era < 6; the size of the split key
++ *         is specified in this case. Valid algorithm values - OP_ALG_ALGSEL_SHA1
++ *         ANDed with OP_ALG_AAI_HMAC_PRECOMP.
 + * @assoclen: associated data length
 + * @ivsize: initialization vector size
 + * @authsize: authentication data size
 + * @blocksize: block cipher size
++ * @era: SEC Era
 + */
 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
 +                         struct alginfo *adata, unsigned int assoclen,
 +                         unsigned int ivsize, unsigned int authsize,
-+                         unsigned int blocksize)
++                         unsigned int blocksize, int era)
 +{
 +      u32 *key_jump_cmd, *zero_payload_jump_cmd;
 +      u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
@@ -10845,13 +11011,18 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 +                                 JUMP_COND_SHRD);
 +
-+      if (adata->key_inline)
-+              append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-+                                adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
-+                                KEY_ENC);
-+      else
-+              append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+                         KEY_DEST_MDHA_SPLIT | KEY_ENC);
++      if (era < 6) {
++              if (adata->key_inline)
++                      append_key_as_imm(desc, adata->key_virt,
++                                        adata->keylen_pad, adata->keylen,
++                                        CLASS_2 | KEY_DEST_MDHA_SPLIT |
++                                        KEY_ENC);
++              else
++                      append_key(desc, adata->key_dma, adata->keylen,
++                                 CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
++      } else {
++              append_proto_dkp(desc, adata);
++      }
 +
 +      if (cdata->key_inline)
 +              append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
@@ -10958,19 +11129,20 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + * @cdata: pointer to block cipher transform definitions
 + *         Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
 + *         with OP_ALG_AAI_CBC
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ *         split key is to be used, the size of the split key itself is
-+ *         specified. Valid algorithm values OP_ALG_ALGSEL_ SHA1 ANDed with
-+ *         OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ *         A split key is required for SEC Era < 6; the size of the split key
++ *         is specified in this case. Valid algorithm values - OP_ALG_ALGSEL_SHA1
++ *         ANDed with OP_ALG_AAI_HMAC_PRECOMP.
 + * @assoclen: associated data length
 + * @ivsize: initialization vector size
 + * @authsize: authentication data size
 + * @blocksize: block cipher size
++ * @era: SEC Era
 + */
 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
 +                         struct alginfo *adata, unsigned int assoclen,
 +                         unsigned int ivsize, unsigned int authsize,
-+                         unsigned int blocksize)
++                         unsigned int blocksize, int era)
 +{
 +      u32 stidx, jumpback;
 +      u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
@@ -10988,8 +11160,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 +                                 JUMP_COND_SHRD);
 +
-+      append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+                 KEY_DEST_MDHA_SPLIT | KEY_ENC);
++      if (era < 6)
++              append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
++                         KEY_DEST_MDHA_SPLIT | KEY_ENC);
++      else
++              append_proto_dkp(desc, adata);
 +
 +      append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
 +                 KEY_DEST_CLASS_REG);
@@ -11060,10 +11235,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      /* VSOL = payloadlen + icvlen + padlen */
 +      append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
 +
-+#ifdef __LITTLE_ENDIAN
-+      append_moveb(desc, MOVE_WAITCOMP |
-+                   MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
-+#endif
++      if (caam_little_end)
++              append_moveb(desc, MOVE_WAITCOMP |
++                           MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
++
 +      /* update Len field */
 +      append_math_sub(desc, REG0, REG0, REG2, 8);
 +
@@ -11113,15 +11288,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +               * SEQ OUT PTR command, Output Pointer (2 words) and
 +               * Output Length into math registers.
 +               */
-+#ifdef __LITTLE_ENDIAN
-+              append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+                          MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) |
-+                          20);
-+#else
-+              append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+                          MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
-+                          20);
-+#endif
++              if (caam_little_end)
++                      append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++                                  MOVE_DEST_MATH0 |
++                                  (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
++              else
++                      append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++                                  MOVE_DEST_MATH0 |
++                                  (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
++
 +              /* Transform SEQ OUT PTR command in SEQ IN PTR command */
 +              append_math_and_imm_u32(desc, REG0, REG0, IMM,
 +                                      ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
@@ -11132,15 +11307,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                                  (4 << LDST_OFFSET_SHIFT));
 +              append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
 +              /* Move the updated fields back to the Job Descriptor */
-+#ifdef __LITTLE_ENDIAN
-+              append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+                          MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) |
-+                          24);
-+#else
-+              append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+                          MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
-+                          24);
-+#endif
++              if (caam_little_end)
++                      append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++                                  MOVE_DEST_DESCBUF |
++                                  (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
++              else
++                      append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++                                  MOVE_DEST_DESCBUF |
++                                  (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
++
 +              /*
 +               * Read the new SEQ IN PTR command, Input Pointer, Input Length
 +               * and then jump back to the next command from the
@@ -11152,15 +11327,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +               * Move the SEQ OUT PTR command, Output Pointer (1 word) and
 +               * Output Length into math registers.
 +               */
-+#ifdef __LITTLE_ENDIAN
-+              append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+                          MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
-+                          12);
-+#else
-+              append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
-+                          MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) |
-+                          12);
-+#endif
++              if (caam_little_end)
++                      append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++                                  MOVE_DEST_MATH0 |
++                                  (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
++              else
++                      append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
++                                  MOVE_DEST_MATH0 |
++                                  (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
++
 +              /* Transform SEQ OUT PTR command in SEQ IN PTR command */
 +              append_math_and_imm_u64(desc, REG0, REG0, IMM,
 +                                      ~(((u64)(CMD_SEQ_IN_PTR ^
@@ -11172,15 +11347,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                                  (4 << LDST_OFFSET_SHIFT));
 +              append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
 +              /* Move the updated fields back to the Job Descriptor */
-+#ifdef __LITTLE_ENDIAN
-+              append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+                          MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
-+                          16);
-+#else
-+              append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
-+                          MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) |
-+                          16);
-+#endif
++              if (caam_little_end)
++                      append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++                                  MOVE_DEST_DESCBUF |
++                                  (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
++              else
++                      append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
++                                  MOVE_DEST_DESCBUF |
++                                  (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
++
 +              /*
 +               * Read the new SEQ IN PTR command, Input Pointer, Input Length
 +               * and then jump back to the next command from the
@@ -11835,7 +12010,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +
 +      /* Load nonce into CONTEXT1 reg */
 +      if (is_rfc3686) {
-+              u8 *nonce = cdata->key_virt + cdata->keylen;
++              const u8 *nonce = cdata->key_virt + cdata->keylen;
 +
 +              append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
 +                                 LDST_CLASS_IND_CCB |
@@ -11900,7 +12075,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +
 +      /* Load nonce into CONTEXT1 reg */
 +      if (is_rfc3686) {
-+              u8 *nonce = cdata->key_virt + cdata->keylen;
++              const u8 *nonce = cdata->key_virt + cdata->keylen;
 +
 +              append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
 +                                 LDST_CLASS_IND_CCB |
@@ -11969,7 +12144,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +
 +      /* Load Nonce into CONTEXT1 reg */
 +      if (is_rfc3686) {
-+              u8 *nonce = cdata->key_virt + cdata->keylen;
++              const u8 *nonce = cdata->key_virt + cdata->keylen;
 +
 +              append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
 +                                 LDST_CLASS_IND_CCB |
@@ -12184,38 +12359,38 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                                       15 * CAAM_CMD_SZ)
 +
 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
-+                               unsigned int icvsize);
++                               unsigned int icvsize, int era);
 +
 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
-+                               unsigned int icvsize);
++                               unsigned int icvsize, int era);
 +
 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
 +                          struct alginfo *adata, unsigned int ivsize,
 +                          unsigned int icvsize, const bool is_rfc3686,
 +                          u32 *nonce, const u32 ctx1_iv_off,
-+                          const bool is_qi);
++                          const bool is_qi, int era);
 +
 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
 +                          struct alginfo *adata, unsigned int ivsize,
 +                          unsigned int icvsize, const bool geniv,
 +                          const bool is_rfc3686, u32 *nonce,
-+                          const u32 ctx1_iv_off, const bool is_qi);
++                          const u32 ctx1_iv_off, const bool is_qi, int era);
 +
 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
 +                             struct alginfo *adata, unsigned int ivsize,
 +                             unsigned int icvsize, const bool is_rfc3686,
 +                             u32 *nonce, const u32 ctx1_iv_off,
-+                             const bool is_qi);
++                             const bool is_qi, int era);
 +
 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
 +                         struct alginfo *adata, unsigned int assoclen,
 +                         unsigned int ivsize, unsigned int authsize,
-+                         unsigned int blocksize);
++                         unsigned int blocksize, int era);
 +
 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
 +                         struct alginfo *adata, unsigned int assoclen,
 +                         unsigned int ivsize, unsigned int authsize,
-+                         unsigned int blocksize);
++                         unsigned int blocksize, int era);
 +
 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
 +                         unsigned int ivsize, unsigned int icvsize,
@@ -12260,7 +12435,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +#endif /* _CAAMALG_DESC_H_ */
 --- /dev/null
 +++ b/drivers/crypto/caam/caamalg_qi.c
-@@ -0,0 +1,2877 @@
+@@ -0,0 +1,3321 @@
 +/*
 + * Freescale FSL CAAM support for crypto API over QI backend.
 + * Based on caamalg.c
@@ -12316,6 +12491,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      u32 sh_desc_givenc[DESC_MAX_USED_LEN];
 +      u8 key[CAAM_MAX_KEY_SIZE];
 +      dma_addr_t key_dma;
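++      /* direction of the DMA mapping covering the key and shared descriptors */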
++      enum dma_data_direction dir;
 +      struct alginfo adata;
 +      struct alginfo cdata;
 +      unsigned int authsize;
@@ -12337,6 +12513,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
 +                             OP_ALG_AAI_CTR_MOD128);
 +      const bool is_rfc3686 = alg->caam.rfc3686;
++      struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
 +
 +      if (!ctx->cdata.keylen || !ctx->authsize)
 +              return 0;
@@ -12387,7 +12564,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +
 +      cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
 +                             ivsize, ctx->authsize, is_rfc3686, nonce,
-+                             ctx1_iv_off, true);
++                             ctx1_iv_off, true, ctrlpriv->era);
 +
 +skip_enc:
 +      /* aead_decrypt shared descriptor */
@@ -12412,7 +12589,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +
 +      cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
 +                             ivsize, ctx->authsize, alg->caam.geniv,
-+                             is_rfc3686, nonce, ctx1_iv_off, true);
++                             is_rfc3686, nonce, ctx1_iv_off, true,
++                             ctrlpriv->era);
 +
 +      if (!alg->caam.geniv)
 +              goto skip_givenc;
@@ -12439,7 +12617,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +
 +      cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
 +                                ivsize, ctx->authsize, is_rfc3686, nonce,
-+                                ctx1_iv_off, true);
++                                ctx1_iv_off, true, ctrlpriv->era);
 +
 +skip_givenc:
 +      return 0;
@@ -12460,6 +12638,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +{
 +      struct caam_ctx *ctx = crypto_aead_ctx(aead);
 +      struct device *jrdev = ctx->jrdev;
++      struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 +      struct crypto_authenc_keys keys;
 +      int ret = 0;
 +
@@ -12474,6 +12653,27 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 +#endif
 +
++      /*
++       * If DKP is supported, use it in the shared descriptor to generate
++       * the split key.
++       */
++      if (ctrlpriv->era >= 6) {
++              ctx->adata.keylen = keys.authkeylen;
++              ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++                                                    OP_ALG_ALGSEL_MASK);
++
++              if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
++                      goto badkey;
++
++              memcpy(ctx->key, keys.authkey, keys.authkeylen);
++              memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
++                     keys.enckeylen);
++              dma_sync_single_for_device(jrdev, ctx->key_dma,
++                                         ctx->adata.keylen_pad +
++                                         keys.enckeylen, ctx->dir);
++              goto skip_split_key;
++      }
++
 +      ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
 +                          keys.authkeylen, CAAM_MAX_KEY_SIZE -
 +                          keys.enckeylen);
@@ -12483,13 +12683,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      /* append the encryption key after the auth split key */
 +      memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
 +      dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-+                                 keys.enckeylen, DMA_TO_DEVICE);
++                                 keys.enckeylen, ctx->dir);
 +#ifdef DEBUG
 +      print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
 +                     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
 +                     ctx->adata.keylen_pad + keys.enckeylen, 1);
 +#endif
 +
++skip_split_key:
 +      ctx->cdata.keylen = keys.enckeylen;
 +
 +      ret = aead_set_sh_desc(aead);
@@ -12529,6 +12730,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      unsigned int assoclen = 13; /* always 13 bytes for TLS */
 +      unsigned int data_len[2];
 +      u32 inl_mask;
++      struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
 +
 +      if (!ctx->cdata.keylen || !ctx->authsize)
 +              return 0;
@@ -12559,17 +12761,20 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      ctx->cdata.key_inline = !!(inl_mask & 2);
 +
 +      cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
-+                            assoclen, ivsize, ctx->authsize, blocksize);
++                            assoclen, ivsize, ctx->authsize, blocksize,
++                            ctrlpriv->era);
 +
 +      /*
 +       * TLS 1.0 decrypt shared descriptor
 +       * Keys do not fit inline, regardless of algorithms used
 +       */
++      ctx->adata.key_inline = false;
 +      ctx->adata.key_dma = ctx->key_dma;
 +      ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
 +
 +      cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
-+                            assoclen, ivsize, ctx->authsize, blocksize);
++                            assoclen, ivsize, ctx->authsize, blocksize,
++                            ctrlpriv->era);
 +
 +      return 0;
 +}
@@ -12589,6 +12794,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +{
 +      struct caam_ctx *ctx = crypto_aead_ctx(tls);
 +      struct device *jrdev = ctx->jrdev;
++      struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
 +      struct crypto_authenc_keys keys;
 +      int ret = 0;
 +
@@ -12603,6 +12809,27 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 +#endif
 +
++      /*
++       * If DKP is supported, use it in the shared descriptor to generate
++       * the split key.
++       */
++      if (ctrlpriv->era >= 6) {
++              ctx->adata.keylen = keys.authkeylen;
++              ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++                                                    OP_ALG_ALGSEL_MASK);
++
++              if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
++                      goto badkey;
++
++              memcpy(ctx->key, keys.authkey, keys.authkeylen);
++              memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
++                     keys.enckeylen);
++              dma_sync_single_for_device(jrdev, ctx->key_dma,
++                                         ctx->adata.keylen_pad +
++                                         keys.enckeylen, ctx->dir);
++              goto skip_split_key;
++      }
++
 +      ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
 +                          keys.authkeylen, CAAM_MAX_KEY_SIZE -
 +                          keys.enckeylen);
@@ -12612,7 +12839,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      /* postpend encryption key to auth split key */
 +      memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
 +      dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-+                                 keys.enckeylen, DMA_TO_DEVICE);
++                                 keys.enckeylen, ctx->dir);
 +
 +#ifdef DEBUG
 +      dev_err(jrdev, "split keylen %d split keylen padded %d\n",
@@ -12622,6 +12849,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                     ctx->adata.keylen_pad + keys.enckeylen, 1);
 +#endif
 +
++skip_split_key:
 +      ctx->cdata.keylen = keys.enckeylen;
 +
 +      ret = tls_set_sh_desc(tls);
@@ -12653,63 +12881,86 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      return -EINVAL;
 +}
 +
-+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-+                           const u8 *key, unsigned int keylen)
++static int gcm_set_sh_desc(struct crypto_aead *aead)
 +{
-+      struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-+      struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
-+      const char *alg_name = crypto_tfm_alg_name(tfm);
-+      struct device *jrdev = ctx->jrdev;
-+      unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-+      u32 ctx1_iv_off = 0;
-+      const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
-+                             OP_ALG_AAI_CTR_MOD128);
-+      const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
-+      int ret = 0;
++      struct caam_ctx *ctx = crypto_aead_ctx(aead);
++      unsigned int ivsize = crypto_aead_ivsize(aead);
++      int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
++                      ctx->cdata.keylen;
++
++      if (!ctx->cdata.keylen || !ctx->authsize)
++              return 0;
 +
-+      memcpy(ctx->key, key, keylen);
-+#ifdef DEBUG
-+      print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+                     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
 +      /*
-+       * AES-CTR needs to load IV in CONTEXT1 reg
-+       * at an offset of 128bits (16bytes)
-+       * CONTEXT1[255:128] = IV
++       * Job Descriptor and Shared Descriptor
++       * must fit into the 64-word Descriptor h/w Buffer
 +       */
-+      if (ctr_mode)
-+              ctx1_iv_off = 16;
++      if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
++              ctx->cdata.key_inline = true;
++              ctx->cdata.key_virt = ctx->key;
++      } else {
++              ctx->cdata.key_inline = false;
++              ctx->cdata.key_dma = ctx->key_dma;
++      }
++
++      cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++                            ctx->authsize, true);
 +
 +      /*
-+       * RFC3686 specific:
-+       *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
-+       *      | *key = {KEY, NONCE}
++       * Job Descriptor and Shared Descriptor
++       * must fit into the 64-word Descriptor h/w Buffer
 +       */
-+      if (is_rfc3686) {
-+              ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
-+              keylen -= CTR_RFC3686_NONCE_SIZE;
++      if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
++              ctx->cdata.key_inline = true;
++              ctx->cdata.key_virt = ctx->key;
++      } else {
++              ctx->cdata.key_inline = false;
++              ctx->cdata.key_dma = ctx->key_dma;
 +      }
 +
-+      dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-+      ctx->cdata.keylen = keylen;
-+      ctx->cdata.key_virt = ctx->key;
-+      ctx->cdata.key_inline = true;
++      cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++                            ctx->authsize, true);
 +
-+      /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
-+      cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
-+                                   is_rfc3686, ctx1_iv_off);
-+      cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
-+                                   is_rfc3686, ctx1_iv_off);
-+      cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
-+                                      ivsize, is_rfc3686, ctx1_iv_off);
++      return 0;
++}
 +
-+      /* Now update the driver contexts with the new shared descriptor */
-+      if (ctx->drv_ctx[ENCRYPT]) {
-+              ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+                                        ctx->sh_desc_enc);
++static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
++{
++      struct caam_ctx *ctx = crypto_aead_ctx(authenc);
++
++      ctx->authsize = authsize;
++      gcm_set_sh_desc(authenc);
++
++      return 0;
++}
++
++static int gcm_setkey(struct crypto_aead *aead,
++                    const u8 *key, unsigned int keylen)
++{
++      struct caam_ctx *ctx = crypto_aead_ctx(aead);
++      struct device *jrdev = ctx->jrdev;
++      int ret;
++
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++      memcpy(ctx->key, key, keylen);
++      dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
++      ctx->cdata.keylen = keylen;
++
++      ret = gcm_set_sh_desc(aead);
++      if (ret)
++              return ret;
++
++      /* Now update the driver contexts with the new shared descriptor */
++      if (ctx->drv_ctx[ENCRYPT]) {
++              ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++                                        ctx->sh_desc_enc);
 +              if (ret) {
 +                      dev_err(jrdev, "driver enc context update failed\n");
-+                      goto badkey;
++                      return ret;
 +              }
 +      }
 +
@@ -12718,48 +12969,94 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                                        ctx->sh_desc_dec);
 +              if (ret) {
 +                      dev_err(jrdev, "driver dec context update failed\n");
-+                      goto badkey;
++                      return ret;
 +              }
 +      }
 +
-+      if (ctx->drv_ctx[GIVENCRYPT]) {
-+              ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
-+                                        ctx->sh_desc_givenc);
-+              if (ret) {
-+                      dev_err(jrdev, "driver givenc context update failed\n");
-+                      goto badkey;
-+              }
++      return 0;
++}
++
++static int rfc4106_set_sh_desc(struct crypto_aead *aead)
++{
++      struct caam_ctx *ctx = crypto_aead_ctx(aead);
++      unsigned int ivsize = crypto_aead_ivsize(aead);
++      int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
++                      ctx->cdata.keylen;
++
++      if (!ctx->cdata.keylen || !ctx->authsize)
++              return 0;
++
++      ctx->cdata.key_virt = ctx->key;
++
++      /*
++       * Job Descriptor and Shared Descriptor
++       * must fit into the 64-word Descriptor h/w Buffer
++       */
++      if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
++              ctx->cdata.key_inline = true;
++      } else {
++              ctx->cdata.key_inline = false;
++              ctx->cdata.key_dma = ctx->key_dma;
 +      }
 +
-+      return ret;
-+badkey:
-+      crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+      return -EINVAL;
++      cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++                                ctx->authsize, true);
++
++      /*
++       * Job Descriptor and Shared Descriptor
++       * must fit into the 64-word Descriptor h/w Buffer
++       */
++      if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
++              ctx->cdata.key_inline = true;
++      } else {
++              ctx->cdata.key_inline = false;
++              ctx->cdata.key_dma = ctx->key_dma;
++      }
++
++      cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++                                ctx->authsize, true);
++
++      return 0;
 +}
 +
-+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-+                               const u8 *key, unsigned int keylen)
++static int rfc4106_setauthsize(struct crypto_aead *authenc,
++                             unsigned int authsize)
 +{
-+      struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++      struct caam_ctx *ctx = crypto_aead_ctx(authenc);
++
++      ctx->authsize = authsize;
++      rfc4106_set_sh_desc(authenc);
++
++      return 0;
++}
++
++static int rfc4106_setkey(struct crypto_aead *aead,
++                        const u8 *key, unsigned int keylen)
++{
++      struct caam_ctx *ctx = crypto_aead_ctx(aead);
 +      struct device *jrdev = ctx->jrdev;
-+      int ret = 0;
++      int ret;
 +
-+      if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
-+              crypto_ablkcipher_set_flags(ablkcipher,
-+                                          CRYPTO_TFM_RES_BAD_KEY_LEN);
-+              dev_err(jrdev, "key size mismatch\n");
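++      /* the trailing 4 bytes of the key are the nonce salt */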
++      if (keylen < 4)
 +              return -EINVAL;
-+      }
++
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
 +
 +      memcpy(ctx->key, key, keylen);
-+      dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-+      ctx->cdata.keylen = keylen;
-+      ctx->cdata.key_virt = ctx->key;
-+      ctx->cdata.key_inline = true;
++      /*
++       * The last four bytes of the key material are used as the salt value
++       * in the nonce. Update the AES key length.
++       */
++      ctx->cdata.keylen = keylen - 4;
++      dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
++                                 ctx->dir);
 +
-+      /* xts ablkcipher encrypt, decrypt shared descriptors */
-+      cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
-+      cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
++      ret = rfc4106_set_sh_desc(aead);
++      if (ret)
++              return ret;
 +
 +      /* Now update the driver contexts with the new shared descriptor */
 +      if (ctx->drv_ctx[ENCRYPT]) {
@@ -12767,7 +13064,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                                        ctx->sh_desc_enc);
 +              if (ret) {
 +                      dev_err(jrdev, "driver enc context update failed\n");
-+                      goto badkey;
++                      return ret;
 +              }
 +      }
 +
@@ -12776,450 +13073,427 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                                        ctx->sh_desc_dec);
 +              if (ret) {
 +                      dev_err(jrdev, "driver dec context update failed\n");
-+                      goto badkey;
++                      return ret;
 +              }
 +      }
 +
-+      return ret;
-+badkey:
-+      crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
 +      return 0;
 +}
 +
-+/*
-+ * aead_edesc - s/w-extended aead descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @assoclen: associated data length, in CAAM endianness
-+ * @assoclen_dma: bus physical mapped address of req->assoclen
-+ * @drv_req: driver-specific request structure
-+ * @sgt: the h/w link table
-+ */
-+struct aead_edesc {
-+      int src_nents;
-+      int dst_nents;
-+      dma_addr_t iv_dma;
-+      int qm_sg_bytes;
-+      dma_addr_t qm_sg_dma;
-+      unsigned int assoclen;
-+      dma_addr_t assoclen_dma;
-+      struct caam_drv_req drv_req;
-+#define CAAM_QI_MAX_AEAD_SG                                           \
-+      ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /   \
-+       sizeof(struct qm_sg_entry))
-+      struct qm_sg_entry sgt[0];
-+};
++static int rfc4543_set_sh_desc(struct crypto_aead *aead)
++{
++      struct caam_ctx *ctx = crypto_aead_ctx(aead);
++      unsigned int ivsize = crypto_aead_ivsize(aead);
++      int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
++                      ctx->cdata.keylen;
 +
-+/*
-+ * tls_edesc - s/w-extended tls descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @drv_req: driver-specific request structure
-+ * @sgt: the h/w link table
-+ */
-+struct tls_edesc {
-+      int src_nents;
-+      int dst_nents;
-+      dma_addr_t iv_dma;
-+      int qm_sg_bytes;
-+      dma_addr_t qm_sg_dma;
-+      struct scatterlist tmp[2];
-+      struct scatterlist *dst;
-+      struct caam_drv_req drv_req;
-+      struct qm_sg_entry sgt[0];
-+};
++      if (!ctx->cdata.keylen || !ctx->authsize)
++              return 0;
 +
-+/*
-+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @drv_req: driver-specific request structure
-+ * @sgt: the h/w link table
-+ */
-+struct ablkcipher_edesc {
-+      int src_nents;
-+      int dst_nents;
-+      dma_addr_t iv_dma;
-+      int qm_sg_bytes;
-+      dma_addr_t qm_sg_dma;
-+      struct caam_drv_req drv_req;
-+#define CAAM_QI_MAX_ABLKCIPHER_SG                                         \
-+      ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
-+       sizeof(struct qm_sg_entry))
-+      struct qm_sg_entry sgt[0];
-+};
++      ctx->cdata.key_virt = ctx->key;
 +
-+static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
-+                                      enum optype type)
-+{
 +      /*
-+       * This function is called on the fast path with values of 'type'
-+       * known at compile time. Invalid arguments are not expected and
-+       * thus no checks are made.
++       * Job Descriptor and Shared Descriptor
++       * must fit into the 64-word Descriptor h/w Buffer
 +       */
-+      struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
-+      u32 *desc;
++      if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
++              ctx->cdata.key_inline = true;
++      } else {
++              ctx->cdata.key_inline = false;
++              ctx->cdata.key_dma = ctx->key_dma;
++      }
 +
-+      if (unlikely(!drv_ctx)) {
-+              spin_lock(&ctx->lock);
++      cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++                                ctx->authsize, true);
 +
-+              /* Read again to check if some other core init drv_ctx */
-+              drv_ctx = ctx->drv_ctx[type];
-+              if (!drv_ctx) {
-+                      int cpu;
++      /*
++       * Job Descriptor and Shared Descriptor
++       * must fit into the 64-word Descriptor h/w Buffer
++       */
++      if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
++              ctx->cdata.key_inline = true;
++      } else {
++              ctx->cdata.key_inline = false;
++              ctx->cdata.key_dma = ctx->key_dma;
++      }
 +
-+                      if (type == ENCRYPT)
-+                              desc = ctx->sh_desc_enc;
-+                      else if (type == DECRYPT)
-+                              desc = ctx->sh_desc_dec;
-+                      else /* (type == GIVENCRYPT) */
-+                              desc = ctx->sh_desc_givenc;
++      cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++                                ctx->authsize, true);
 +
-+                      cpu = smp_processor_id();
-+                      drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
-+                      if (likely(!IS_ERR_OR_NULL(drv_ctx)))
-+                              drv_ctx->op_type = type;
-+
-+                      ctx->drv_ctx[type] = drv_ctx;
-+              }
-+
-+              spin_unlock(&ctx->lock);
-+      }
-+
-+      return drv_ctx;
++      return 0;
 +}
 +
-+static void caam_unmap(struct device *dev, struct scatterlist *src,
-+                     struct scatterlist *dst, int src_nents,
-+                     int dst_nents, dma_addr_t iv_dma, int ivsize,
-+                     enum optype op_type, dma_addr_t qm_sg_dma,
-+                     int qm_sg_bytes)
++static int rfc4543_setauthsize(struct crypto_aead *authenc,
++                             unsigned int authsize)
 +{
-+      if (dst != src) {
-+              if (src_nents)
-+                      dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
-+              dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
-+      } else {
-+              dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
-+      }
-+
-+      if (iv_dma)
-+              dma_unmap_single(dev, iv_dma, ivsize,
-+                               op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
-+                                                       DMA_TO_DEVICE);
-+      if (qm_sg_bytes)
-+              dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
-+}
++      struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 +
-+static void aead_unmap(struct device *dev,
-+                     struct aead_edesc *edesc,
-+                     struct aead_request *req)
-+{
-+      struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+      int ivsize = crypto_aead_ivsize(aead);
++      ctx->authsize = authsize;
++      rfc4543_set_sh_desc(authenc);
 +
-+      caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-+                 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
-+                 edesc->qm_sg_dma, edesc->qm_sg_bytes);
-+      dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++      return 0;
 +}
 +
-+static void tls_unmap(struct device *dev,
-+                    struct tls_edesc *edesc,
-+                    struct aead_request *req)
++static int rfc4543_setkey(struct crypto_aead *aead,
++                        const u8 *key, unsigned int keylen)
 +{
-+      struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+      int ivsize = crypto_aead_ivsize(aead);
-+
-+      caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
-+                 edesc->dst_nents, edesc->iv_dma, ivsize,
-+                 edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
-+                 edesc->qm_sg_bytes);
-+}
++      struct caam_ctx *ctx = crypto_aead_ctx(aead);
++      struct device *jrdev = ctx->jrdev;
++      int ret;
 +
-+static void ablkcipher_unmap(struct device *dev,
-+                           struct ablkcipher_edesc *edesc,
-+                           struct ablkcipher_request *req)
-+{
-+      struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-+      int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++      if (keylen < 4)
++              return -EINVAL;
 +
-+      caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-+                 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
-+                 edesc->qm_sg_dma, edesc->qm_sg_bytes);
-+}
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
 +
-+static void aead_done(struct caam_drv_req *drv_req, u32 status)
-+{
-+      struct device *qidev;
-+      struct aead_edesc *edesc;
-+      struct aead_request *aead_req = drv_req->app_ctx;
-+      struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
-+      struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
-+      int ecode = 0;
++      memcpy(ctx->key, key, keylen);
++      /*
++       * The last four bytes of the key material are used as the salt value
++       * in the nonce. Update the AES key length.
++       */
++      ctx->cdata.keylen = keylen - 4;
++      dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
++                                 ctx->dir);
 +
-+      qidev = caam_ctx->qidev;
++      ret = rfc4543_set_sh_desc(aead);
++      if (ret)
++              return ret;
 +
-+      if (unlikely(status)) {
-+              caam_jr_strstatus(qidev, status);
-+              ecode = -EIO;
++      /* Now update the driver contexts with the new shared descriptor */
++      if (ctx->drv_ctx[ENCRYPT]) {
++              ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++                                        ctx->sh_desc_enc);
++              if (ret) {
++                      dev_err(jrdev, "driver enc context update failed\n");
++                      return ret;
++              }
 +      }
 +
-+      edesc = container_of(drv_req, typeof(*edesc), drv_req);
-+      aead_unmap(qidev, edesc, aead_req);
++      if (ctx->drv_ctx[DECRYPT]) {
++              ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++                                        ctx->sh_desc_dec);
++              if (ret) {
++                      dev_err(jrdev, "driver dec context update failed\n");
++                      return ret;
++              }
++      }
 +
-+      aead_request_complete(aead_req, ecode);
-+      qi_cache_free(edesc);
++      return 0;
 +}
 +
-+/*
-+ * allocate and map the aead extended descriptor
-+ */
-+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-+                                         bool encrypt)
++static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
++                           const u8 *key, unsigned int keylen)
 +{
-+      struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+      struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+      struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
-+                                               typeof(*alg), aead);
-+      struct device *qidev = ctx->qidev;
-+      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+                     GFP_KERNEL : GFP_ATOMIC;
-+      int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+      struct aead_edesc *edesc;
-+      dma_addr_t qm_sg_dma, iv_dma = 0;
-+      int ivsize = 0;
-+      unsigned int authsize = ctx->authsize;
-+      int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
-+      int in_len, out_len;
-+      struct qm_sg_entry *sg_table, *fd_sgt;
-+      struct caam_drv_ctx *drv_ctx;
-+      enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++      struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++      struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
++      const char *alg_name = crypto_tfm_alg_name(tfm);
++      struct device *jrdev = ctx->jrdev;
++      unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++      u32 ctx1_iv_off = 0;
++      const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
++                             OP_ALG_AAI_CTR_MOD128);
++      const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
++      int ret = 0;
 +
-+      drv_ctx = get_drv_ctx(ctx, op_type);
-+      if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
-+              return (struct aead_edesc *)drv_ctx;
++      memcpy(ctx->key, key, keylen);
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++      /*
++       * AES-CTR needs to load IV in CONTEXT1 reg
++       * at an offset of 128 bits (16 bytes):
++       * CONTEXT1[255:128] = IV
++       */
++      if (ctr_mode)
++              ctx1_iv_off = 16;
 +
-+      /* allocate space for base edesc and hw desc commands, link tables */
-+      edesc = qi_cache_alloc(GFP_DMA | flags);
-+      if (unlikely(!edesc)) {
-+              dev_err(qidev, "could not allocate extended descriptor\n");
-+              return ERR_PTR(-ENOMEM);
++      /*
++       * RFC3686 specific:
++       *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
++       *      | *key = {KEY, NONCE}
++       */
++      if (is_rfc3686) {
++              ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
++              keylen -= CTR_RFC3686_NONCE_SIZE;
 +      }
 +
-+      if (likely(req->src == req->dst)) {
-+              src_nents = sg_nents_for_len(req->src, req->assoclen +
-+                                           req->cryptlen +
-+                                              (encrypt ? authsize : 0));
-+              if (unlikely(src_nents < 0)) {
-+                      dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-+                              req->assoclen + req->cryptlen +
-+                              (encrypt ? authsize : 0));
-+                      qi_cache_free(edesc);
-+                      return ERR_PTR(src_nents);
-+              }
++      dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++      ctx->cdata.keylen = keylen;
++      ctx->cdata.key_virt = ctx->key;
++      ctx->cdata.key_inline = true;
 +
-+              mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
-+                                            DMA_BIDIRECTIONAL);
-+              if (unlikely(!mapped_src_nents)) {
-+                      dev_err(qidev, "unable to map source\n");
-+                      qi_cache_free(edesc);
-+                      return ERR_PTR(-ENOMEM);
++      /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
++      cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++                                   is_rfc3686, ctx1_iv_off);
++      cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++                                   is_rfc3686, ctx1_iv_off);
++      cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
++                                      ivsize, is_rfc3686, ctx1_iv_off);
++
++      /* Now update the driver contexts with the new shared descriptor */
++      if (ctx->drv_ctx[ENCRYPT]) {
++              ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++                                        ctx->sh_desc_enc);
++              if (ret) {
++                      dev_err(jrdev, "driver enc context update failed\n");
++                      goto badkey;
 +              }
-+      } else {
-+              src_nents = sg_nents_for_len(req->src, req->assoclen +
-+                                           req->cryptlen);
-+              if (unlikely(src_nents < 0)) {
-+                      dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-+                              req->assoclen + req->cryptlen);
-+                      qi_cache_free(edesc);
-+                      return ERR_PTR(src_nents);
++      }
++
++      if (ctx->drv_ctx[DECRYPT]) {
++              ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++                                        ctx->sh_desc_dec);
++              if (ret) {
++                      dev_err(jrdev, "driver dec context update failed\n");
++                      goto badkey;
 +              }
++      }
 +
-+              dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-+                                           req->cryptlen +
-+                                           (encrypt ? authsize :
-+                                                      (-authsize)));
-+              if (unlikely(dst_nents < 0)) {
-+                      dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-+                              req->assoclen + req->cryptlen +
-+                              (encrypt ? authsize : (-authsize)));
-+                      qi_cache_free(edesc);
-+                      return ERR_PTR(dst_nents);
++      if (ctx->drv_ctx[GIVENCRYPT]) {
++              ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
++                                        ctx->sh_desc_givenc);
++              if (ret) {
++                      dev_err(jrdev, "driver givenc context update failed\n");
++                      goto badkey;
 +              }
++      }
 +
-+              if (src_nents) {
-+                      mapped_src_nents = dma_map_sg(qidev, req->src,
-+                                                    src_nents, DMA_TO_DEVICE);
-+                      if (unlikely(!mapped_src_nents)) {
-+                              dev_err(qidev, "unable to map source\n");
-+                              qi_cache_free(edesc);
-+                              return ERR_PTR(-ENOMEM);
-+                      }
-+              } else {
-+                      mapped_src_nents = 0;
-+              }
++      return ret;
++badkey:
++      crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++      return -EINVAL;
++}
 +
-+              mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
-+                                            DMA_FROM_DEVICE);
-+              if (unlikely(!mapped_dst_nents)) {
-+                      dev_err(qidev, "unable to map destination\n");
-+                      dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
-+                      qi_cache_free(edesc);
-+                      return ERR_PTR(-ENOMEM);
-+              }
++static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
++                               const u8 *key, unsigned int keylen)
++{
++      struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++      struct device *jrdev = ctx->jrdev;
++      int ret = 0;
++
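++      /*
++       * XTS uses two AES keys of equal length concatenated in the key
++       * material, hence exactly twice the regular AES key sizes.
++       */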
++      if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
++              crypto_ablkcipher_set_flags(ablkcipher,
++                                          CRYPTO_TFM_RES_BAD_KEY_LEN);
++              dev_err(jrdev, "key size mismatch\n");
++              return -EINVAL;
 +      }
 +
-+      if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
-+              ivsize = crypto_aead_ivsize(aead);
-+              iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
-+              if (dma_mapping_error(qidev, iv_dma)) {
-+                      dev_err(qidev, "unable to map IV\n");
-+                      caam_unmap(qidev, req->src, req->dst, src_nents,
-+                                 dst_nents, 0, 0, op_type, 0, 0);
-+                      qi_cache_free(edesc);
-+                      return ERR_PTR(-ENOMEM);
++      memcpy(ctx->key, key, keylen);
++      dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++      ctx->cdata.keylen = keylen;
++      ctx->cdata.key_virt = ctx->key;
++      ctx->cdata.key_inline = true;
++
++      /* xts ablkcipher encrypt, decrypt shared descriptors */
++      cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
++      cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
++
++      /* Now update the driver contexts with the new shared descriptor */
++      if (ctx->drv_ctx[ENCRYPT]) {
++              ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++                                        ctx->sh_desc_enc);
++              if (ret) {
++                      dev_err(jrdev, "driver enc context update failed\n");
++                      goto badkey;
 +              }
 +      }
 +
-+      /*
-+       * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
-+       * Input is not contiguous.
-+       */
-+      qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
-+                   (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
-+      if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
-+              dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
-+                      qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
-+              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+                         iv_dma, ivsize, op_type, 0, 0);
-+              qi_cache_free(edesc);
-+              return ERR_PTR(-ENOMEM);
++      if (ctx->drv_ctx[DECRYPT]) {
++              ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++                                        ctx->sh_desc_dec);
++              if (ret) {
++                      dev_err(jrdev, "driver dec context update failed\n");
++                      goto badkey;
++              }
 +      }
-+      sg_table = &edesc->sgt[0];
-+      qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
 +
-+      edesc->src_nents = src_nents;
-+      edesc->dst_nents = dst_nents;
-+      edesc->iv_dma = iv_dma;
-+      edesc->drv_req.app_ctx = req;
-+      edesc->drv_req.cbk = aead_done;
-+      edesc->drv_req.drv_ctx = drv_ctx;
++      return ret;
++badkey:
++      crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++      return -EINVAL;
++}
 +
-+      edesc->assoclen = cpu_to_caam32(req->assoclen);
-+      edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
-+                                           DMA_TO_DEVICE);
-+      if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
-+              dev_err(qidev, "unable to map assoclen\n");
-+              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+                         iv_dma, ivsize, op_type, 0, 0);
-+              qi_cache_free(edesc);
-+              return ERR_PTR(-ENOMEM);
-+      }
++/*
++ * aead_edesc - s/w-extended aead descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @assoclen: associated data length, in CAAM endianness
++ * @assoclen_dma: bus physical mapped address of req->assoclen
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table
++ */
++struct aead_edesc {
++      int src_nents;
++      int dst_nents;
++      dma_addr_t iv_dma;
++      int qm_sg_bytes;
++      dma_addr_t qm_sg_dma;
++      unsigned int assoclen;
++      dma_addr_t assoclen_dma;
++      struct caam_drv_req drv_req;
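++      /*
++       * Cap the number of S/G entries so that the edesc, including its
++       * link table, still fits in one CAAM_QI_MEMCACHE_SIZE allocation.
++       */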
++#define CAAM_QI_MAX_AEAD_SG                                           \
++      ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /   \
++       sizeof(struct qm_sg_entry))
++      struct qm_sg_entry sgt[0];
++};
 +
-+      dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
-+      qm_sg_index++;
-+      if (ivsize) {
-+              dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
-+              qm_sg_index++;
-+      }
-+      sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
-+      qm_sg_index += mapped_src_nents;
++/*
++ * tls_edesc - s/w-extended tls descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
++ * @dst: pointer to output scatterlist: req->dst for in-place requests,
++ *       otherwise req->dst advanced past the associated data
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table
++ */
++struct tls_edesc {
++      int src_nents;
++      int dst_nents;
++      dma_addr_t iv_dma;
++      int qm_sg_bytes;
++      dma_addr_t qm_sg_dma;
++      struct scatterlist tmp[2];
++      struct scatterlist *dst;
++      struct caam_drv_req drv_req;
++      struct qm_sg_entry sgt[0];
++};
 +
-+      if (mapped_dst_nents > 1)
-+              sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-+                               qm_sg_index, 0);
++/*
++ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table
++ */
++struct ablkcipher_edesc {
++      int src_nents;
++      int dst_nents;
++      dma_addr_t iv_dma;
++      int qm_sg_bytes;
++      dma_addr_t qm_sg_dma;
++      struct caam_drv_req drv_req;
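++      /* Same CAAM_QI_MEMCACHE_SIZE-derived cap as for aead_edesc */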
++#define CAAM_QI_MAX_ABLKCIPHER_SG                                         \
++      ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
++       sizeof(struct qm_sg_entry))
++      struct qm_sg_entry sgt[0];
++};
 +
-+      qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
-+      if (dma_mapping_error(qidev, qm_sg_dma)) {
-+              dev_err(qidev, "unable to map S/G table\n");
-+              dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
-+              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+                         iv_dma, ivsize, op_type, 0, 0);
-+              qi_cache_free(edesc);
-+              return ERR_PTR(-ENOMEM);
-+      }
++static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
++                                      enum optype type)
++{
++      /*
++       * This function is called on the fast path with values of 'type'
++       * known at compile time. Invalid arguments are not expected and
++       * thus no checks are made.
++       */
++      struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
++      u32 *desc;
 +
-+      edesc->qm_sg_dma = qm_sg_dma;
-+      edesc->qm_sg_bytes = qm_sg_bytes;
++      if (unlikely(!drv_ctx)) {
++              spin_lock(&ctx->lock);
 +
-+      out_len = req->assoclen + req->cryptlen +
-+                (encrypt ? ctx->authsize : (-ctx->authsize));
-+      in_len = 4 + ivsize + req->assoclen + req->cryptlen;
++              /* Read again to check if some other core initialized drv_ctx */
++              drv_ctx = ctx->drv_ctx[type];
++              if (!drv_ctx) {
++                      int cpu;
 +
-+      fd_sgt = &edesc->drv_req.fd_sgt[0];
-+      dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
++                      if (type == ENCRYPT)
++                              desc = ctx->sh_desc_enc;
++                      else if (type == DECRYPT)
++                              desc = ctx->sh_desc_dec;
++                      else /* (type == GIVENCRYPT) */
++                              desc = ctx->sh_desc_givenc;
 +
-+      if (req->dst == req->src) {
-+              if (mapped_src_nents == 1)
-+                      dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
-+                                       out_len, 0);
-+              else
-+                      dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
-+                                           (1 + !!ivsize) * sizeof(*sg_table),
-+                                           out_len, 0);
-+      } else if (mapped_dst_nents == 1) {
-+              dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
-+                               0);
-+      } else {
-+              dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
-+                                   qm_sg_index, out_len, 0);
++                      cpu = smp_processor_id();
++                      drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
++                      if (likely(!IS_ERR_OR_NULL(drv_ctx)))
++                              drv_ctx->op_type = type;
++
++                      ctx->drv_ctx[type] = drv_ctx;
++              }
++
++              spin_unlock(&ctx->lock);
 +      }
 +
-+      return edesc;
++      return drv_ctx;
 +}
 +
-+static inline int aead_crypt(struct aead_request *req, bool encrypt)
++static void caam_unmap(struct device *dev, struct scatterlist *src,
++                     struct scatterlist *dst, int src_nents,
++                     int dst_nents, dma_addr_t iv_dma, int ivsize,
++                     enum optype op_type, dma_addr_t qm_sg_dma,
++                     int qm_sg_bytes)
 +{
-+      struct aead_edesc *edesc;
-+      struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+      struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+      int ret;
-+
-+      if (unlikely(caam_congested))
-+              return -EAGAIN;
-+
-+      /* allocate extended descriptor */
-+      edesc = aead_edesc_alloc(req, encrypt);
-+      if (IS_ERR_OR_NULL(edesc))
-+              return PTR_ERR(edesc);
-+
-+      /* Create and submit job descriptor */
-+      ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
-+      if (!ret) {
-+              ret = -EINPROGRESS;
++      if (dst != src) {
++              if (src_nents)
++                      dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
++              dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 +      } else {
-+              aead_unmap(ctx->qidev, edesc, req);
-+              qi_cache_free(edesc);
++              dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 +      }
 +
-+      return ret;
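++      /*
++       * For GIVENCRYPT the IV is generated by the accelerator and
++       * written back to memory, hence the reversed DMA direction.
++       */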
++      if (iv_dma)
++              dma_unmap_single(dev, iv_dma, ivsize,
++                               op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
++                                                       DMA_TO_DEVICE);
++      if (qm_sg_bytes)
++              dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
 +}
 +
-+static int aead_encrypt(struct aead_request *req)
++static void aead_unmap(struct device *dev,
++                     struct aead_edesc *edesc,
++                     struct aead_request *req)
 +{
-+      return aead_crypt(req, true);
++      struct crypto_aead *aead = crypto_aead_reqtfm(req);
++      int ivsize = crypto_aead_ivsize(aead);
++
++      caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
++                 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
++                 edesc->qm_sg_dma, edesc->qm_sg_bytes);
++      dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
 +}
 +
-+static int aead_decrypt(struct aead_request *req)
++static void tls_unmap(struct device *dev,
++                    struct tls_edesc *edesc,
++                    struct aead_request *req)
 +{
-+      return aead_crypt(req, false);
++      struct crypto_aead *aead = crypto_aead_reqtfm(req);
++      int ivsize = crypto_aead_ivsize(aead);
++
++      caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
++                 edesc->dst_nents, edesc->iv_dma, ivsize,
++                 edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
++                 edesc->qm_sg_bytes);
 +}
 +
-+static void tls_done(struct caam_drv_req *drv_req, u32 status)
++static void ablkcipher_unmap(struct device *dev,
++                           struct ablkcipher_edesc *edesc,
++                           struct ablkcipher_request *req)
++{
++      struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++      int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++
++      caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
++                 edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
++                 edesc->qm_sg_dma, edesc->qm_sg_bytes);
++}
++
++static void aead_done(struct caam_drv_req *drv_req, u32 status)
 +{
 +      struct device *qidev;
-+      struct tls_edesc *edesc;
++      struct aead_edesc *edesc;
 +      struct aead_request *aead_req = drv_req->app_ctx;
 +      struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
 +      struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
@@ -13228,53 +13502,54 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      qidev = caam_ctx->qidev;
 +
 +      if (unlikely(status)) {
++              u32 ssrc = status & JRSTA_SSRC_MASK;
++              u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
++
 +              caam_jr_strstatus(qidev, status);
-+              ecode = -EIO;
++              /*
++               * If the hw ICV (integrity) check failed, report -EBADMSG;
++               * any other error is mapped to -EIO.
++               */
++              if (ssrc == JRSTA_SSRC_CCB_ERROR &&
++                  err_id == JRSTA_CCBERR_ERRID_ICVCHK)
++                      ecode = -EBADMSG;
++              else
++                      ecode = -EIO;
 +      }
 +
 +      edesc = container_of(drv_req, typeof(*edesc), drv_req);
-+      tls_unmap(qidev, edesc, aead_req);
++      aead_unmap(qidev, edesc, aead_req);
 +
 +      aead_request_complete(aead_req, ecode);
 +      qi_cache_free(edesc);
 +}
 +
 +/*
-+ * allocate and map the tls extended descriptor
++ * allocate and map the aead extended descriptor
 + */
-+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
++static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
++                                         bool encrypt)
 +{
 +      struct crypto_aead *aead = crypto_aead_reqtfm(req);
 +      struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+      unsigned int blocksize = crypto_aead_blocksize(aead);
-+      unsigned int padsize, authsize;
 +      struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
 +                                               typeof(*alg), aead);
 +      struct device *qidev = ctx->qidev;
-+      gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-+                     CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
++      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++                     GFP_KERNEL : GFP_ATOMIC;
 +      int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+      struct tls_edesc *edesc;
++      struct aead_edesc *edesc;
 +      dma_addr_t qm_sg_dma, iv_dma = 0;
 +      int ivsize = 0;
-+      int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
++      unsigned int authsize = ctx->authsize;
++      int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
 +      int in_len, out_len;
 +      struct qm_sg_entry *sg_table, *fd_sgt;
 +      struct caam_drv_ctx *drv_ctx;
 +      enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
-+      struct scatterlist *dst;
-+
-+      if (encrypt) {
-+              padsize = blocksize - ((req->cryptlen + ctx->authsize) %
-+                                      blocksize);
-+              authsize = ctx->authsize + padsize;
-+      } else {
-+              authsize = ctx->authsize;
-+      }
 +
 +      drv_ctx = get_drv_ctx(ctx, op_type);
 +      if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
-+              return (struct tls_edesc *)drv_ctx;
++              return (struct aead_edesc *)drv_ctx;
 +
 +      /* allocate space for base edesc and hw desc commands, link tables */
 +      edesc = qi_cache_alloc(GFP_DMA | flags);
@@ -13286,7 +13561,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      if (likely(req->src == req->dst)) {
 +              src_nents = sg_nents_for_len(req->src, req->assoclen +
 +                                           req->cryptlen +
-+                                           (encrypt ? authsize : 0));
++                                              (encrypt ? authsize : 0));
 +              if (unlikely(src_nents < 0)) {
 +                      dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
 +                              req->assoclen + req->cryptlen +
@@ -13302,7 +13577,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                      qi_cache_free(edesc);
 +                      return ERR_PTR(-ENOMEM);
 +              }
-+              dst = req->dst;
 +      } else {
 +              src_nents = sg_nents_for_len(req->src, req->assoclen +
 +                                           req->cryptlen);
@@ -13313,13 +13587,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                      return ERR_PTR(src_nents);
 +              }
 +
-+              dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
-+              dst_nents = sg_nents_for_len(dst, req->cryptlen +
-+                                           (encrypt ? authsize : 0));
++              dst_nents = sg_nents_for_len(req->dst, req->assoclen +
++                                           req->cryptlen +
++                                           (encrypt ? authsize :
++                                                      (-authsize)));
 +              if (unlikely(dst_nents < 0)) {
 +                      dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-+                              req->cryptlen +
-+                              (encrypt ? authsize : 0));
++                              req->assoclen + req->cryptlen +
++                              (encrypt ? authsize : (-authsize)));
 +                      qi_cache_free(edesc);
 +                      return ERR_PTR(dst_nents);
 +              }
@@ -13336,7 +13611,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                      mapped_src_nents = 0;
 +              }
 +
-+              mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
++              mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
 +                                            DMA_FROM_DEVICE);
 +              if (unlikely(!mapped_dst_nents)) {
 +                      dev_err(qidev, "unable to map destination\n");
@@ -13346,48 +13621,72 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              }
 +      }
 +
-+      ivsize = crypto_aead_ivsize(aead);
-+      iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
-+      if (dma_mapping_error(qidev, iv_dma)) {
-+              dev_err(qidev, "unable to map IV\n");
-+              caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
-+                         op_type, 0, 0);
-+              qi_cache_free(edesc);
-+              return ERR_PTR(-ENOMEM);
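++      /*
++       * Map the IV separately only when it is supplied in req->iv
++       * instead of being embedded in the source buffer.
++       */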
++      if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
++              ivsize = crypto_aead_ivsize(aead);
++              iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
++              if (dma_mapping_error(qidev, iv_dma)) {
++                      dev_err(qidev, "unable to map IV\n");
++                      caam_unmap(qidev, req->src, req->dst, src_nents,
++                                 dst_nents, 0, 0, op_type, 0, 0);
++                      qi_cache_free(edesc);
++                      return ERR_PTR(-ENOMEM);
++              }
 +      }
 +
 +      /*
-+       * Create S/G table: IV, src, dst.
++       * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
 +       * Input is not contiguous.
 +       */
-+      qm_sg_ents = 1 + mapped_src_nents +
++      qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
 +                   (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
++      if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
++              dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
++                      qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++                         iv_dma, ivsize, op_type, 0, 0);
++              qi_cache_free(edesc);
++              return ERR_PTR(-ENOMEM);
++      }
 +      sg_table = &edesc->sgt[0];
 +      qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
 +
 +      edesc->src_nents = src_nents;
 +      edesc->dst_nents = dst_nents;
-+      edesc->dst = dst;
 +      edesc->iv_dma = iv_dma;
 +      edesc->drv_req.app_ctx = req;
-+      edesc->drv_req.cbk = tls_done;
++      edesc->drv_req.cbk = aead_done;
 +      edesc->drv_req.drv_ctx = drv_ctx;
 +
-+      dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-+      qm_sg_index = 1;
++      edesc->assoclen = cpu_to_caam32(req->assoclen);
++      edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
++                                           DMA_TO_DEVICE);
++      if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
++              dev_err(qidev, "unable to map assoclen\n");
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++                         iv_dma, ivsize, op_type, 0, 0);
++              qi_cache_free(edesc);
++              return ERR_PTR(-ENOMEM);
++      }
 +
++      dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
++      qm_sg_index++;
++      if (ivsize) {
++              dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
++              qm_sg_index++;
++      }
 +      sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
 +      qm_sg_index += mapped_src_nents;
 +
 +      if (mapped_dst_nents > 1)
-+              sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
++              sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
 +                               qm_sg_index, 0);
 +
 +      qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
 +      if (dma_mapping_error(qidev, qm_sg_dma)) {
 +              dev_err(qidev, "unable to map S/G table\n");
-+              caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
-+                         ivsize, op_type, 0, 0);
++              dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++                         iv_dma, ivsize, op_type, 0, 0);
 +              qi_cache_free(edesc);
 +              return ERR_PTR(-ENOMEM);
 +      }
@@ -13395,29 +13694,35 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      edesc->qm_sg_dma = qm_sg_dma;
 +      edesc->qm_sg_bytes = qm_sg_bytes;
 +
-+      out_len = req->cryptlen + (encrypt ? authsize : 0);
-+      in_len = ivsize + req->assoclen + req->cryptlen;
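++      /*
++       * Input spans the 4-byte assoclen word, the (optional) IV and the
++       * data; output grows by authsize on encrypt, shrinks on decrypt.
++       */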
++      out_len = req->assoclen + req->cryptlen +
++                (encrypt ? ctx->authsize : (-ctx->authsize));
++      in_len = 4 + ivsize + req->assoclen + req->cryptlen;
 +
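++      /* Compound frame: fd_sgt[0] describes output, fd_sgt[1] input */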
 +      fd_sgt = &edesc->drv_req.fd_sgt[0];
-+
 +      dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
 +
-+      if (req->dst == req->src)
-+              dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
-+                                  (sg_nents_for_len(req->src, req->assoclen) +
-+                                   1) * sizeof(*sg_table), out_len, 0);
-+      else if (mapped_dst_nents == 1)
-+              dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
-+      else
++      if (req->dst == req->src) {
++              if (mapped_src_nents == 1)
++                      dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
++                                       out_len, 0);
++              else
++                      dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
++                                           (1 + !!ivsize) * sizeof(*sg_table),
++                                           out_len, 0);
++      } else if (mapped_dst_nents == 1) {
++              dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
++                               0);
++      } else {
 +              dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
 +                                   qm_sg_index, out_len, 0);
++      }
 +
 +      return edesc;
 +}
 +
-+static int tls_crypt(struct aead_request *req, bool encrypt)
++static inline int aead_crypt(struct aead_request *req, bool encrypt)
 +{
-+      struct tls_edesc *edesc;
++      struct aead_edesc *edesc;
 +      struct crypto_aead *aead = crypto_aead_reqtfm(req);
 +      struct caam_ctx *ctx = crypto_aead_ctx(aead);
 +      int ret;
@@ -13425,233 +13730,485 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      if (unlikely(caam_congested))
 +              return -EAGAIN;
 +
-+      edesc = tls_edesc_alloc(req, encrypt);
++      /* allocate extended descriptor */
++      edesc = aead_edesc_alloc(req, encrypt);
 +      if (IS_ERR_OR_NULL(edesc))
 +              return PTR_ERR(edesc);
 +
++      /* Create and submit job descriptor */
 +      ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
 +      if (!ret) {
 +              ret = -EINPROGRESS;
 +      } else {
-+              tls_unmap(ctx->qidev, edesc, req);
++              aead_unmap(ctx->qidev, edesc, req);
 +              qi_cache_free(edesc);
 +      }
 +
 +      return ret;
 +}
 +
-+static int tls_encrypt(struct aead_request *req)
++static int aead_encrypt(struct aead_request *req)
 +{
-+      return tls_crypt(req, true);
++      return aead_crypt(req, true);
 +}
 +
-+static int tls_decrypt(struct aead_request *req)
++static int aead_decrypt(struct aead_request *req)
 +{
-+      return tls_crypt(req, false);
++      return aead_crypt(req, false);
 +}
 +
-+static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
++static int ipsec_gcm_encrypt(struct aead_request *req)
 +{
-+      struct ablkcipher_edesc *edesc;
-+      struct ablkcipher_request *req = drv_req->app_ctx;
-+      struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-+      struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
-+      struct device *qidev = caam_ctx->qidev;
-+      int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
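++      /* ESP associated data is at least SPI (4 bytes) + seqno (4 bytes) */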
++      if (req->assoclen < 8)
++              return -EINVAL;
 +
-+#ifdef DEBUG
-+      dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
-+#endif
++      return aead_crypt(req, true);
++}
 +
-+      edesc = container_of(drv_req, typeof(*edesc), drv_req);
++static int ipsec_gcm_decrypt(struct aead_request *req)
++{
++      if (req->assoclen < 8)
++              return -EINVAL;
 +
-+      if (status)
-+              caam_jr_strstatus(qidev, status);
++      return aead_crypt(req, false);
++}
 +
-+#ifdef DEBUG
-+      print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
-+                     DUMP_PREFIX_ADDRESS, 16, 4, req->info,
-+                     edesc->src_nents > 1 ? 100 : ivsize, 1);
-+      caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
-+                   DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
-+                   edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
-+#endif
++static void tls_done(struct caam_drv_req *drv_req, u32 status)
++{
++      struct device *qidev;
++      struct tls_edesc *edesc;
++      struct aead_request *aead_req = drv_req->app_ctx;
++      struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
++      struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
++      int ecode = 0;
 +
-+      ablkcipher_unmap(qidev, edesc, req);
-+      qi_cache_free(edesc);
++      qidev = caam_ctx->qidev;
 +
-+      /*
-+       * The crypto API expects us to set the IV (req->info) to the last
-+       * ciphertext block. This is used e.g. by the CTS mode.
-+       */
-+      scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
-+                               ivsize, 0);
++      if (unlikely(status)) {
++              caam_jr_strstatus(qidev, status);
++              ecode = -EIO;
++      }
 +
-+      ablkcipher_request_complete(req, status);
++      edesc = container_of(drv_req, typeof(*edesc), drv_req);
++      tls_unmap(qidev, edesc, aead_req);
++
++      aead_request_complete(aead_req, ecode);
++      qi_cache_free(edesc);
 +}
 +
-+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
-+                                                     *req, bool encrypt)
++/*
++ * allocate and map the tls extended descriptor
++ */
++static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
 +{
-+      struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-+      struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++      struct crypto_aead *aead = crypto_aead_reqtfm(req);
++      struct caam_ctx *ctx = crypto_aead_ctx(aead);
++      unsigned int blocksize = crypto_aead_blocksize(aead);
++      unsigned int padsize, authsize;
++      struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
++                                               typeof(*alg), aead);
 +      struct device *qidev = ctx->qidev;
-+      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+                     GFP_KERNEL : GFP_ATOMIC;
++      gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
++                     CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
 +      int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+      struct ablkcipher_edesc *edesc;
-+      dma_addr_t iv_dma;
-+      bool in_contig;
-+      int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-+      int dst_sg_idx, qm_sg_ents;
++      struct tls_edesc *edesc;
++      dma_addr_t qm_sg_dma, iv_dma = 0;
++      int ivsize = 0;
++      int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
++      int in_len, out_len;
 +      struct qm_sg_entry *sg_table, *fd_sgt;
 +      struct caam_drv_ctx *drv_ctx;
 +      enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++      struct scatterlist *dst;
++
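++      /*
++       * TLS records are CBC-padded to a full block, with at least one
++       * padding byte; on encrypt, account for ICV plus padding.
++       */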
++      if (encrypt) {
++              padsize = blocksize - ((req->cryptlen + ctx->authsize) %
++                                      blocksize);
++              authsize = ctx->authsize + padsize;
++      } else {
++              authsize = ctx->authsize;
++      }
 +
 +      drv_ctx = get_drv_ctx(ctx, op_type);
 +      if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
-+              return (struct ablkcipher_edesc *)drv_ctx;
++              return (struct tls_edesc *)drv_ctx;
 +
-+      src_nents = sg_nents_for_len(req->src, req->nbytes);
-+      if (unlikely(src_nents < 0)) {
-+              dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-+                      req->nbytes);
-+              return ERR_PTR(src_nents);
++      /* allocate space for base edesc and hw desc commands, link tables */
++      edesc = qi_cache_alloc(GFP_DMA | flags);
++      if (unlikely(!edesc)) {
++              dev_err(qidev, "could not allocate extended descriptor\n");
++              return ERR_PTR(-ENOMEM);
 +      }
 +
-+      if (unlikely(req->src != req->dst)) {
-+              dst_nents = sg_nents_for_len(req->dst, req->nbytes);
-+              if (unlikely(dst_nents < 0)) {
-+                      dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-+                              req->nbytes);
-+                      return ERR_PTR(dst_nents);
++      if (likely(req->src == req->dst)) {
++              src_nents = sg_nents_for_len(req->src, req->assoclen +
++                                           req->cryptlen +
++                                           (encrypt ? authsize : 0));
++              if (unlikely(src_nents < 0)) {
++                      dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
++                              req->assoclen + req->cryptlen +
++                              (encrypt ? authsize : 0));
++                      qi_cache_free(edesc);
++                      return ERR_PTR(src_nents);
 +              }
 +
 +              mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
-+                                            DMA_TO_DEVICE);
++                                            DMA_BIDIRECTIONAL);
 +              if (unlikely(!mapped_src_nents)) {
 +                      dev_err(qidev, "unable to map source\n");
++                      qi_cache_free(edesc);
 +                      return ERR_PTR(-ENOMEM);
 +              }
++              dst = req->dst;
++      } else {
++              src_nents = sg_nents_for_len(req->src, req->assoclen +
++                                           req->cryptlen);
++              if (unlikely(src_nents < 0)) {
++                      dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
++                              req->assoclen + req->cryptlen);
++                      qi_cache_free(edesc);
++                      return ERR_PTR(src_nents);
++              }
 +
-+              mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
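++              /*
++               * TLS does not write the associated data back to dst, so
++               * skip past it before sizing and mapping the output.
++               */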
++              dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
++              dst_nents = sg_nents_for_len(dst, req->cryptlen +
++                                           (encrypt ? authsize : 0));
++              if (unlikely(dst_nents < 0)) {
++                      dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
++                              req->cryptlen +
++                              (encrypt ? authsize : 0));
++                      qi_cache_free(edesc);
++                      return ERR_PTR(dst_nents);
++              }
++
++              if (src_nents) {
++                      mapped_src_nents = dma_map_sg(qidev, req->src,
++                                                    src_nents, DMA_TO_DEVICE);
++                      if (unlikely(!mapped_src_nents)) {
++                              dev_err(qidev, "unable to map source\n");
++                              qi_cache_free(edesc);
++                              return ERR_PTR(-ENOMEM);
++                      }
++              } else {
++                      mapped_src_nents = 0;
++              }
++
++              mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
 +                                            DMA_FROM_DEVICE);
 +              if (unlikely(!mapped_dst_nents)) {
 +                      dev_err(qidev, "unable to map destination\n");
 +                      dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
-+                      return ERR_PTR(-ENOMEM);
-+              }
-+      } else {
-+              mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
-+                                            DMA_BIDIRECTIONAL);
-+              if (unlikely(!mapped_src_nents)) {
-+                      dev_err(qidev, "unable to map source\n");
++                      qi_cache_free(edesc);
 +                      return ERR_PTR(-ENOMEM);
 +              }
 +      }
 +
-+      iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
++      ivsize = crypto_aead_ivsize(aead);
++      iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
 +      if (dma_mapping_error(qidev, iv_dma)) {
 +              dev_err(qidev, "unable to map IV\n");
-+              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-+                         0, 0, 0, 0);
-+              return ERR_PTR(-ENOMEM);
-+      }
-+
-+      if (mapped_src_nents == 1 &&
-+          iv_dma + ivsize == sg_dma_address(req->src)) {
-+              in_contig = true;
-+              qm_sg_ents = 0;
-+      } else {
-+              in_contig = false;
-+              qm_sg_ents = 1 + mapped_src_nents;
-+      }
-+      dst_sg_idx = qm_sg_ents;
-+
-+      qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
-+      if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
-+              dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
-+                      qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
-+              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+                         iv_dma, ivsize, op_type, 0, 0);
++              caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
++                         op_type, 0, 0);
++              qi_cache_free(edesc);
 +              return ERR_PTR(-ENOMEM);
 +      }
 +
-+      /* allocate space for base edesc and link tables */
-+      edesc = qi_cache_alloc(GFP_DMA | flags);
-+      if (unlikely(!edesc)) {
-+              dev_err(qidev, "could not allocate extended descriptor\n");
-+              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+                         iv_dma, ivsize, op_type, 0, 0);
-+              return ERR_PTR(-ENOMEM);
-+      }
++      /*
++       * Create S/G table: IV, src, dst.
++       * Input is not contiguous.
++       */
++      qm_sg_ents = 1 + mapped_src_nents +
++                   (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
++      sg_table = &edesc->sgt[0];
++      qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
 +
 +      edesc->src_nents = src_nents;
 +      edesc->dst_nents = dst_nents;
++      edesc->dst = dst;
 +      edesc->iv_dma = iv_dma;
-+      sg_table = &edesc->sgt[0];
-+      edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
 +      edesc->drv_req.app_ctx = req;
-+      edesc->drv_req.cbk = ablkcipher_done;
++      edesc->drv_req.cbk = tls_done;
 +      edesc->drv_req.drv_ctx = drv_ctx;
 +
-+      if (!in_contig) {
-+              dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-+              sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
-+      }
++      dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++      qm_sg_index = 1;
++
++      sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
++      qm_sg_index += mapped_src_nents;
 +
 +      if (mapped_dst_nents > 1)
-+              sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-+                               dst_sg_idx, 0);
++              sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
++                               qm_sg_index, 0);
 +
-+      edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
-+                                        DMA_TO_DEVICE);
-+      if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
++      qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
++      if (dma_mapping_error(qidev, qm_sg_dma)) {
 +              dev_err(qidev, "unable to map S/G table\n");
-+              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+                         iv_dma, ivsize, op_type, 0, 0);
++              caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
++                         ivsize, op_type, 0, 0);
 +              qi_cache_free(edesc);
 +              return ERR_PTR(-ENOMEM);
 +      }
 +
++      edesc->qm_sg_dma = qm_sg_dma;
++      edesc->qm_sg_bytes = qm_sg_bytes;
++
++      out_len = req->cryptlen + (encrypt ? authsize : 0);
++      in_len = ivsize + req->assoclen + req->cryptlen;
++
 +      fd_sgt = &edesc->drv_req.fd_sgt[0];
 +
-+      if (!in_contig)
-+              dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
-+                                        ivsize + req->nbytes, 0);
-+      else
-+              dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
-+                                    0);
++      dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
 +
-+      if (req->src == req->dst) {
-+              if (!in_contig)
-+                      dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
-+                                           sizeof(*sg_table), req->nbytes, 0);
-+              else
-+                      dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
-+                                       req->nbytes, 0);
-+      } else if (mapped_dst_nents > 1) {
-+              dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
-+                                   sizeof(*sg_table), req->nbytes, 0);
-+      } else {
-+              dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
-+                               req->nbytes, 0);
-+      }
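++      /*
++       * In-place: output starts in the same table, past the IV entry
++       * and the source entries covering the associated data.
++       */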
++      if (req->dst == req->src)
++              dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
++                                  (sg_nents_for_len(req->src, req->assoclen) +
++                                   1) * sizeof(*sg_table), out_len, 0);
++      else if (mapped_dst_nents == 1)
++              dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
++      else
++              dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
++                                   qm_sg_index, out_len, 0);
 +
 +      return edesc;
 +}
 +
-+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
-+      struct skcipher_givcrypt_request *creq)
++static int tls_crypt(struct aead_request *req, bool encrypt)
 +{
-+      struct ablkcipher_request *req = &creq->creq;
-+      struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-+      struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-+      struct device *qidev = ctx->qidev;
-+      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+                     GFP_KERNEL : GFP_ATOMIC;
-+      int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
-+      struct ablkcipher_edesc *edesc;
++      struct tls_edesc *edesc;
++      struct crypto_aead *aead = crypto_aead_reqtfm(req);
++      struct caam_ctx *ctx = crypto_aead_ctx(aead);
++      int ret;
++
++      if (unlikely(caam_congested))
++              return -EAGAIN;
++
++      edesc = tls_edesc_alloc(req, encrypt);
++      if (IS_ERR_OR_NULL(edesc))
++              return PTR_ERR(edesc);
++
++      ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
++      if (!ret) {
++              ret = -EINPROGRESS;
++      } else {
++              tls_unmap(ctx->qidev, edesc, req);
++              qi_cache_free(edesc);
++      }
++
++      return ret;
++}
++
++static int tls_encrypt(struct aead_request *req)
++{
++      return tls_crypt(req, true);
++}
++
++static int tls_decrypt(struct aead_request *req)
++{
++      return tls_crypt(req, false);
++}
++
++static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
++{
++      struct ablkcipher_edesc *edesc;
++      struct ablkcipher_request *req = drv_req->app_ctx;
++      struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++      struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
++      struct device *qidev = caam_ctx->qidev;
++      int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++
++#ifdef DEBUG
++      dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
++#endif
++
++      edesc = container_of(drv_req, typeof(*edesc), drv_req);
++
++      if (status)
++              caam_jr_strstatus(qidev, status);
++
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR, "dstiv  @" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, req->info,
++                     edesc->src_nents > 1 ? 100 : ivsize, 1);
++      caam_dump_sg(KERN_ERR, "dst    @" __stringify(__LINE__)": ",
++                   DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
++                   edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
++#endif
++
++      ablkcipher_unmap(qidev, edesc, req);
++      qi_cache_free(edesc);
++
++      /*
++       * The crypto API expects us to set the IV (req->info) to the last
++       * ciphertext block. This is used e.g. by the CTS mode.
++       */
++      scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
++                               ivsize, 0);
++
++      ablkcipher_request_complete(req, status);
++}
++
++static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
++                                                     *req, bool encrypt)
++{
++      struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++      struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++      struct device *qidev = ctx->qidev;
++      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++                     GFP_KERNEL : GFP_ATOMIC;
++      int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
++      struct ablkcipher_edesc *edesc;
++      dma_addr_t iv_dma;
++      bool in_contig;
++      int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++      int dst_sg_idx, qm_sg_ents;
++      struct qm_sg_entry *sg_table, *fd_sgt;
++      struct caam_drv_ctx *drv_ctx;
++      enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++
++      drv_ctx = get_drv_ctx(ctx, op_type);
++      if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
++              return (struct ablkcipher_edesc *)drv_ctx;
++
++      src_nents = sg_nents_for_len(req->src, req->nbytes);
++      if (unlikely(src_nents < 0)) {
++              dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
++                      req->nbytes);
++              return ERR_PTR(src_nents);
++      }
++
++      if (unlikely(req->src != req->dst)) {
++              dst_nents = sg_nents_for_len(req->dst, req->nbytes);
++              if (unlikely(dst_nents < 0)) {
++                      dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
++                              req->nbytes);
++                      return ERR_PTR(dst_nents);
++              }
++
++              mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
++                                            DMA_TO_DEVICE);
++              if (unlikely(!mapped_src_nents)) {
++                      dev_err(qidev, "unable to map source\n");
++                      return ERR_PTR(-ENOMEM);
++              }
++
++              mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
++                                            DMA_FROM_DEVICE);
++              if (unlikely(!mapped_dst_nents)) {
++                      dev_err(qidev, "unable to map destination\n");
++                      dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
++                      return ERR_PTR(-ENOMEM);
++              }
++      } else {
++              mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
++                                            DMA_BIDIRECTIONAL);
++              if (unlikely(!mapped_src_nents)) {
++                      dev_err(qidev, "unable to map source\n");
++                      return ERR_PTR(-ENOMEM);
++              }
++      }
++
++      iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
++      if (dma_mapping_error(qidev, iv_dma)) {
++              dev_err(qidev, "unable to map IV\n");
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++                         0, 0, 0, 0);
++              return ERR_PTR(-ENOMEM);
++      }
++
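++      /*
++       * The QM S/G table can be skipped when the just-mapped IV is
++       * physically contiguous with a single source segment: the engine
++       * can then consume IV and data as one flat buffer.
++       */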
++      if (mapped_src_nents == 1 &&
++          iv_dma + ivsize == sg_dma_address(req->src)) {
++              in_contig = true;
++              qm_sg_ents = 0;
++      } else {
++              in_contig = false;
++              qm_sg_ents = 1 + mapped_src_nents;
++      }
++      dst_sg_idx = qm_sg_ents;
++
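++      /*
++       * S/G entries for the destination are needed only when it spans
++       * more than one mapped segment.
++       */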
++      qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
++      if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
++              dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
++                      qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++                         iv_dma, ivsize, op_type, 0, 0);
++              return ERR_PTR(-ENOMEM);
++      }
++
++      /* allocate space for base edesc and link tables */
++      edesc = qi_cache_alloc(GFP_DMA | flags);
++      if (unlikely(!edesc)) {
++              dev_err(qidev, "could not allocate extended descriptor\n");
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++                         iv_dma, ivsize, op_type, 0, 0);
++              return ERR_PTR(-ENOMEM);
++      }
++
++      edesc->src_nents = src_nents;
++      edesc->dst_nents = dst_nents;
++      edesc->iv_dma = iv_dma;
++      sg_table = &edesc->sgt[0];
++      edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
++      edesc->drv_req.app_ctx = req;
++      edesc->drv_req.cbk = ablkcipher_done;
++      edesc->drv_req.drv_ctx = drv_ctx;
++
++      if (!in_contig) {
++              dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++              sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
++      }
++
++      if (mapped_dst_nents > 1)
++              sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
++                               dst_sg_idx, 0);
++
++      edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
++                                        DMA_TO_DEVICE);
++      if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
++              dev_err(qidev, "unable to map S/G table\n");
++              caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++                         iv_dma, ivsize, op_type, 0, 0);
++              qi_cache_free(edesc);
++              return ERR_PTR(-ENOMEM);
++      }
++
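++      /*
++       * Fill the compound frame: entry 1 describes the input (IV followed
++       * by the source data), entry 0 the output buffer.
++       */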
++      fd_sgt = &edesc->drv_req.fd_sgt[0];
++
++      if (!in_contig)
++              dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
++                                        ivsize + req->nbytes, 0);
++      else
++              dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
++                                    0);
++
++      if (req->src == req->dst) {
++              if (!in_contig)
++                      dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
++                                           sizeof(*sg_table), req->nbytes, 0);
++              else
++                      dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
++                                       req->nbytes, 0);
++      } else if (mapped_dst_nents > 1) {
++              dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
++                                   sizeof(*sg_table), req->nbytes, 0);
++      } else {
++              dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
++                               req->nbytes, 0);
++      }
++
++      return edesc;
++}
++
++static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
++      struct skcipher_givcrypt_request *creq)
++{
++      struct ablkcipher_request *req = &creq->creq;
++      struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++      struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++      struct device *qidev = ctx->qidev;
++      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++                     GFP_KERNEL : GFP_ATOMIC;
++      int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
++      struct ablkcipher_edesc *edesc;
 +      dma_addr_t iv_dma;
 +      bool out_contig;
 +      int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
@@ -13970,25 +14527,80 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +};
 +
 +static struct caam_aead_alg driver_aeads[] = {
-+      /* single-pass ipsec_esp descriptor */
 +      {
 +              .aead = {
 +                      .base = {
-+                              .cra_name = "authenc(hmac(md5),cbc(aes))",
-+                              .cra_driver_name = "authenc-hmac-md5-"
-+                                                 "cbc-aes-caam-qi",
-+                              .cra_blocksize = AES_BLOCK_SIZE,
++                              .cra_name = "rfc4106(gcm(aes))",
++                              .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
++                              .cra_blocksize = 1,
 +                      },
-+                      .setkey = aead_setkey,
-+                      .setauthsize = aead_setauthsize,
-+                      .encrypt = aead_encrypt,
-+                      .decrypt = aead_decrypt,
-+                      .ivsize = AES_BLOCK_SIZE,
-+                      .maxauthsize = MD5_DIGEST_SIZE,
++                      .setkey = rfc4106_setkey,
++                      .setauthsize = rfc4106_setauthsize,
++                      .encrypt = ipsec_gcm_encrypt,
++                      .decrypt = ipsec_gcm_decrypt,
++                      .ivsize = 8,
++                      .maxauthsize = AES_BLOCK_SIZE,
 +              },
 +              .caam = {
-+                      .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+                      .class2_alg_type = OP_ALG_ALGSEL_MD5 |
++                      .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
++              },
++      },
++      {
++              .aead = {
++                      .base = {
++                              .cra_name = "rfc4543(gcm(aes))",
++                              .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
++                              .cra_blocksize = 1,
++                      },
++                      .setkey = rfc4543_setkey,
++                      .setauthsize = rfc4543_setauthsize,
++                      .encrypt = ipsec_gcm_encrypt,
++                      .decrypt = ipsec_gcm_decrypt,
++                      .ivsize = 8,
++                      .maxauthsize = AES_BLOCK_SIZE,
++              },
++              .caam = {
++                      .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
++              },
++      },
++      /* Galois Counter Mode */
++      {
++              .aead = {
++                      .base = {
++                              .cra_name = "gcm(aes)",
++                              .cra_driver_name = "gcm-aes-caam-qi",
++                              .cra_blocksize = 1,
++                      },
++                      .setkey = gcm_setkey,
++                      .setauthsize = gcm_setauthsize,
++                      .encrypt = aead_encrypt,
++                      .decrypt = aead_decrypt,
++                      .ivsize = 12,
++                      .maxauthsize = AES_BLOCK_SIZE,
++              },
++              .caam = {
++                      .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
++              }
++      },
++      /* single-pass ipsec_esp descriptor */
++      {
++              .aead = {
++                      .base = {
++                              .cra_name = "authenc(hmac(md5),cbc(aes))",
++                              .cra_driver_name = "authenc-hmac-md5-"
++                                                 "cbc-aes-caam-qi",
++                              .cra_blocksize = AES_BLOCK_SIZE,
++                      },
++                      .setkey = aead_setkey,
++                      .setauthsize = aead_setauthsize,
++                      .encrypt = aead_encrypt,
++                      .decrypt = aead_decrypt,
++                      .ivsize = AES_BLOCK_SIZE,
++                      .maxauthsize = MD5_DIGEST_SIZE,
++              },
++              .caam = {
++                      .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++                      .class2_alg_type = OP_ALG_ALGSEL_MD5 |
 +                                         OP_ALG_AAI_HMAC_PRECOMP,
 +              }
 +      },
@@ -14808,7 +15420,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      struct caam_alg_entry caam;
 +};
 +
-+static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
++static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
++                         bool uses_dkp)
 +{
 +      struct caam_drv_private *priv;
 +      /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
@@ -14832,8 +15445,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              return PTR_ERR(ctx->jrdev);
 +      }
 +
++      priv = dev_get_drvdata(ctx->jrdev->parent);
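++      /*
++       * On Era 6+ devices the DKP protocol overwrites the key buffer
++       * with the derived split key, hence the bidirectional mapping.
++       */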
++      if (priv->era >= 6 && uses_dkp)
++              ctx->dir = DMA_BIDIRECTIONAL;
++      else
++              ctx->dir = DMA_TO_DEVICE;
++
 +      ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
-+                                    DMA_TO_DEVICE);
++                                    ctx->dir);
 +      if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
 +              dev_err(ctx->jrdev, "unable to map key\n");
 +              caam_jr_free(ctx->jrdev);
@@ -14860,7 +15479,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              ctx->authsize = 0;
 +      }
 +
-+      priv = dev_get_drvdata(ctx->jrdev->parent);
 +      ctx->qidev = priv->qidev;
 +
 +      spin_lock_init(&ctx->lock);
@@ -14878,7 +15496,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                                                      crypto_alg);
 +      struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
 +
-+      return caam_init_common(ctx, &caam_alg->caam);
++      return caam_init_common(ctx, &caam_alg->caam, false);
 +}
 +
 +static int caam_aead_init(struct crypto_aead *tfm)
@@ -14888,7 +15506,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                                                    aead);
 +      struct caam_ctx *ctx = crypto_aead_ctx(tfm);
 +
-+      return caam_init_common(ctx, &caam_alg->caam);
++      return caam_init_common(ctx, &caam_alg->caam,
++                              (alg->setkey == aead_setkey) ||
++                              (alg->setkey == tls_setkey));
 +}
 +
 +static void caam_exit_common(struct caam_ctx *ctx)
@@ -14897,8 +15517,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
 +      caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
 +
-+      dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
-+                       DMA_TO_DEVICE);
++      dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
 +
 +      caam_jr_free(ctx->jrdev);
 +}
@@ -15140,7 +15759,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +MODULE_AUTHOR("Freescale Semiconductor");
 --- /dev/null
 +++ b/drivers/crypto/caam/caamalg_qi2.c
-@@ -0,0 +1,4428 @@
+@@ -0,0 +1,5938 @@
 +/*
 + * Copyright 2015-2016 Freescale Semiconductor Inc.
 + * Copyright 2017 NXP
@@ -15175,6 +15794,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + * POSSIBILITY OF SUCH DAMAGE.
 + */
 +
++#include <linux/fsl/mc.h>
 +#include "compat.h"
 +#include "regs.h"
 +#include "caamalg_qi2.h"
@@ -15185,7 +15805,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +#include "sg_sw_qm2.h"
 +#include "key_gen.h"
 +#include "caamalg_desc.h"
-+#include "../../../drivers/staging/fsl-mc/include/mc.h"
++#include "caamhash_desc.h"
 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
 +
@@ -15231,7 +15851,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + * caam_ctx - per-session context
 + * @flc: Flow Contexts array
 + * @key:  virtual address of the key(s): [authentication key], encryption key
++ * @flc_dma: I/O virtual addresses of the Flow Contexts
 + * @key_dma: I/O virtual address of the key
++ * @dir: DMA direction for mapping key and Flow Contexts
 + * @dev: dpseci device
 + * @adata: authentication algorithm details
 + * @cdata: encryption algorithm details
@@ -15240,7 +15862,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +struct caam_ctx {
 +      struct caam_flc flc[NUM_OP];
 +      u8 key[CAAM_MAX_KEY_SIZE];
++      dma_addr_t flc_dma[NUM_OP];
 +      dma_addr_t key_dma;
++      enum dma_data_direction dir;
 +      struct device *dev;
 +      struct alginfo adata;
 +      struct alginfo cdata;
@@ -15259,9 +15883,9 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +}
 +
 +/*
-+ * qi_cache_alloc - Allocate buffers from CAAM-QI cache
++ * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 + *
-+ * Allocate data on the hotpath. Instead of using kmalloc, one can use the
++ * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 + * hosting 16 SG entries.
@@ -15270,15 +15894,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + *
 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
 + */
-+static inline void *qi_cache_alloc(gfp_t flags)
++static inline void *qi_cache_zalloc(gfp_t flags)
 +{
-+      return kmem_cache_alloc(qi_cache, flags);
++      return kmem_cache_zalloc(qi_cache, flags);
 +}
 +
 +/*
 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 + *
-+ * @obj - buffer previously allocated by qi_cache_alloc
++ * @obj - buffer previously allocated by qi_cache_zalloc
 + *
 + * No checking is done; the call is a pass-through to
 + * kmem_cache_free(...)
@@ -15297,6 +15921,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      case CRYPTO_ALG_TYPE_AEAD:
 +              return aead_request_ctx(container_of(areq, struct aead_request,
 +                                                   base));
++      case CRYPTO_ALG_TYPE_AHASH:
++              return ahash_request_ctx(ahash_request_cast(areq));
 +      default:
 +              return ERR_PTR(-EINVAL);
 +      }
@@ -15332,6 +15958,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      struct caam_ctx *ctx = crypto_aead_ctx(aead);
 +      unsigned int ivsize = crypto_aead_ivsize(aead);
 +      struct device *dev = ctx->dev;
++      struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
 +      struct caam_flc *flc;
 +      u32 *desc;
 +      u32 ctx1_iv_off = 0;
@@ -15393,19 +16020,17 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      if (alg->caam.geniv)
 +              cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
 +                                        ivsize, ctx->authsize, is_rfc3686,
-+                                        nonce, ctx1_iv_off, true);
++                                        nonce, ctx1_iv_off, true,
++                                        priv->sec_attr.era);
 +      else
 +              cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
 +                                     ivsize, ctx->authsize, is_rfc3686, nonce,
-+                                     ctx1_iv_off, true);
++                                     ctx1_iv_off, true, priv->sec_attr.era);
 +
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
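++      /*
++       * The flow context was DMA mapped once at tfm init time; after
++       * rewriting the shared descriptor only a device sync is needed.
++       */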
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      /* aead_decrypt shared descriptor */
 +      if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
@@ -15429,18 +16054,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +
 +      flc = &ctx->flc[DECRYPT];
 +      desc = flc->sh_desc;
-+
 +      cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
 +                             ivsize, ctx->authsize, alg->caam.geniv,
-+                             is_rfc3686, nonce, ctx1_iv_off, true);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++                             is_rfc3686, nonce, ctx1_iv_off, true,
++                             priv->sec_attr.era);
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      return 0;
 +}
@@ -15476,137 +16097,12 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      complete(&res->completion);
 +}
 +
-+static int gen_split_key_sh(struct device *dev, u8 *key_out,
-+                          struct alginfo * const adata, const u8 *key_in,
-+                          u32 keylen)
-+{
-+      struct caam_request *req_ctx;
-+      u32 *desc;
-+      struct split_key_sh_result result;
-+      dma_addr_t dma_addr_in, dma_addr_out;
-+      struct caam_flc *flc;
-+      struct dpaa2_fl_entry *in_fle, *out_fle;
-+      int ret = -ENOMEM;
-+
-+      req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
-+      if (!req_ctx)
-+              return -ENOMEM;
-+
-+      in_fle = &req_ctx->fd_flt[1];
-+      out_fle = &req_ctx->fd_flt[0];
-+
-+      flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
-+      if (!flc)
-+              goto err_flc;
-+
-+      dma_addr_in = dma_map_single(dev, (void *)key_in, keylen,
-+                                   DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, dma_addr_in)) {
-+              dev_err(dev, "unable to map key input memory\n");
-+              goto err_dma_addr_in;
-+      }
-+
-+      dma_addr_out = dma_map_single(dev, key_out, adata->keylen_pad,
-+                                    DMA_FROM_DEVICE);
-+      if (dma_mapping_error(dev, dma_addr_out)) {
-+              dev_err(dev, "unable to map key output memory\n");
-+              goto err_dma_addr_out;
-+      }
-+
-+      desc = flc->sh_desc;
-+
-+      init_sh_desc(desc, 0);
-+      append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
-+
-+      /* Sets MDHA up into an HMAC-INIT */
-+      append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
-+                       OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
-+                       OP_ALG_AS_INIT);
-+
-+      /*
-+       * do a FIFO_LOAD of zero, this will trigger the internal key expansion
-+       * into both pads inside MDHA
-+       */
-+      append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
-+                              FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
-+
-+      /*
-+       * FIFO_STORE with the explicit split-key content store
-+       * (0x26 output type)
-+       */
-+      append_fifo_store(desc, dma_addr_out, adata->keylen,
-+                        LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              goto err_flc_dma;
-+      }
-+
-+      dpaa2_fl_set_final(in_fle, true);
-+      dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
-+      dpaa2_fl_set_addr(in_fle, dma_addr_in);
-+      dpaa2_fl_set_len(in_fle, keylen);
-+      dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+      dpaa2_fl_set_addr(out_fle, dma_addr_out);
-+      dpaa2_fl_set_len(out_fle, adata->keylen_pad);
-+
-+#ifdef DEBUG
-+      print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+                     DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
-+      print_hex_dump(KERN_ERR, "desc@" __stringify(__LINE__)": ",
-+                     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-+#endif
-+
-+      result.err = 0;
-+      init_completion(&result.completion);
-+      result.dev = dev;
-+
-+      req_ctx->flc = flc;
-+      req_ctx->cbk = split_key_sh_done;
-+      req_ctx->ctx = &result;
-+
-+      ret = dpaa2_caam_enqueue(dev, req_ctx);
-+      if (ret == -EINPROGRESS) {
-+              /* in progress */
-+              wait_for_completion(&result.completion);
-+              ret = result.err;
-+#ifdef DEBUG
-+              print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+                             DUMP_PREFIX_ADDRESS, 16, 4, key_out,
-+                             adata->keylen_pad, 1);
-+#endif
-+      }
-+
-+      dma_unmap_single(dev, flc->flc_dma, sizeof(flc->flc) + desc_bytes(desc),
-+                       DMA_TO_DEVICE);
-+err_flc_dma:
-+      dma_unmap_single(dev, dma_addr_out, adata->keylen_pad, DMA_FROM_DEVICE);
-+err_dma_addr_out:
-+      dma_unmap_single(dev, dma_addr_in, keylen, DMA_TO_DEVICE);
-+err_dma_addr_in:
-+      kfree(flc);
-+err_flc:
-+      kfree(req_ctx);
-+      return ret;
-+}
-+
-+static int gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
-+                            u32 authkeylen)
-+{
-+      return gen_split_key_sh(ctx->dev, ctx->key, &ctx->adata, key_in,
-+                              authkeylen);
-+}
-+
 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
 +                     unsigned int keylen)
 +{
 +      struct caam_ctx *ctx = crypto_aead_ctx(aead);
 +      struct device *dev = ctx->dev;
 +      struct crypto_authenc_keys keys;
-+      int ret;
 +
 +      if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 +              goto badkey;
@@ -15619,34 +16115,17 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 +#endif
 +
-+      ctx->adata.keylen = split_key_len(ctx->adata.algtype &
-+                                        OP_ALG_ALGSEL_MASK);
-+      ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
-+                                                OP_ALG_ALGSEL_MASK);
-+
-+#ifdef DEBUG
-+      dev_err(dev, "split keylen %d split keylen padded %d\n",
-+              ctx->adata.keylen, ctx->adata.keylen_pad);
-+      print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+                     DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, keylen, 1);
-+#endif
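++      /*
++       * The ipad/opad split key is derived in place by the CAAM itself
++       * (DKP protocol), so only the plain authentication key is kept;
++       * keylen_pad still reserves room for the derived key.
++       */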
++      ctx->adata.keylen = keys.authkeylen;
++      ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++                                            OP_ALG_ALGSEL_MASK);
 +
 +      if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
 +              goto badkey;
 +
-+      ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
-+      if (ret)
-+              goto badkey;
-+
-+      /* postpend encryption key to auth split key */
++      memcpy(ctx->key, keys.authkey, keys.authkeylen);
 +      memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
-+
-+      ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
-+                                    keys.enckeylen, DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, ctx->key_dma)) {
-+              dev_err(dev, "unable to map key i/o memory\n");
-+              return -ENOMEM;
-+      }
++      dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
++                                 keys.enckeylen, ctx->dir);
 +#ifdef DEBUG
 +      print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
 +                     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
@@ -15655,12 +16134,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +
 +      ctx->cdata.keylen = keys.enckeylen;
 +
-+      ret = aead_set_sh_desc(aead);
-+      if (ret)
-+              dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
-+                               keys.enckeylen, DMA_TO_DEVICE);
-+
-+      return ret;
++      return aead_set_sh_desc(aead);
 +badkey:
 +      crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
 +      return -EINVAL;
@@ -15690,7 +16164,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
 +
 +      /* allocate space for base edesc and link tables */
-+      edesc = qi_cache_alloc(GFP_DMA | flags);
++      edesc = qi_cache_zalloc(GFP_DMA | flags);
 +      if (unlikely(!edesc)) {
 +              dev_err(dev, "could not allocate extended descriptor\n");
 +              return ERR_PTR(-ENOMEM);
@@ -15792,7 +16266,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      edesc->dst_nents = dst_nents;
 +      edesc->iv_dma = iv_dma;
 +
-+      edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4,
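++      /*
++       * DMA map a CAAM-endianness copy kept inside the edesc instead of
++       * req->assoclen itself, so the engine reads the value in the byte
++       * order it expects.
++       */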
++      edesc->assoclen = cpu_to_caam32(req->assoclen);
++      edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
 +                                           DMA_TO_DEVICE);
 +      if (dma_mapping_error(dev, edesc->assoclen_dma)) {
 +              dev_err(dev, "unable to map assoclen\n");
@@ -15895,7 +16370,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      }
 +
 +      /* allocate space for base edesc and link tables */
-+      edesc = qi_cache_alloc(GFP_DMA | flags);
++      edesc = qi_cache_zalloc(GFP_DMA | flags);
 +      if (unlikely(!edesc)) {
 +              dev_err(dev, "could not allocate extended descriptor\n");
 +              return ERR_PTR(-ENOMEM);
@@ -16044,6 +16519,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      unsigned int ivsize = crypto_aead_ivsize(tls);
 +      unsigned int blocksize = crypto_aead_blocksize(tls);
 +      struct device *dev = ctx->dev;
++      struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
 +      struct caam_flc *flc;
 +      u32 *desc;
 +      unsigned int assoclen = 13; /* always 13 bytes for TLS */
@@ -16080,39 +16556,30 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +
 +      flc = &ctx->flc[ENCRYPT];
 +      desc = flc->sh_desc;
-+
 +      cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
-+                            assoclen, ivsize, ctx->authsize, blocksize);
-+
-+      flc->flc[1] = desc_len(desc);
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++                            assoclen, ivsize, ctx->authsize, blocksize,
++                            priv->sec_attr.era);
++      flc->flc[1] = cpu_to_caam32(desc_len(desc));
++      dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      /*
 +       * TLS 1.0 decrypt shared descriptor
 +       * Keys do not fit inline, regardless of the algorithms used
 +       */
++      ctx->adata.key_inline = false;
 +      ctx->adata.key_dma = ctx->key_dma;
 +      ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
 +
 +      flc = &ctx->flc[DECRYPT];
 +      desc = flc->sh_desc;
-+
 +      cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
-+                            ctx->authsize, blocksize);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++                            ctx->authsize, blocksize, priv->sec_attr.era);
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      return 0;
 +}
@@ -16123,7 +16590,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      struct caam_ctx *ctx = crypto_aead_ctx(tls);
 +      struct device *dev = ctx->dev;
 +      struct crypto_authenc_keys keys;
-+      int ret;
 +
 +      if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 +              goto badkey;
@@ -16136,35 +16602,17 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 +#endif
 +
-+      ctx->adata.keylen = split_key_len(ctx->adata.algtype &
-+                                        OP_ALG_ALGSEL_MASK);
-+      ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
-+                                                OP_ALG_ALGSEL_MASK);
-+
-+#ifdef DEBUG
-+      dev_err(dev, "split keylen %d split keylen padded %d\n",
-+              ctx->adata.keylen, ctx->adata.keylen_pad);
-+      print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+                     DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey,
-+                     keys.authkeylen + keys.enckeylen, 1);
-+#endif
++      ctx->adata.keylen = keys.authkeylen;
++      ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++                                            OP_ALG_ALGSEL_MASK);
 +
 +      if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
 +              goto badkey;
 +
-+      ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
-+      if (ret)
-+              goto badkey;
-+
-+      /* postpend encryption key to auth split key */
++      memcpy(ctx->key, keys.authkey, keys.authkeylen);
 +      memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
-+
-+      ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
-+                                    keys.enckeylen, DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, ctx->key_dma)) {
-+              dev_err(dev, "unable to map key i/o memory\n");
-+              return -ENOMEM;
-+      }
++      dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
++                                 keys.enckeylen, ctx->dir);
 +#ifdef DEBUG
 +      print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
 +                     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
@@ -16173,12 +16621,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +
 +      ctx->cdata.keylen = keys.enckeylen;
 +
-+      ret = tls_set_sh_desc(tls);
-+      if (ret)
-+              dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
-+                               keys.enckeylen, DMA_TO_DEVICE);
-+
-+      return ret;
++      return tls_set_sh_desc(tls);
 +badkey:
 +      crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
 +      return -EINVAL;
@@ -16223,14 +16666,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      flc = &ctx->flc[ENCRYPT];
 +      desc = flc->sh_desc;
 +      cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      /*
 +       * Job Descriptor and Shared Descriptors
@@ -16247,14 +16686,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      flc = &ctx->flc[DECRYPT];
 +      desc = flc->sh_desc;
 +      cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      return 0;
 +}
@@ -16274,7 +16709,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +{
 +      struct caam_ctx *ctx = crypto_aead_ctx(aead);
 +      struct device *dev = ctx->dev;
-+      int ret;
 +
 +#ifdef DEBUG
 +      print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
@@ -16282,19 +16716,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +#endif
 +
 +      memcpy(ctx->key, key, keylen);
-+      ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, ctx->key_dma)) {
-+              dev_err(dev, "unable to map key i/o memory\n");
-+              return -ENOMEM;
-+      }
++      dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
 +      ctx->cdata.keylen = keylen;
 +
-+      ret = gcm_set_sh_desc(aead);
-+      if (ret)
-+              dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
-+                               DMA_TO_DEVICE);
-+
-+      return ret;
++      return gcm_set_sh_desc(aead);
 +}
 +
 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
@@ -16328,14 +16753,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      desc = flc->sh_desc;
 +      cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
 +                                true);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      /*
 +       * Job Descriptor and Shared Descriptors
@@ -16352,14 +16773,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      desc = flc->sh_desc;
 +      cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
 +                                true);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      return 0;
 +}
@@ -16380,7 +16797,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +{
 +      struct caam_ctx *ctx = crypto_aead_ctx(aead);
 +      struct device *dev = ctx->dev;
-+      int ret;
 +
 +      if (keylen < 4)
 +              return -EINVAL;
@@ -16396,19 +16812,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +       * in the nonce. Update the AES key length.
 +       */
 +      ctx->cdata.keylen = keylen - 4;
-+      ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
-+                                    DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, ctx->key_dma)) {
-+              dev_err(dev, "unable to map key i/o memory\n");
-+              return -ENOMEM;
-+      }
++      dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
++                                 ctx->dir);
 +
-+      ret = rfc4106_set_sh_desc(aead);
-+      if (ret)
-+              dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
-+                               DMA_TO_DEVICE);
-+
-+      return ret;
++      return rfc4106_set_sh_desc(aead);
 +}
 +
 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
@@ -16442,14 +16849,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      desc = flc->sh_desc;
 +      cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
 +                                true);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      /*
 +       * Job Descriptor and Shared Descriptors
@@ -16466,14 +16869,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      desc = flc->sh_desc;
 +      cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
 +                                true);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      return 0;
 +}
@@ -16494,7 +16893,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +{
 +      struct caam_ctx *ctx = crypto_aead_ctx(aead);
 +      struct device *dev = ctx->dev;
-+      int ret;
 +
 +      if (keylen < 4)
 +              return -EINVAL;
@@ -16510,19 +16908,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +       * in the nonce. Update the AES key length.
 +       */
 +      ctx->cdata.keylen = keylen - 4;
-+      ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
-+                                    DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, ctx->key_dma)) {
-+              dev_err(dev, "unable to map key i/o memory\n");
-+              return -ENOMEM;
-+      }
-+
-+      ret = rfc4543_set_sh_desc(aead);
-+      if (ret)
-+              dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
-+                               DMA_TO_DEVICE);
++      dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
++                                 ctx->dir);
 +
-+      return ret;
++      return rfc4543_set_sh_desc(aead);
 +}
 +
 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
@@ -16540,7 +16929,6 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                             OP_ALG_AAI_CTR_MOD128);
 +      const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
 +
-+      memcpy(ctx->key, key, keylen);
 +#ifdef DEBUG
 +      print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
 +                     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
@@ -16563,59 +16951,39 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              keylen -= CTR_RFC3686_NONCE_SIZE;
 +      }
 +
-+      ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, ctx->key_dma)) {
-+              dev_err(dev, "unable to map key i/o memory\n");
-+              return -ENOMEM;
-+      }
 +      ctx->cdata.keylen = keylen;
-+      ctx->cdata.key_virt = ctx->key;
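++      /*
++       * Reference the raw key directly: it is inlined into the shared
++       * descriptors built below (key_inline), so no copy into ctx->key
++       * and no separate DMA mapping are needed.
++       */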
++      ctx->cdata.key_virt = key;
 +      ctx->cdata.key_inline = true;
 +
 +      /* ablkcipher_encrypt shared descriptor */
 +      flc = &ctx->flc[ENCRYPT];
 +      desc = flc->sh_desc;
-+
 +      cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
 +                                   is_rfc3686, ctx1_iv_off);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      /* ablkcipher_decrypt shared descriptor */
 +      flc = &ctx->flc[DECRYPT];
 +      desc = flc->sh_desc;
-+
 +      cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
 +                                   is_rfc3686, ctx1_iv_off);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      /* ablkcipher_givencrypt shared descriptor */
 +      flc = &ctx->flc[GIVENCRYPT];
 +      desc = flc->sh_desc;
-+
 +      cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
 +                                      ivsize, is_rfc3686, ctx1_iv_off);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[GIVENCRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      return 0;
 +}
@@ -16635,42 +17003,27 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              return -EINVAL;
 +      }
 +
-+      memcpy(ctx->key, key, keylen);
-+      ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, ctx->key_dma)) {
-+              dev_err(dev, "unable to map key i/o memory\n");
-+              return -ENOMEM;
-+      }
 +      ctx->cdata.keylen = keylen;
-+      ctx->cdata.key_virt = ctx->key;
++      ctx->cdata.key_virt = key;
 +      ctx->cdata.key_inline = true;
 +
 +      /* xts_ablkcipher_encrypt shared descriptor */
 +      flc = &ctx->flc[ENCRYPT];
 +      desc = flc->sh_desc;
 +      cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      /* xts_ablkcipher_decrypt shared descriptor */
 +      flc = &ctx->flc[DECRYPT];
 +      desc = flc->sh_desc;
-+
 +      cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
-+
-+      flc->flc[1] = desc_len(desc); /* SDL */
-+      flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+                                    desc_bytes(desc), DMA_TO_DEVICE);
-+      if (dma_mapping_error(dev, flc->flc_dma)) {
-+              dev_err(dev, "unable to map shared descriptor\n");
-+              return -ENOMEM;
-+      }
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++                                 sizeof(flc->flc) + desc_bytes(desc),
++                                 ctx->dir);
 +
 +      return 0;
 +}
@@ -16761,7 +17114,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      }
 +
 +      /* allocate space for base edesc and link tables */
-+      edesc = qi_cache_alloc(GFP_DMA | flags);
++      edesc = qi_cache_zalloc(GFP_DMA | flags);
 +      if (unlikely(!edesc)) {
 +              dev_err(dev, "could not allocate extended descriptor\n");
 +              caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
@@ -16916,7 +17269,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      }
 +
 +      /* allocate space for base edesc and link tables */
-+      edesc = qi_cache_alloc(GFP_DMA | flags);
++      edesc = qi_cache_zalloc(GFP_DMA | flags);
 +      if (!edesc) {
 +              dev_err(dev, "could not allocate extended descriptor\n");
 +              caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
@@ -17083,6 +17436,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              return PTR_ERR(edesc);
 +
 +      caam_req->flc = &ctx->flc[ENCRYPT];
++      caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
 +      caam_req->op_type = ENCRYPT;
 +      caam_req->cbk = aead_encrypt_done;
 +      caam_req->ctx = &req->base;
@@ -17111,6 +17465,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              return PTR_ERR(edesc);
 +
 +      caam_req->flc = &ctx->flc[DECRYPT];
++      caam_req->flc_dma = ctx->flc_dma[DECRYPT];
 +      caam_req->op_type = DECRYPT;
 +      caam_req->cbk = aead_decrypt_done;
 +      caam_req->ctx = &req->base;
@@ -17196,6 +17551,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              return PTR_ERR(edesc);
 +
 +      caam_req->flc = &ctx->flc[ENCRYPT];
++      caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
 +      caam_req->op_type = ENCRYPT;
 +      caam_req->cbk = tls_encrypt_done;
 +      caam_req->ctx = &req->base;
@@ -17224,6 +17580,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              return PTR_ERR(edesc);
 +
 +      caam_req->flc = &ctx->flc[DECRYPT];
++      caam_req->flc_dma = ctx->flc_dma[DECRYPT];
 +      caam_req->op_type = DECRYPT;
 +      caam_req->cbk = tls_decrypt_done;
 +      caam_req->ctx = &req->base;
@@ -17310,6 +17667,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              return PTR_ERR(edesc);
 +
 +      caam_req->flc = &ctx->flc[ENCRYPT];
++      caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
 +      caam_req->op_type = ENCRYPT;
 +      caam_req->cbk = ablkcipher_done;
 +      caam_req->ctx = &req->base;
@@ -17339,6 +17697,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              return PTR_ERR(edesc);
 +
 +      caam_req->flc = &ctx->flc[GIVENCRYPT];
++      caam_req->flc_dma = ctx->flc_dma[GIVENCRYPT];
 +      caam_req->op_type = GIVENCRYPT;
 +      caam_req->cbk = ablkcipher_done;
 +      caam_req->ctx = &req->base;
@@ -17367,6 +17726,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              return PTR_ERR(edesc);
 +
 +      caam_req->flc = &ctx->flc[DECRYPT];
++      caam_req->flc_dma = ctx->flc_dma[DECRYPT];
 +      caam_req->op_type = DECRYPT;
 +      caam_req->cbk = ablkcipher_done;
 +      caam_req->ctx = &req->base;
@@ -17387,12 +17747,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      struct caam_alg_entry caam;
 +};
 +
-+static int caam_cra_init(struct crypto_tfm *tfm)
++static int caam_cra_init(struct crypto_tfm *tfm, bool uses_dkp)
 +{
 +      struct crypto_alg *alg = tfm->__crt_alg;
 +      struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
 +                                                      crypto_alg);
 +      struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
++      dma_addr_t dma_addr;
++      int i;
 +
 +      /* copy descriptor header template value */
 +      ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
@@ -17401,6 +17763,19 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +                           caam_alg->caam.class2_alg_type;
 +
 +      ctx->dev = caam_alg->caam.dev;
++      ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
++
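++      /*
++       * Map the flow contexts and the key with a single DMA mapping:
++       * flc[] and key[] are laid out back to back in struct caam_ctx,
++       * up to the flc_dma member.
++       */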
++      dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
++                                      offsetof(struct caam_ctx, flc_dma),
++                                      ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
++      if (dma_mapping_error(ctx->dev, dma_addr)) {
++              dev_err(ctx->dev, "unable to map key, shared descriptors\n");
++              return -ENOMEM;
++      }
++
++      for (i = 0; i < NUM_OP; i++)
++              ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
++      ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
 +
 +      return 0;
 +}
@@ -17411,32 +17786,24 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
 +
 +      ablkcipher_tfm->reqsize = sizeof(struct caam_request);
-+      return caam_cra_init(tfm);
++      return caam_cra_init(tfm, false);
 +}
 +
 +static int caam_cra_init_aead(struct crypto_aead *tfm)
 +{
++      struct aead_alg *alg = crypto_aead_alg(tfm);
++
 +      crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
-+      return caam_cra_init(crypto_aead_tfm(tfm));
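++      /*
++       * Algorithms with split keys (authenc and tls10) have the CAAM
++       * derive the key material in place, so their key area must be
++       * mapped bidirectionally.
++       */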
++      return caam_cra_init(crypto_aead_tfm(tfm),
++                           (alg->setkey == aead_setkey) ||
++                           (alg->setkey == tls_setkey));
 +}
 +
 +static void caam_exit_common(struct caam_ctx *ctx)
 +{
-+      int i;
-+
-+      for (i = 0; i < NUM_OP; i++) {
-+              if (!ctx->flc[i].flc_dma)
-+                      continue;
-+              dma_unmap_single(ctx->dev, ctx->flc[i].flc_dma,
-+                               sizeof(ctx->flc[i].flc) +
-+                                      desc_bytes(ctx->flc[i].sh_desc),
-+                               DMA_TO_DEVICE);
-+      }
-+
-+      if (ctx->key_dma)
-+              dma_unmap_single(ctx->dev, ctx->key_dma,
-+                               ctx->cdata.keylen + ctx->adata.keylen_pad,
-+                               DMA_TO_DEVICE);
++      dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
++                             offsetof(struct caam_ctx, flc_dma), ctx->dir,
++                             DMA_ATTR_SKIP_CPU_SYNC);
 +}
 +
 +static void caam_cra_exit(struct crypto_tfm *tfm)
@@ -18680,131 +19047,1816 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              },
 +      },
 +      {
-+              .aead = {
-+                      .base = {
-+                              .cra_name = "authenc(hmac(sha512),"
-+                                          "rfc3686(ctr(aes)))",
-+                              .cra_driver_name = "authenc-hmac-sha512-"
-+                                                 "rfc3686-ctr-aes-caam-qi2",
-+                              .cra_blocksize = 1,
++              .aead = {
++                      .base = {
++                              .cra_name = "authenc(hmac(sha512),"
++                                          "rfc3686(ctr(aes)))",
++                              .cra_driver_name = "authenc-hmac-sha512-"
++                                                 "rfc3686-ctr-aes-caam-qi2",
++                              .cra_blocksize = 1,
++                      },
++                      .setkey = aead_setkey,
++                      .setauthsize = aead_setauthsize,
++                      .encrypt = aead_encrypt,
++                      .decrypt = aead_decrypt,
++                      .ivsize = CTR_RFC3686_IV_SIZE,
++                      .maxauthsize = SHA512_DIGEST_SIZE,
++              },
++              .caam = {
++                      .class1_alg_type = OP_ALG_ALGSEL_AES |
++                                         OP_ALG_AAI_CTR_MOD128,
++                      .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++                                         OP_ALG_AAI_HMAC_PRECOMP,
++                      .rfc3686 = true,
++              },
++      },
++      {
++              .aead = {
++                      .base = {
++                              .cra_name = "seqiv(authenc(hmac(sha512),"
++                                          "rfc3686(ctr(aes))))",
++                              .cra_driver_name = "seqiv-authenc-hmac-sha512-"
++                                                 "rfc3686-ctr-aes-caam-qi2",
++                              .cra_blocksize = 1,
++                      },
++                      .setkey = aead_setkey,
++                      .setauthsize = aead_setauthsize,
++                      .encrypt = aead_encrypt,
++                      .decrypt = aead_decrypt,
++                      .ivsize = CTR_RFC3686_IV_SIZE,
++                      .maxauthsize = SHA512_DIGEST_SIZE,
++              },
++              .caam = {
++                      .class1_alg_type = OP_ALG_ALGSEL_AES |
++                                         OP_ALG_AAI_CTR_MOD128,
++                      .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
++                                         OP_ALG_AAI_HMAC_PRECOMP,
++                      .rfc3686 = true,
++                      .geniv = true,
++              },
++      },
++      {
++              .aead = {
++                      .base = {
++                              .cra_name = "tls10(hmac(sha1),cbc(aes))",
++                              .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
++                              .cra_blocksize = AES_BLOCK_SIZE,
++                      },
++                      .setkey = tls_setkey,
++                      .setauthsize = tls_setauthsize,
++                      .encrypt = tls_encrypt,
++                      .decrypt = tls_decrypt,
++                      .ivsize = AES_BLOCK_SIZE,
++                      .maxauthsize = SHA1_DIGEST_SIZE,
++              },
++              .caam = {
++                      .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
++                      .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
++                                         OP_ALG_AAI_HMAC_PRECOMP,
++              },
++      },
++};
++
++static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
++                                            *template)
++{
++      struct caam_crypto_alg *t_alg;
++      struct crypto_alg *alg;
++
++      t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
++      if (!t_alg)
++              return ERR_PTR(-ENOMEM);
++
++      alg = &t_alg->crypto_alg;
++
++      snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
++      snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
++               template->driver_name);
++      alg->cra_module = THIS_MODULE;
++      alg->cra_exit = caam_cra_exit;
++      alg->cra_priority = CAAM_CRA_PRIORITY;
++      alg->cra_blocksize = template->blocksize;
++      alg->cra_alignmask = 0;
++      alg->cra_ctxsize = sizeof(struct caam_ctx);
++      alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
++                       template->type;
++      switch (template->type) {
++      case CRYPTO_ALG_TYPE_GIVCIPHER:
++              alg->cra_init = caam_cra_init_ablkcipher;
++              alg->cra_type = &crypto_givcipher_type;
++              alg->cra_ablkcipher = template->template_ablkcipher;
++              break;
++      case CRYPTO_ALG_TYPE_ABLKCIPHER:
++              alg->cra_init = caam_cra_init_ablkcipher;
++              alg->cra_type = &crypto_ablkcipher_type;
++              alg->cra_ablkcipher = template->template_ablkcipher;
++              break;
++      }
++
++      t_alg->caam.class1_alg_type = template->class1_alg_type;
++      t_alg->caam.class2_alg_type = template->class2_alg_type;
++
++      return t_alg;
++}
++
++static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
++{
++      struct aead_alg *alg = &t_alg->aead;
++
++      alg->base.cra_module = THIS_MODULE;
++      alg->base.cra_priority = CAAM_CRA_PRIORITY;
++      alg->base.cra_ctxsize = sizeof(struct caam_ctx);
++      alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
++
++      alg->init = caam_cra_init_aead;
++      alg->exit = caam_cra_exit_aead;
++}
++
++/* max hash key is max split key size */
++#define CAAM_MAX_HASH_KEY_SIZE                (SHA512_DIGEST_SIZE * 2)
++
++#define CAAM_MAX_HASH_BLOCK_SIZE      SHA512_BLOCK_SIZE
++#define CAAM_MAX_HASH_DIGEST_SIZE     SHA512_DIGEST_SIZE
++
++#define DESC_HASH_MAX_USED_BYTES      (DESC_AHASH_FINAL_LEN + \
++                                       CAAM_MAX_HASH_KEY_SIZE)
++#define DESC_HASH_MAX_USED_LEN                (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
++
++/* caam context sizes for hashes: running digest + 8 */
++#define HASH_MSG_LEN                  8
++#define MAX_CTX_LEN                   (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
++
++enum hash_optype {
++      UPDATE = 0,
++      UPDATE_FIRST,
++      FINALIZE,
++      DIGEST,
++      HASH_NUM_OP
++};
++
++/**
++ * caam_hash_ctx - ahash per-session context
++ * @flc: Flow Contexts array
++ * @flc_dma: I/O virtual addresses of the Flow Contexts
++ * @key: virtual address of the authentication key
++ * @dev: dpseci device
++ * @ctx_len: size of Context Register
++ * @adata: hashing algorithm details
++ */
++struct caam_hash_ctx {
++      struct caam_flc flc[HASH_NUM_OP];
++      dma_addr_t flc_dma[HASH_NUM_OP];
++      u8 key[CAAM_MAX_HASH_KEY_SIZE];
++      struct device *dev;
++      int ctx_len;
++      struct alginfo adata;
++};
++
++/* ahash state */
++struct caam_hash_state {
++      struct caam_request caam_req;
++      dma_addr_t buf_dma;
++      dma_addr_t ctx_dma;
++      u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
++      int buflen_0;
++      u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
++      int buflen_1;
++      u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
++      int (*update)(struct ahash_request *req);
++      int (*final)(struct ahash_request *req);
++      int (*finup)(struct ahash_request *req);
++      int current_buf;
++};
++
++struct caam_export_state {
++      u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
++      u8 caam_ctx[MAX_CTX_LEN];
++      int buflen;
++      int (*update)(struct ahash_request *req);
++      int (*final)(struct ahash_request *req);
++      int (*finup)(struct ahash_request *req);
++};
++
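++/*
++ * The request state keeps two bounce buffers (buf_0/buf_1) for partial
++ * blocks; current_buf selects the active one and switch_buf() flips to
++ * the other once the engine has consumed the active buffer.
++ */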
++static inline void switch_buf(struct caam_hash_state *state)
++{
++      state->current_buf ^= 1;
++}
++
++static inline u8 *current_buf(struct caam_hash_state *state)
++{
++      return state->current_buf ? state->buf_1 : state->buf_0;
++}
++
++static inline u8 *alt_buf(struct caam_hash_state *state)
++{
++      return state->current_buf ? state->buf_0 : state->buf_1;
++}
++
++static inline int *current_buflen(struct caam_hash_state *state)
++{
++      return state->current_buf ? &state->buflen_1 : &state->buflen_0;
++}
++
++static inline int *alt_buflen(struct caam_hash_state *state)
++{
++      return state->current_buf ? &state->buflen_0 : &state->buflen_1;
++}
++
++/* Map current buffer in state (if length > 0) and put it in link table */
++static inline int buf_map_to_qm_sg(struct device *dev,
++                                 struct dpaa2_sg_entry *qm_sg,
++                                 struct caam_hash_state *state)
++{
++      int buflen = *current_buflen(state);
++
++      if (!buflen)
++              return 0;
++
++      state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
++                                      DMA_TO_DEVICE);
++      if (dma_mapping_error(dev, state->buf_dma)) {
++              dev_err(dev, "unable to map buf\n");
++              state->buf_dma = 0;
++              return -ENOMEM;
++      }
++
++      dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
++
++      return 0;
++}
++
++/* Map state->caam_ctx, and add it to link table */
++static inline int ctx_map_to_qm_sg(struct device *dev,
++                                 struct caam_hash_state *state, int ctx_len,
++                                 struct dpaa2_sg_entry *qm_sg, u32 flag)
++{
++      state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
++      if (dma_mapping_error(dev, state->ctx_dma)) {
++              dev_err(dev, "unable to map ctx\n");
++              state->ctx_dma = 0;
++              return -ENOMEM;
++      }
++
++      dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
++
++      return 0;
++}
++
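++/*
++ * Construct the four shared descriptors (update, update_first, finalize,
++ * digest) in the per-session flow contexts and sync them to the device.
++ */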
++static int ahash_set_sh_desc(struct crypto_ahash *ahash)
++{
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      int digestsize = crypto_ahash_digestsize(ahash);
++      struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
++      struct caam_flc *flc;
++      u32 *desc;
++
++      ctx->adata.key_virt = ctx->key;
++      ctx->adata.key_inline = true;
++
++      /* ahash_update shared descriptor */
++      flc = &ctx->flc[UPDATE];
++      desc = flc->sh_desc;
++      cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
++                        ctx->ctx_len, true, priv->sec_attr.era);
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
++                                 desc_bytes(desc), DMA_BIDIRECTIONAL);
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR,
++                     "ahash update shdesc@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++      /* ahash_update_first shared descriptor */
++      flc = &ctx->flc[UPDATE_FIRST];
++      desc = flc->sh_desc;
++      cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
++                        ctx->ctx_len, false, priv->sec_attr.era);
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
++                                 desc_bytes(desc), DMA_BIDIRECTIONAL);
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR,
++                     "ahash update first shdesc@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++      /* ahash_final shared descriptor */
++      flc = &ctx->flc[FINALIZE];
++      desc = flc->sh_desc;
++      cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
++                        ctx->ctx_len, true, priv->sec_attr.era);
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
++                                 desc_bytes(desc), DMA_BIDIRECTIONAL);
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR,
++                     "ahash final shdesc@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++      /* ahash_digest shared descriptor */
++      flc = &ctx->flc[DIGEST];
++      desc = flc->sh_desc;
++      cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
++                        ctx->ctx_len, false, priv->sec_attr.era);
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
++                                 desc_bytes(desc), DMA_BIDIRECTIONAL);
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR,
++                     "ahash digest shdesc@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++      return 0;
++}
++
++/* Digest the key into key_out when it is longer than the block size */
++static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
++                         u32 *keylen, u8 *key_out, u32 digestsize)
++{
++      struct caam_request *req_ctx;
++      u32 *desc;
++      struct split_key_sh_result result;
++      dma_addr_t src_dma, dst_dma;
++      struct caam_flc *flc;
++      dma_addr_t flc_dma;
++      int ret = -ENOMEM;
++      struct dpaa2_fl_entry *in_fle, *out_fle;
++
++      req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
++      if (!req_ctx)
++              return -ENOMEM;
++
++      in_fle = &req_ctx->fd_flt[1];
++      out_fle = &req_ctx->fd_flt[0];
++
++      flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
++      if (!flc)
++              goto err_flc;
++
++      src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
++                               DMA_TO_DEVICE);
++      if (dma_mapping_error(ctx->dev, src_dma)) {
++              dev_err(ctx->dev, "unable to map key input memory\n");
++              goto err_src_dma;
++      }
++      dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
++                               DMA_FROM_DEVICE);
++      if (dma_mapping_error(ctx->dev, dst_dma)) {
++              dev_err(ctx->dev, "unable to map key output memory\n");
++              goto err_dst_dma;
++      }
++
++      desc = flc->sh_desc;
++
++      init_sh_desc(desc, 0);
++
++      /* descriptor to perform unkeyed hash on key_in */
++      append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
++                       OP_ALG_AS_INITFINAL);
++      append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
++                           FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
++      append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
++                       LDST_SRCDST_BYTE_CONTEXT);
++
++      flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++      flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
++                               desc_bytes(desc), DMA_TO_DEVICE);
++      if (dma_mapping_error(ctx->dev, flc_dma)) {
++              dev_err(ctx->dev, "unable to map shared descriptor\n");
++              goto err_flc_dma;
++      }
++
++      dpaa2_fl_set_final(in_fle, true);
++      dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++      dpaa2_fl_set_addr(in_fle, src_dma);
++      dpaa2_fl_set_len(in_fle, *keylen);
++      dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++      dpaa2_fl_set_addr(out_fle, dst_dma);
++      dpaa2_fl_set_len(out_fle, digestsize);
++
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
++      print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++      result.err = 0;
++      init_completion(&result.completion);
++      result.dev = ctx->dev;
++
++      req_ctx->flc = flc;
++      req_ctx->flc_dma = flc_dma;
++      req_ctx->cbk = split_key_sh_done;
++      req_ctx->ctx = &result;
++
++      ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++      if (ret == -EINPROGRESS) {
++              /* in progress */
++              wait_for_completion(&result.completion);
++              ret = result.err;
++#ifdef DEBUG
++              print_hex_dump(KERN_ERR,
++                             "digested key@" __stringify(__LINE__)": ",
++                             DUMP_PREFIX_ADDRESS, 16, 4, key_out, digestsize,
++                             1);
++#endif
++      }
++
++      dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
++                       DMA_TO_DEVICE);
++err_flc_dma:
++      dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
++err_dst_dma:
++      dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
++err_src_dma:
++      kfree(flc);
++err_flc:
++      kfree(req_ctx);
++
++      *keylen = digestsize;
++
++      return ret;
++}
++
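++/*
++ * Set the HMAC key: keys longer than the block size are first digested
++ * down to digestsize bytes, then the (possibly shortened) key is copied
++ * into the context and the shared descriptors are rebuilt.
++ */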
++static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
++                      unsigned int keylen)
++{
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
++      unsigned int digestsize = crypto_ahash_digestsize(ahash);
++      int ret;
++      u8 *hashed_key = NULL;
++
++#ifdef DEBUG
++      dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
++#endif
++
++      if (keylen > blocksize) {
++              hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
++                                         GFP_KERNEL | GFP_DMA);
++              if (!hashed_key)
++                      return -ENOMEM;
++              ret = hash_digest_key(ctx, key, &keylen, hashed_key,
++                                    digestsize);
++              if (ret)
++                      goto bad_free_key;
++              key = hashed_key;
++      }
++
++      ctx->adata.keylen = keylen;
++      ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++                                            OP_ALG_ALGSEL_MASK);
++      if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
++              goto bad_free_key;
++
++      memcpy(ctx->key, key, keylen);
++
++      kfree(hashed_key);
++      return ahash_set_sh_desc(ahash);
++bad_free_key:
++      kfree(hashed_key);
++      crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
++      return -EINVAL;
++}
++
++static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
++                             struct ahash_request *req, int dst_len)
++{
++      struct caam_hash_state *state = ahash_request_ctx(req);
++
++      if (edesc->src_nents)
++              dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
++      if (edesc->dst_dma)
++              dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
++
++      if (edesc->qm_sg_bytes)
++              dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
++                               DMA_TO_DEVICE);
++
++      if (state->buf_dma) {
++              dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
++                               DMA_TO_DEVICE);
++              state->buf_dma = 0;
++      }
++}
++
++static inline void ahash_unmap_ctx(struct device *dev,
++                                 struct ahash_edesc *edesc,
++                                 struct ahash_request *req, int dst_len,
++                                 u32 flag)
++{
++      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      struct caam_hash_state *state = ahash_request_ctx(req);
++
++      if (state->ctx_dma) {
++              dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
++              state->ctx_dma = 0;
++      }
++      ahash_unmap(dev, edesc, req, dst_len);
++}
++
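++/*
++ * Completion callbacks: ahash_done is used when only a digest was
++ * produced, _done_bi when the context was both read and updated,
++ * _done_ctx_src when the context was the input and _done_ctx_dst when
++ * it was the output; each unmaps the job and completes the request.
++ */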
++static void ahash_done(void *cbk_ctx, u32 status)
++{
++      struct crypto_async_request *areq = cbk_ctx;
++      struct ahash_request *req = ahash_request_cast(areq);
++      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      struct ahash_edesc *edesc = state->caam_req.edesc;
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      int digestsize = crypto_ahash_digestsize(ahash);
++      int ecode = 0;
++
++#ifdef DEBUG
++      dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++      if (unlikely(status)) {
++              caam_qi2_strstatus(ctx->dev, status);
++              ecode = -EIO;
++      }
++
++      ahash_unmap(ctx->dev, edesc, req, digestsize);
++      qi_cache_free(edesc);
++
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
++                     ctx->ctx_len, 1);
++      if (req->result)
++              print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
++                             DUMP_PREFIX_ADDRESS, 16, 4, req->result,
++                             digestsize, 1);
++#endif
++
++      req->base.complete(&req->base, ecode);
++}
++
++static void ahash_done_bi(void *cbk_ctx, u32 status)
++{
++      struct crypto_async_request *areq = cbk_ctx;
++      struct ahash_request *req = ahash_request_cast(areq);
++      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      struct ahash_edesc *edesc = state->caam_req.edesc;
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      int ecode = 0;
++#ifdef DEBUG
++      int digestsize = crypto_ahash_digestsize(ahash);
++
++      dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++      if (unlikely(status)) {
++              caam_qi2_strstatus(ctx->dev, status);
++              ecode = -EIO;
++      }
++
++      ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
++      switch_buf(state);
++      qi_cache_free(edesc);
++
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
++                     ctx->ctx_len, 1);
++      if (req->result)
++              print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
++                             DUMP_PREFIX_ADDRESS, 16, 4, req->result,
++                             digestsize, 1);
++#endif
++
++      req->base.complete(&req->base, ecode);
++}
++
++static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
++{
++      struct crypto_async_request *areq = cbk_ctx;
++      struct ahash_request *req = ahash_request_cast(areq);
++      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      struct ahash_edesc *edesc = state->caam_req.edesc;
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      int digestsize = crypto_ahash_digestsize(ahash);
++      int ecode = 0;
++
++#ifdef DEBUG
++      dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++      if (unlikely(status)) {
++              caam_qi2_strstatus(ctx->dev, status);
++              ecode = -EIO;
++      }
++
++      ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
++      qi_cache_free(edesc);
++
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
++                     ctx->ctx_len, 1);
++      if (req->result)
++              print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
++                             DUMP_PREFIX_ADDRESS, 16, 4, req->result,
++                             digestsize, 1);
++#endif
++
++      req->base.complete(&req->base, ecode);
++}
++
++static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
++{
++      struct crypto_async_request *areq = cbk_ctx;
++      struct ahash_request *req = ahash_request_cast(areq);
++      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      struct ahash_edesc *edesc = state->caam_req.edesc;
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      int ecode = 0;
++#ifdef DEBUG
++      int digestsize = crypto_ahash_digestsize(ahash);
++
++      dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++      if (unlikely(status)) {
++              caam_qi2_strstatus(ctx->dev, status);
++              ecode = -EIO;
++      }
++
++      ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
++      switch_buf(state);
++      qi_cache_free(edesc);
++
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
++                     ctx->ctx_len, 1);
++      if (req->result)
++              print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
++                             DUMP_PREFIX_ADDRESS, 16, 4, req->result,
++                             digestsize, 1);
++#endif
++
++      req->base.complete(&req->base, ecode);
++}
++
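++/*
++ * Update with a running context: only whole blocks are hashed, with the
++ * context plus buffered and new data as input; the remainder is kept in
++ * the alternate bounce buffer for the next call.
++ */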
++static int ahash_update_ctx(struct ahash_request *req)
++{
++      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      struct caam_request *req_ctx = &state->caam_req;
++      struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++      struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++                    GFP_KERNEL : GFP_ATOMIC;
++      u8 *buf = current_buf(state);
++      int *buflen = current_buflen(state);
++      u8 *next_buf = alt_buf(state);
++      int *next_buflen = alt_buflen(state), last_buflen;
++      int in_len = *buflen + req->nbytes, to_hash;
++      int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
++      struct ahash_edesc *edesc;
++      int ret = 0;
++
++      last_buflen = *next_buflen;
++      *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
++      to_hash = in_len - *next_buflen;
++
++      if (to_hash) {
++              struct dpaa2_sg_entry *sg_table;
++
++              src_nents = sg_nents_for_len(req->src,
++                                           req->nbytes - (*next_buflen));
++              if (src_nents < 0) {
++                      dev_err(ctx->dev, "Invalid number of src SG.\n");
++                      return src_nents;
++              }
++
++              if (src_nents) {
++                      mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++                                                DMA_TO_DEVICE);
++                      if (!mapped_nents) {
++                              dev_err(ctx->dev, "unable to DMA map source\n");
++                              return -ENOMEM;
++                      }
++              } else {
++                      mapped_nents = 0;
++              }
++
++              /* allocate space for base edesc and link tables */
++              edesc = qi_cache_zalloc(GFP_DMA | flags);
++              if (!edesc) {
++                      dma_unmap_sg(ctx->dev, req->src, src_nents,
++                                   DMA_TO_DEVICE);
++                      return -ENOMEM;
++              }
++
++              edesc->src_nents = src_nents;
++              qm_sg_src_index = 1 + (*buflen ? 1 : 0);
++              qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
++                            sizeof(*sg_table);
++              sg_table = &edesc->sgt[0];
++
++              ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
++                                     DMA_BIDIRECTIONAL);
++              if (ret)
++                      goto unmap_ctx;
++
++              ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
++              if (ret)
++                      goto unmap_ctx;
++
++              if (mapped_nents) {
++                      sg_to_qm_sg_last(req->src, mapped_nents,
++                                       sg_table + qm_sg_src_index, 0);
++                      if (*next_buflen)
++                              scatterwalk_map_and_copy(next_buf, req->src,
++                                                       to_hash - *buflen,
++                                                       *next_buflen, 0);
++              } else {
++                      dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
++                                         true);
++              }
++
++              edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
++                                                qm_sg_bytes, DMA_TO_DEVICE);
++              if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++                      dev_err(ctx->dev, "unable to map S/G table\n");
++                      ret = -ENOMEM;
++                      goto unmap_ctx;
++              }
++              edesc->qm_sg_bytes = qm_sg_bytes;
++
++              memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++              dpaa2_fl_set_final(in_fle, true);
++              dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++              dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++              dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
++              dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++              dpaa2_fl_set_addr(out_fle, state->ctx_dma);
++              dpaa2_fl_set_len(out_fle, ctx->ctx_len);
++
++              req_ctx->flc = &ctx->flc[UPDATE];
++              req_ctx->flc_dma = ctx->flc_dma[UPDATE];
++              req_ctx->cbk = ahash_done_bi;
++              req_ctx->ctx = &req->base;
++              req_ctx->edesc = edesc;
++
++              ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++              if (ret != -EINPROGRESS &&
++                  !(ret == -EBUSY &&
++                    req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++                      goto unmap_ctx;
++      } else if (*next_buflen) {
++              scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
++                                       req->nbytes, 0);
++              *buflen = *next_buflen;
++              *next_buflen = last_buflen;
++      }
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
++      print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
++                     *next_buflen, 1);
++#endif
++
++      return ret;
++unmap_ctx:
++      ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
++      qi_cache_free(edesc);
++      return ret;
++}
++
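++/* Finalize: hash the running context plus any buffered bytes into the digest */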
++static int ahash_final_ctx(struct ahash_request *req)
++{
++      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      struct caam_request *req_ctx = &state->caam_req;
++      struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++      struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++                    GFP_KERNEL : GFP_ATOMIC;
++      int buflen = *current_buflen(state);
++      int qm_sg_bytes, qm_sg_src_index;
++      int digestsize = crypto_ahash_digestsize(ahash);
++      struct ahash_edesc *edesc;
++      struct dpaa2_sg_entry *sg_table;
++      int ret;
++
++      /* allocate space for base edesc and link tables */
++      edesc = qi_cache_zalloc(GFP_DMA | flags);
++      if (!edesc)
++              return -ENOMEM;
++
++      qm_sg_src_index = 1 + (buflen ? 1 : 0);
++      qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
++      sg_table = &edesc->sgt[0];
++
++      ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
++                             DMA_TO_DEVICE);
++      if (ret)
++              goto unmap_ctx;
++
++      ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
++      if (ret)
++              goto unmap_ctx;
++
++      dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
++
++      edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
++                                        DMA_TO_DEVICE);
++      if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++              dev_err(ctx->dev, "unable to map S/G table\n");
++              ret = -ENOMEM;
++              goto unmap_ctx;
++      }
++      edesc->qm_sg_bytes = qm_sg_bytes;
++
++      edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++                                      DMA_FROM_DEVICE);
++      if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++              dev_err(ctx->dev, "unable to map dst\n");
++              edesc->dst_dma = 0;
++              ret = -ENOMEM;
++              goto unmap_ctx;
++      }
++
++      memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++      dpaa2_fl_set_final(in_fle, true);
++      dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++      dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++      dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
++      dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++      dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++      dpaa2_fl_set_len(out_fle, digestsize);
++
++      req_ctx->flc = &ctx->flc[FINALIZE];
++      req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
++      req_ctx->cbk = ahash_done_ctx_src;
++      req_ctx->ctx = &req->base;
++      req_ctx->edesc = edesc;
++
++      ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++      if (ret == -EINPROGRESS ||
++          (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++              return ret;
++
++unmap_ctx:
++      ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
++      qi_cache_free(edesc);
++      return ret;
++}
++
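++/* Finup: hash the running context, buffered bytes and req->src in one job */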
++static int ahash_finup_ctx(struct ahash_request *req)
++{
++      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      struct caam_request *req_ctx = &state->caam_req;
++      struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++      struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++                    GFP_KERNEL : GFP_ATOMIC;
++      int buflen = *current_buflen(state);
++      int qm_sg_bytes, qm_sg_src_index;
++      int src_nents, mapped_nents;
++      int digestsize = crypto_ahash_digestsize(ahash);
++      struct ahash_edesc *edesc;
++      struct dpaa2_sg_entry *sg_table;
++      int ret;
++
++      src_nents = sg_nents_for_len(req->src, req->nbytes);
++      if (src_nents < 0) {
++              dev_err(ctx->dev, "Invalid number of src SG.\n");
++              return src_nents;
++      }
++
++      if (src_nents) {
++              mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++                                        DMA_TO_DEVICE);
++              if (!mapped_nents) {
++                      dev_err(ctx->dev, "unable to DMA map source\n");
++                      return -ENOMEM;
++              }
++      } else {
++              mapped_nents = 0;
++      }
++
++      /* allocate space for base edesc and link tables */
++      edesc = qi_cache_zalloc(GFP_DMA | flags);
++      if (!edesc) {
++              dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
++              return -ENOMEM;
++      }
++
++      edesc->src_nents = src_nents;
++      qm_sg_src_index = 1 + (buflen ? 1 : 0);
++      qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
++      sg_table = &edesc->sgt[0];
++
++      ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
++                             DMA_TO_DEVICE);
++      if (ret)
++              goto unmap_ctx;
++
++      ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
++      if (ret)
++              goto unmap_ctx;
++
++      sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
++
++      edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
++                                        DMA_TO_DEVICE);
++      if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++              dev_err(ctx->dev, "unable to map S/G table\n");
++              ret = -ENOMEM;
++              goto unmap_ctx;
++      }
++      edesc->qm_sg_bytes = qm_sg_bytes;
++
++      edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++                                      DMA_FROM_DEVICE);
++      if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++              dev_err(ctx->dev, "unable to map dst\n");
++              edesc->dst_dma = 0;
++              ret = -ENOMEM;
++              goto unmap_ctx;
++      }
++
++      memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++      dpaa2_fl_set_final(in_fle, true);
++      dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++      dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++      dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
++      dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++      dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++      dpaa2_fl_set_len(out_fle, digestsize);
++
++      req_ctx->flc = &ctx->flc[FINALIZE];
++      req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
++      req_ctx->cbk = ahash_done_ctx_src;
++      req_ctx->ctx = &req->base;
++      req_ctx->edesc = edesc;
++
++      ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++      if (ret == -EINPROGRESS ||
++          (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++              return ret;
++
++unmap_ctx:
++      ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
++      qi_cache_free(edesc);
++      return ret;
++}
++
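++/* One-shot digest of the whole request, without a running context */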
++static int ahash_digest(struct ahash_request *req)
++{
++      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      struct caam_request *req_ctx = &state->caam_req;
++      struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++      struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++                    GFP_KERNEL : GFP_ATOMIC;
++      int digestsize = crypto_ahash_digestsize(ahash);
++      int src_nents, mapped_nents;
++      struct ahash_edesc *edesc;
++      int ret = -ENOMEM;
++
++      state->buf_dma = 0;
++
++      src_nents = sg_nents_for_len(req->src, req->nbytes);
++      if (src_nents < 0) {
++              dev_err(ctx->dev, "Invalid number of src SG.\n");
++              return src_nents;
++      }
++
++      if (src_nents) {
++              mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++                                        DMA_TO_DEVICE);
++              if (!mapped_nents) {
++                      dev_err(ctx->dev, "unable to map source for DMA\n");
++                      return ret;
++              }
++      } else {
++              mapped_nents = 0;
++      }
++
++      /* allocate space for base edesc and link tables */
++      edesc = qi_cache_zalloc(GFP_DMA | flags);
++      if (!edesc) {
++              dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
++              return ret;
++      }
++
++      edesc->src_nents = src_nents;
++      memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++
++      if (mapped_nents > 1) {
++              int qm_sg_bytes;
++              struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
++
++              qm_sg_bytes = mapped_nents * sizeof(*sg_table);
++              sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
++              edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
++                                                qm_sg_bytes, DMA_TO_DEVICE);
++              if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++                      dev_err(ctx->dev, "unable to map S/G table\n");
++                      goto unmap;
++              }
++              edesc->qm_sg_bytes = qm_sg_bytes;
++              dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++              dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++      } else {
++              dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++              dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
++      }
++
++      edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++                                      DMA_FROM_DEVICE);
++      if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++              dev_err(ctx->dev, "unable to map dst\n");
++              edesc->dst_dma = 0;
++              goto unmap;
++      }
++
++      dpaa2_fl_set_final(in_fle, true);
++      dpaa2_fl_set_len(in_fle, req->nbytes);
++      dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++      dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++      dpaa2_fl_set_len(out_fle, digestsize);
++
++      req_ctx->flc = &ctx->flc[DIGEST];
++      req_ctx->flc_dma = ctx->flc_dma[DIGEST];
++      req_ctx->cbk = ahash_done;
++      req_ctx->ctx = &req->base;
++      req_ctx->edesc = edesc;
++      ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++      if (ret == -EINPROGRESS ||
++          (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++              return ret;
++
++unmap:
++      ahash_unmap(ctx->dev, edesc, req, digestsize);
++      qi_cache_free(edesc);
++      return ret;
++}
++
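++/* Final with no context: digest whatever sits in the current bounce buffer */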
++static int ahash_final_no_ctx(struct ahash_request *req)
++{
++      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      struct caam_request *req_ctx = &state->caam_req;
++      struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++      struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++                    GFP_KERNEL : GFP_ATOMIC;
++      u8 *buf = current_buf(state);
++      int buflen = *current_buflen(state);
++      int digestsize = crypto_ahash_digestsize(ahash);
++      struct ahash_edesc *edesc;
++      int ret = -ENOMEM;
++
++      /* allocate space for base edesc and link tables */
++      edesc = qi_cache_zalloc(GFP_DMA | flags);
++      if (!edesc)
++              return ret;
++
++      state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
++      if (dma_mapping_error(ctx->dev, state->buf_dma)) {
++              dev_err(ctx->dev, "unable to map src\n");
++              goto unmap;
++      }
++
++      edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++                                      DMA_FROM_DEVICE);
++      if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++              dev_err(ctx->dev, "unable to map dst\n");
++              edesc->dst_dma = 0;
++              goto unmap;
++      }
++
++      memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++      dpaa2_fl_set_final(in_fle, true);
++      dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++      dpaa2_fl_set_addr(in_fle, state->buf_dma);
++      dpaa2_fl_set_len(in_fle, buflen);
++      dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++      dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++      dpaa2_fl_set_len(out_fle, digestsize);
++
++      req_ctx->flc = &ctx->flc[DIGEST];
++      req_ctx->flc_dma = ctx->flc_dma[DIGEST];
++      req_ctx->cbk = ahash_done;
++      req_ctx->ctx = &req->base;
++      req_ctx->edesc = edesc;
++
++      ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++      if (ret == -EINPROGRESS ||
++          (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++              return ret;
++
++unmap:
++      ahash_unmap(ctx->dev, edesc, req, digestsize);
++      qi_cache_free(edesc);
++      return ret;
++}
++
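++/*
++ * Update before any context exists: hash whole blocks from the bounce
++ * buffer and req->src into an initial context, then switch the state
++ * over to the _ctx handlers.
++ */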
++static int ahash_update_no_ctx(struct ahash_request *req)
++{
++      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      struct caam_request *req_ctx = &state->caam_req;
++      struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++      struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++                    GFP_KERNEL : GFP_ATOMIC;
++      u8 *buf = current_buf(state);
++      int *buflen = current_buflen(state);
++      u8 *next_buf = alt_buf(state);
++      int *next_buflen = alt_buflen(state);
++      int in_len = *buflen + req->nbytes, to_hash;
++      int qm_sg_bytes, src_nents, mapped_nents;
++      struct ahash_edesc *edesc;
++      int ret = 0;
++
++      *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
++      to_hash = in_len - *next_buflen;
++
++      if (to_hash) {
++              struct dpaa2_sg_entry *sg_table;
++
++              src_nents = sg_nents_for_len(req->src,
++                                           req->nbytes - *next_buflen);
++              if (src_nents < 0) {
++                      dev_err(ctx->dev, "Invalid number of src SG.\n");
++                      return src_nents;
++              }
++
++              if (src_nents) {
++                      mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++                                                DMA_TO_DEVICE);
++                      if (!mapped_nents) {
++                              dev_err(ctx->dev, "unable to DMA map source\n");
++                              return -ENOMEM;
++                      }
++              } else {
++                      mapped_nents = 0;
++              }
++
++              /* allocate space for base edesc and link tables */
++              edesc = qi_cache_zalloc(GFP_DMA | flags);
++              if (!edesc) {
++                      dma_unmap_sg(ctx->dev, req->src, src_nents,
++                                   DMA_TO_DEVICE);
++                      return -ENOMEM;
++              }
++
++              edesc->src_nents = src_nents;
++              qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
++              sg_table = &edesc->sgt[0];
++
++              ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
++              if (ret)
++                      goto unmap_ctx;
++
++              sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
++
++              if (*next_buflen)
++                      scatterwalk_map_and_copy(next_buf, req->src,
++                                               to_hash - *buflen,
++                                               *next_buflen, 0);
++
++              edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
++                                                qm_sg_bytes, DMA_TO_DEVICE);
++              if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++                      dev_err(ctx->dev, "unable to map S/G table\n");
++                      ret = -ENOMEM;
++                      goto unmap_ctx;
++              }
++              edesc->qm_sg_bytes = qm_sg_bytes;
++
++              state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
++                                              ctx->ctx_len, DMA_FROM_DEVICE);
++              if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
++                      dev_err(ctx->dev, "unable to map ctx\n");
++                      state->ctx_dma = 0;
++                      ret = -ENOMEM;
++                      goto unmap_ctx;
++              }
++
++              memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++              dpaa2_fl_set_final(in_fle, true);
++              dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++              dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++              dpaa2_fl_set_len(in_fle, to_hash);
++              dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++              dpaa2_fl_set_addr(out_fle, state->ctx_dma);
++              dpaa2_fl_set_len(out_fle, ctx->ctx_len);
++
++              req_ctx->flc = &ctx->flc[UPDATE_FIRST];
++              req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
++              req_ctx->cbk = ahash_done_ctx_dst;
++              req_ctx->ctx = &req->base;
++              req_ctx->edesc = edesc;
++
++              ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++              if (ret != -EINPROGRESS &&
++                  !(ret == -EBUSY &&
++                    req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++                      goto unmap_ctx;
++
++              state->update = ahash_update_ctx;
++              state->finup = ahash_finup_ctx;
++              state->final = ahash_final_ctx;
++      } else if (*next_buflen) {
++              scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
++                                       req->nbytes, 0);
++              *buflen = *next_buflen;
++              *next_buflen = 0;
++      }
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
++      print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
++                     *next_buflen, 1);
++#endif
++
++      return ret;
++unmap_ctx:
++      ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
++      qi_cache_free(edesc);
++      return ret;
++}
++
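++/* Finup with no context: digest the bounce buffer and req->src in one job */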
++static int ahash_finup_no_ctx(struct ahash_request *req)
++{
++      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      struct caam_request *req_ctx = &state->caam_req;
++      struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++      struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++                    GFP_KERNEL : GFP_ATOMIC;
++      int buflen = *current_buflen(state);
++      int qm_sg_bytes, src_nents, mapped_nents;
++      int digestsize = crypto_ahash_digestsize(ahash);
++      struct ahash_edesc *edesc;
++      struct dpaa2_sg_entry *sg_table;
++      int ret;
++
++      src_nents = sg_nents_for_len(req->src, req->nbytes);
++      if (src_nents < 0) {
++              dev_err(ctx->dev, "Invalid number of src SG.\n");
++              return src_nents;
++      }
++
++      if (src_nents) {
++              mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++                                        DMA_TO_DEVICE);
++              if (!mapped_nents) {
++                      dev_err(ctx->dev, "unable to DMA map source\n");
++                      return -ENOMEM;
++              }
++      } else {
++              mapped_nents = 0;
++      }
++
++      /* allocate space for base edesc and link tables */
++      edesc = qi_cache_zalloc(GFP_DMA | flags);
++      if (!edesc) {
++              dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
++              return -ENOMEM;
++      }
++
++      edesc->src_nents = src_nents;
++      qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
++      sg_table = &edesc->sgt[0];
++
++      ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
++      if (ret)
++              goto unmap;
++
++      sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
++
++      edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
++                                        DMA_TO_DEVICE);
++      if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++              dev_err(ctx->dev, "unable to map S/G table\n");
++              ret = -ENOMEM;
++              goto unmap;
++      }
++      edesc->qm_sg_bytes = qm_sg_bytes;
++
++      edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++                                      DMA_FROM_DEVICE);
++      if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++              dev_err(ctx->dev, "unable to map dst\n");
++              edesc->dst_dma = 0;
++              ret = -ENOMEM;
++              goto unmap;
++      }
++
++      memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++      dpaa2_fl_set_final(in_fle, true);
++      dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++      dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++      dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
++      dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++      dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++      dpaa2_fl_set_len(out_fle, digestsize);
++
++      req_ctx->flc = &ctx->flc[DIGEST];
++      req_ctx->flc_dma = ctx->flc_dma[DIGEST];
++      req_ctx->cbk = ahash_done;
++      req_ctx->ctx = &req->base;
++      req_ctx->edesc = edesc;
++      ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++      if (ret != -EINPROGRESS &&
++          !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++              goto unmap;
++
++      return ret;
++unmap:
++      ahash_unmap(ctx->dev, edesc, req, digestsize);
++      qi_cache_free(edesc);
++      return -ENOMEM;
++}
++
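++/*
++ * First update call on a request: the bounce buffers are still empty,
++ * so data comes straight from req->src; any trailing partial block is
++ * stashed in a bounce buffer for a later call.
++ */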
++static int ahash_update_first(struct ahash_request *req)
++{
++      struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++      struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      struct caam_request *req_ctx = &state->caam_req;
++      struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++      struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++      gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++                    GFP_KERNEL : GFP_ATOMIC;
++      u8 *next_buf = alt_buf(state);
++      int *next_buflen = alt_buflen(state);
++      int to_hash;
++      int src_nents, mapped_nents;
++      struct ahash_edesc *edesc;
++      int ret = 0;
++
++      *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
++                                    1);
++      to_hash = req->nbytes - *next_buflen;
++
++      if (to_hash) {
++              struct dpaa2_sg_entry *sg_table;
++
++              src_nents = sg_nents_for_len(req->src,
++                                           req->nbytes - (*next_buflen));
++              if (src_nents < 0) {
++                      dev_err(ctx->dev, "Invalid number of src SG.\n");
++                      return src_nents;
++              }
++
++              if (src_nents) {
++                      mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++                                                DMA_TO_DEVICE);
++                      if (!mapped_nents) {
++                              dev_err(ctx->dev, "unable to map source for DMA\n");
++                              return -ENOMEM;
++                      }
++              } else {
++                      mapped_nents = 0;
++              }
++
++              /* allocate space for base edesc and link tables */
++              edesc = qi_cache_zalloc(GFP_DMA | flags);
++              if (!edesc) {
++                      dma_unmap_sg(ctx->dev, req->src, src_nents,
++                                   DMA_TO_DEVICE);
++                      return -ENOMEM;
++              }
++
++              edesc->src_nents = src_nents;
++              sg_table = &edesc->sgt[0];
++
++              memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++              dpaa2_fl_set_final(in_fle, true);
++              dpaa2_fl_set_len(in_fle, to_hash);
++
++              if (mapped_nents > 1) {
++                      int qm_sg_bytes;
++
++                      sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
++                      qm_sg_bytes = mapped_nents * sizeof(*sg_table);
++                      edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
++                                                        qm_sg_bytes,
++                                                        DMA_TO_DEVICE);
++                      if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++                              dev_err(ctx->dev, "unable to map S/G table\n");
++                              ret = -ENOMEM;
++                              goto unmap_ctx;
++                      }
++                      edesc->qm_sg_bytes = qm_sg_bytes;
++                      dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++                      dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++              } else {
++                      dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++                      dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
++              }
++
++              if (*next_buflen)
++                      scatterwalk_map_and_copy(next_buf, req->src, to_hash,
++                                               *next_buflen, 0);
++
++              state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
++                                              ctx->ctx_len, DMA_FROM_DEVICE);
++              if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
++                      dev_err(ctx->dev, "unable to map ctx\n");
++                      state->ctx_dma = 0;
++                      ret = -ENOMEM;
++                      goto unmap_ctx;
++              }
++
++              dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++              dpaa2_fl_set_addr(out_fle, state->ctx_dma);
++              dpaa2_fl_set_len(out_fle, ctx->ctx_len);
++
++              req_ctx->flc = &ctx->flc[UPDATE_FIRST];
++              req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
++              req_ctx->cbk = ahash_done_ctx_dst;
++              req_ctx->ctx = &req->base;
++              req_ctx->edesc = edesc;
++
++              ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++              if (ret != -EINPROGRESS &&
++                  !(ret == -EBUSY && req->base.flags &
++                    CRYPTO_TFM_REQ_MAY_BACKLOG))
++                      goto unmap_ctx;
++
++              state->update = ahash_update_ctx;
++              state->finup = ahash_finup_ctx;
++              state->final = ahash_final_ctx;
++      } else if (*next_buflen) {
++              state->update = ahash_update_no_ctx;
++              state->finup = ahash_finup_no_ctx;
++              state->final = ahash_final_no_ctx;
++              scatterwalk_map_and_copy(next_buf, req->src, 0,
++                                       req->nbytes, 0);
++              switch_buf(state);
++      }
++#ifdef DEBUG
++      print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
++                     DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
++#endif
++
++      return ret;
++unmap_ctx:
++      ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
++      qi_cache_free(edesc);
++      return ret;
++}
++
++static int ahash_finup_first(struct ahash_request *req)
++{
++      return ahash_digest(req);
++}
++
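++/*
++ * The ahash entry points dispatch through function pointers in the
++ * request state; these start at the "first"/"no context" handlers and
++ * move to the context-carrying variants once a running context exists.
++ */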
++static int ahash_init(struct ahash_request *req)
++{
++      struct caam_hash_state *state = ahash_request_ctx(req);
++
++      state->update = ahash_update_first;
++      state->finup = ahash_finup_first;
++      state->final = ahash_final_no_ctx;
++
++      state->ctx_dma = 0;
++      state->current_buf = 0;
++      state->buf_dma = 0;
++      state->buflen_0 = 0;
++      state->buflen_1 = 0;
++
++      return 0;
++}
++
++static int ahash_update(struct ahash_request *req)
++{
++      struct caam_hash_state *state = ahash_request_ctx(req);
++
++      return state->update(req);
++}
++
++static int ahash_finup(struct ahash_request *req)
++{
++      struct caam_hash_state *state = ahash_request_ctx(req);
++
++      return state->finup(req);
++}
++
++static int ahash_final(struct ahash_request *req)
++{
++      struct caam_hash_state *state = ahash_request_ctx(req);
++
++      return state->final(req);
++}
++
++static int ahash_export(struct ahash_request *req, void *out)
++{
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      struct caam_export_state *export = out;
++      int len;
++      u8 *buf;
++
++      if (state->current_buf) {
++              buf = state->buf_1;
++              len = state->buflen_1;
++      } else {
++              buf = state->buf_0;
++              len = state->buflen_0;
++      }
++
++      memcpy(export->buf, buf, len);
++      memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
++      export->buflen = len;
++      export->update = state->update;
++      export->final = state->final;
++      export->finup = state->finup;
++
++      return 0;
++}
++
++static int ahash_import(struct ahash_request *req, const void *in)
++{
++      struct caam_hash_state *state = ahash_request_ctx(req);
++      const struct caam_export_state *export = in;
++
++      memset(state, 0, sizeof(*state));
++      memcpy(state->buf_0, export->buf, export->buflen);
++      memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
++      state->buflen_0 = export->buflen;
++      state->update = export->update;
++      state->final = export->final;
++      state->finup = export->finup;
++
++      return 0;
++}
++
++struct caam_hash_template {
++      char name[CRYPTO_MAX_ALG_NAME];
++      char driver_name[CRYPTO_MAX_ALG_NAME];
++      char hmac_name[CRYPTO_MAX_ALG_NAME];
++      char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
++      unsigned int blocksize;
++      struct ahash_alg template_ahash;
++      u32 alg_type;
++};
++
++/* ahash descriptors */
++static struct caam_hash_template driver_hash[] = {
++      {
++              .name = "sha1",
++              .driver_name = "sha1-caam-qi2",
++              .hmac_name = "hmac(sha1)",
++              .hmac_driver_name = "hmac-sha1-caam-qi2",
++              .blocksize = SHA1_BLOCK_SIZE,
++              .template_ahash = {
++                      .init = ahash_init,
++                      .update = ahash_update,
++                      .final = ahash_final,
++                      .finup = ahash_finup,
++                      .digest = ahash_digest,
++                      .export = ahash_export,
++                      .import = ahash_import,
++                      .setkey = ahash_setkey,
++                      .halg = {
++                              .digestsize = SHA1_DIGEST_SIZE,
++                              .statesize = sizeof(struct caam_export_state),
 +                      },
-+                      .setkey = aead_setkey,
-+                      .setauthsize = aead_setauthsize,
-+                      .encrypt = aead_encrypt,
-+                      .decrypt = aead_decrypt,
-+                      .ivsize = CTR_RFC3686_IV_SIZE,
-+                      .maxauthsize = SHA512_DIGEST_SIZE,
 +              },
-+              .caam = {
-+                      .class1_alg_type = OP_ALG_ALGSEL_AES |
-+                                         OP_ALG_AAI_CTR_MOD128,
-+                      .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-+                                         OP_ALG_AAI_HMAC_PRECOMP,
-+                      .rfc3686 = true,
++              .alg_type = OP_ALG_ALGSEL_SHA1,
++      }, {
++              .name = "sha224",
++              .driver_name = "sha224-caam-qi2",
++              .hmac_name = "hmac(sha224)",
++              .hmac_driver_name = "hmac-sha224-caam-qi2",
++              .blocksize = SHA224_BLOCK_SIZE,
++              .template_ahash = {
++                      .init = ahash_init,
++                      .update = ahash_update,
++                      .final = ahash_final,
++                      .finup = ahash_finup,
++                      .digest = ahash_digest,
++                      .export = ahash_export,
++                      .import = ahash_import,
++                      .setkey = ahash_setkey,
++                      .halg = {
++                              .digestsize = SHA224_DIGEST_SIZE,
++                              .statesize = sizeof(struct caam_export_state),
++                      },
 +              },
-+      },
-+      {
-+              .aead = {
-+                      .base = {
-+                              .cra_name = "seqiv(authenc(hmac(sha512),"
-+                                          "rfc3686(ctr(aes))))",
-+                              .cra_driver_name = "seqiv-authenc-hmac-sha512-"
-+                                                 "rfc3686-ctr-aes-caam-qi2",
-+                              .cra_blocksize = 1,
++              .alg_type = OP_ALG_ALGSEL_SHA224,
++      }, {
++              .name = "sha256",
++              .driver_name = "sha256-caam-qi2",
++              .hmac_name = "hmac(sha256)",
++              .hmac_driver_name = "hmac-sha256-caam-qi2",
++              .blocksize = SHA256_BLOCK_SIZE,
++              .template_ahash = {
++                      .init = ahash_init,
++                      .update = ahash_update,
++                      .final = ahash_final,
++                      .finup = ahash_finup,
++                      .digest = ahash_digest,
++                      .export = ahash_export,
++                      .import = ahash_import,
++                      .setkey = ahash_setkey,
++                      .halg = {
++                              .digestsize = SHA256_DIGEST_SIZE,
++                              .statesize = sizeof(struct caam_export_state),
 +                      },
-+                      .setkey = aead_setkey,
-+                      .setauthsize = aead_setauthsize,
-+                      .encrypt = aead_encrypt,
-+                      .decrypt = aead_decrypt,
-+                      .ivsize = CTR_RFC3686_IV_SIZE,
-+                      .maxauthsize = SHA512_DIGEST_SIZE,
 +              },
-+              .caam = {
-+                      .class1_alg_type = OP_ALG_ALGSEL_AES |
-+                                         OP_ALG_AAI_CTR_MOD128,
-+                      .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-+                                         OP_ALG_AAI_HMAC_PRECOMP,
-+                      .rfc3686 = true,
-+                      .geniv = true,
++              .alg_type = OP_ALG_ALGSEL_SHA256,
++      }, {
++              .name = "sha384",
++              .driver_name = "sha384-caam-qi2",
++              .hmac_name = "hmac(sha384)",
++              .hmac_driver_name = "hmac-sha384-caam-qi2",
++              .blocksize = SHA384_BLOCK_SIZE,
++              .template_ahash = {
++                      .init = ahash_init,
++                      .update = ahash_update,
++                      .final = ahash_final,
++                      .finup = ahash_finup,
++                      .digest = ahash_digest,
++                      .export = ahash_export,
++                      .import = ahash_import,
++                      .setkey = ahash_setkey,
++                      .halg = {
++                              .digestsize = SHA384_DIGEST_SIZE,
++                              .statesize = sizeof(struct caam_export_state),
++                      },
 +              },
-+      },
-+      {
-+              .aead = {
-+                      .base = {
-+                              .cra_name = "tls10(hmac(sha1),cbc(aes))",
-+                              .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
-+                              .cra_blocksize = AES_BLOCK_SIZE,
++              .alg_type = OP_ALG_ALGSEL_SHA384,
++      }, {
++              .name = "sha512",
++              .driver_name = "sha512-caam-qi2",
++              .hmac_name = "hmac(sha512)",
++              .hmac_driver_name = "hmac-sha512-caam-qi2",
++              .blocksize = SHA512_BLOCK_SIZE,
++              .template_ahash = {
++                      .init = ahash_init,
++                      .update = ahash_update,
++                      .final = ahash_final,
++                      .finup = ahash_finup,
++                      .digest = ahash_digest,
++                      .export = ahash_export,
++                      .import = ahash_import,
++                      .setkey = ahash_setkey,
++                      .halg = {
++                              .digestsize = SHA512_DIGEST_SIZE,
++                              .statesize = sizeof(struct caam_export_state),
 +                      },
-+                      .setkey = tls_setkey,
-+                      .setauthsize = tls_setauthsize,
-+                      .encrypt = tls_encrypt,
-+                      .decrypt = tls_decrypt,
-+                      .ivsize = AES_BLOCK_SIZE,
-+                      .maxauthsize = SHA1_DIGEST_SIZE,
 +              },
-+              .caam = {
-+                      .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-+                      .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
-+                                         OP_ALG_AAI_HMAC_PRECOMP,
++              .alg_type = OP_ALG_ALGSEL_SHA512,
++      }, {
++              .name = "md5",
++              .driver_name = "md5-caam-qi2",
++              .hmac_name = "hmac(md5)",
++              .hmac_driver_name = "hmac-md5-caam-qi2",
++              .blocksize = MD5_BLOCK_WORDS * 4,
++              .template_ahash = {
++                      .init = ahash_init,
++                      .update = ahash_update,
++                      .final = ahash_final,
++                      .finup = ahash_finup,
++                      .digest = ahash_digest,
++                      .export = ahash_export,
++                      .import = ahash_import,
++                      .setkey = ahash_setkey,
++                      .halg = {
++                              .digestsize = MD5_DIGEST_SIZE,
++                              .statesize = sizeof(struct caam_export_state),
++                      },
 +              },
-+      },
++              .alg_type = OP_ALG_ALGSEL_MD5,
++      }
 +};
 +
-+static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
-+                                            *template)
++struct caam_hash_alg {
++      struct list_head entry;
++      struct device *dev;
++      int alg_type;
++      struct ahash_alg ahash_alg;
++};
++
++static int caam_hash_cra_init(struct crypto_tfm *tfm)
++{
++      struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
++      struct crypto_alg *base = tfm->__crt_alg;
++      struct hash_alg_common *halg =
++               container_of(base, struct hash_alg_common, base);
++      struct ahash_alg *alg =
++               container_of(halg, struct ahash_alg, halg);
++      struct caam_hash_alg *caam_hash =
++               container_of(alg, struct caam_hash_alg, ahash_alg);
++      struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
++      /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
++      static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
++                                       HASH_MSG_LEN + SHA1_DIGEST_SIZE,
++                                       HASH_MSG_LEN + 32,
++                                       HASH_MSG_LEN + SHA256_DIGEST_SIZE,
++                                       HASH_MSG_LEN + 64,
++                                       HASH_MSG_LEN + SHA512_DIGEST_SIZE };
++      dma_addr_t dma_addr;
++      int i;
++
++      ctx->dev = caam_hash->dev;
++
++      dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
++                                      DMA_BIDIRECTIONAL,
++                                      DMA_ATTR_SKIP_CPU_SYNC);
++      if (dma_mapping_error(ctx->dev, dma_addr)) {
++              dev_err(ctx->dev, "unable to map shared descriptors\n");
++              return -ENOMEM;
++      }
++
++      for (i = 0; i < HASH_NUM_OP; i++)
++              ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
++
++      /* copy descriptor header template value */
++      ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
++
++      ctx->ctx_len = runninglen[(ctx->adata.algtype &
++                                 OP_ALG_ALGSEL_SUBMASK) >>
++                                OP_ALG_ALGSEL_SHIFT];
++
++      crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
++                               sizeof(struct caam_hash_state));
++
++      return ahash_set_sh_desc(ahash);
++}
++
++static void caam_hash_cra_exit(struct crypto_tfm *tfm)
 +{
-+      struct caam_crypto_alg *t_alg;
++      struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
++
++      dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
++                             DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
++}
++
++static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
++      struct caam_hash_template *template, bool keyed)
++{
++      struct caam_hash_alg *t_alg;
++      struct ahash_alg *halg;
 +      struct crypto_alg *alg;
 +
 +      t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
 +      if (!t_alg)
 +              return ERR_PTR(-ENOMEM);
 +
-+      alg = &t_alg->crypto_alg;
++      t_alg->ahash_alg = template->template_ahash;
++      halg = &t_alg->ahash_alg;
++      alg = &halg->halg.base;
 +
-+      snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
-+      snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
-+               template->driver_name);
++      if (keyed) {
++              snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
++                       template->hmac_name);
++              snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
++                       template->hmac_driver_name);
++      } else {
++              snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
++                       template->name);
++              snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
++                       template->driver_name);
++              t_alg->ahash_alg.setkey = NULL;
++      }
 +      alg->cra_module = THIS_MODULE;
-+      alg->cra_exit = caam_cra_exit;
++      alg->cra_init = caam_hash_cra_init;
++      alg->cra_exit = caam_hash_cra_exit;
++      alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
 +      alg->cra_priority = CAAM_CRA_PRIORITY;
 +      alg->cra_blocksize = template->blocksize;
 +      alg->cra_alignmask = 0;
-+      alg->cra_ctxsize = sizeof(struct caam_ctx);
-+      alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
-+                       template->type;
-+      switch (template->type) {
-+      case CRYPTO_ALG_TYPE_GIVCIPHER:
-+              alg->cra_init = caam_cra_init_ablkcipher;
-+              alg->cra_type = &crypto_givcipher_type;
-+              alg->cra_ablkcipher = template->template_ablkcipher;
-+              break;
-+      case CRYPTO_ALG_TYPE_ABLKCIPHER:
-+              alg->cra_init = caam_cra_init_ablkcipher;
-+              alg->cra_type = &crypto_ablkcipher_type;
-+              alg->cra_ablkcipher = template->template_ablkcipher;
-+              break;
-+      }
++      alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
++      alg->cra_type = &crypto_ahash_type;
 +
-+      t_alg->caam.class1_alg_type = template->class1_alg_type;
-+      t_alg->caam.class2_alg_type = template->class2_alg_type;
++      t_alg->alg_type = template->alg_type;
++      t_alg->dev = dev;
 +
 +      return t_alg;
 +}
 +
-+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
-+{
-+      struct aead_alg *alg = &t_alg->aead;
-+
-+      alg->base.cra_module = THIS_MODULE;
-+      alg->base.cra_priority = CAAM_CRA_PRIORITY;
-+      alg->base.cra_ctxsize = sizeof(struct caam_ctx);
-+      alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
-+
-+      alg->init = caam_cra_init_aead;
-+      alg->exit = caam_cra_exit_aead;
-+}
-+
 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
 +{
 +      struct dpaa2_caam_priv_per_cpu *ppriv;
@@ -18832,8 +20884,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              /* Register notification callbacks */
 +              err = dpaa2_io_service_register(NULL, nctx);
 +              if (unlikely(err)) {
-+                      dev_err(dev, "notification register failed\n");
++                      dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
 +                      nctx->cb = NULL;
++                      /*
++                       * If there is no affine DPIO for this core, there is
++                       * probably none available for the next cores either.
++                       * Signal that we want to retry later, in case the
++                       * DPIO devices weren't probed yet.
++                       */
++                      err = -EPROBE_DEFER;
 +                      goto err;
 +              }
 +
@@ -19265,6 +21324,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +}
 +
 +static struct list_head alg_list;
++static struct list_head hash_list;
 +
 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
 +{
@@ -19310,7 +21370,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      /* Obtain a MC portal */
 +      err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
 +      if (err) {
-+              dev_err(dev, "MC portal allocation failed\n");
++              if (err == -ENXIO)
++                      err = -EPROBE_DEFER;
++              else
++                      dev_err(dev, "MC portal allocation failed\n");
++
 +              goto err_dma_mask;
 +      }
 +
@@ -19428,6 +21492,61 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      if (registered)
 +              dev_info(dev, "algorithms registered in /proc/crypto\n");
 +
++      /* register hash algorithms the device supports */
++      INIT_LIST_HEAD(&hash_list);
++
++      /*
++       * Skip registration of any hashing algorithms if the MD
++       * (message digest) block is not present.
++       */
++      if (!priv->sec_attr.md_acc_num)
++              return 0;
++
++      for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
++              struct caam_hash_alg *t_alg;
++              struct caam_hash_template *alg = driver_hash + i;
++
++              /* register hmac version */
++              t_alg = caam_hash_alloc(dev, alg, true);
++              if (IS_ERR(t_alg)) {
++                      err = PTR_ERR(t_alg);
++                      dev_warn(dev, "%s hash alg allocation failed: %d\n",
++                               alg->driver_name, err);
++                      continue;
++              }
++
++              err = crypto_register_ahash(&t_alg->ahash_alg);
++              if (err) {
++                      dev_warn(dev, "%s alg registration failed: %d\n",
++                               t_alg->ahash_alg.halg.base.cra_driver_name,
++                               err);
++                      kfree(t_alg);
++              } else {
++                      list_add_tail(&t_alg->entry, &hash_list);
++              }
++
++              /* register unkeyed version */
++              t_alg = caam_hash_alloc(dev, alg, false);
++              if (IS_ERR(t_alg)) {
++                      err = PTR_ERR(t_alg);
++                      dev_warn(dev, "%s alg allocation failed: %d\n",
++                               alg->driver_name, err);
++                      continue;
++              }
++
++              err = crypto_register_ahash(&t_alg->ahash_alg);
++              if (err) {
++                      dev_warn(dev, "%s alg registration failed: %d\n",
++                               t_alg->ahash_alg.halg.base.cra_driver_name,
++                               err);
++                      kfree(t_alg);
++              } else {
++                      list_add_tail(&t_alg->entry, &hash_list);
++              }
++      }
++      if (!list_empty(&hash_list))
++              dev_info(dev, "hash algorithms registered in /proc/crypto\n");
++
 +      return err;
 +
 +err_bind:
@@ -19472,6 +21591,16 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              }
 +      }
 +
++      if (hash_list.next) {
++              struct caam_hash_alg *t_hash_alg, *p;
++
++              list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
++                      crypto_unregister_ahash(&t_hash_alg->ahash_alg);
++                      list_del(&t_hash_alg->entry);
++                      kfree(t_hash_alg);
++              }
++      }
++
 +      dpaa2_dpseci_disable(priv);
 +      dpaa2_dpseci_dpio_free(priv);
 +      dpaa2_dpseci_free(priv);
@@ -19502,7 +21631,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              }
 +      }
 +
-+      dpaa2_fl_set_flc(&req->fd_flt[1], req->flc->flc_dma);
++      dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
 +
 +      req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
 +                                       DMA_BIDIRECTIONAL);
@@ -19514,8 +21643,8 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      memset(&fd, 0, sizeof(fd));
 +      dpaa2_fd_set_format(&fd, dpaa2_fd_list);
 +      dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
-+      dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
-+      dpaa2_fd_set_flc(&fd, req->flc->flc_dma);
++      dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
++      dpaa2_fd_set_flc(&fd, req->flc_dma);
 +
 +      /*
 +       * There is no guarantee that preemption is disabled here,
@@ -19571,7 +21700,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +module_fsl_mc_driver(dpaa2_caam_driver);
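
The hunks above settle on a single enqueue contract: a request counts as in
flight only when dpaa2_caam_enqueue() returns -EINPROGRESS, or -EBUSY while
the caller set CRYPTO_TFM_REQ_MAY_BACKLOG; any other return takes the
unmap/free error path (see the unmap_ctx label earlier). A minimal userspace
C model of that check — MAY_BACKLOG here is an illustrative stand-in for the
crypto API flag, not kernel code:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAY_BACKLOG 0x1 /* stands in for CRYPTO_TFM_REQ_MAY_BACKLOG */

/* In flight only for -EINPROGRESS, or -EBUSY with backlogging allowed;
 * everything else means the caller must unmap and free its descriptor. */
static bool request_in_flight(int ret, unsigned int flags)
{
        return ret == -EINPROGRESS ||
               (ret == -EBUSY && (flags & MAY_BACKLOG));
}

int main(void)
{
        printf("%d\n", request_in_flight(-EINPROGRESS, 0));     /* 1 */
        printf("%d\n", request_in_flight(-EBUSY, MAY_BACKLOG)); /* 1 */
        printf("%d\n", request_in_flight(-EBUSY, 0));           /* 0: cleanup */
        return 0;
}
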
 --- /dev/null
 +++ b/drivers/crypto/caam/caamalg_qi2.h
-@@ -0,0 +1,265 @@
+@@ -0,0 +1,283 @@
 +/*
 + * Copyright 2015-2016 Freescale Semiconductor Inc.
 + * Copyright 2017 NXP
@@ -19728,6 +21857,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + * @iv_dma: dma address of iv for checking continuity and link table
 + * @qm_sg_bytes: length of dma mapped h/w link table
 + * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @assoclen: associated data length, in CAAM endianness
 + * @assoclen_dma: bus physical mapped address of req->assoclen
 + * @sgt: the h/w link table
 + */
@@ -19737,6 +21867,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      dma_addr_t iv_dma;
 +      int qm_sg_bytes;
 +      dma_addr_t qm_sg_dma;
++      unsigned int assoclen;
 +      dma_addr_t assoclen_dma;
 +#define CAAM_QI_MAX_AEAD_SG                                           \
 +      ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /   \
@@ -19787,16 +21918,30 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      struct dpaa2_sg_entry sgt[0];
 +};
 +
++/*
++ * ahash_edesc - s/w-extended ahash descriptor
++ * @dst_dma: I/O virtual address of req->result
++ * @qm_sg_dma: I/O virtual address of h/w link table
++ * @src_nents: number of segments in input scatterlist
++ * @qm_sg_bytes: length of dma mapped qm_sg space
++ * @sgt: pointer to h/w link table
++ */
++struct ahash_edesc {
++      dma_addr_t dst_dma;
++      dma_addr_t qm_sg_dma;
++      int src_nents;
++      int qm_sg_bytes;
++      struct dpaa2_sg_entry sgt[0];
++};
++
 +/**
 + * caam_flc - Flow Context (FLC)
 + * @flc: Flow Context options
 + * @sh_desc: Shared Descriptor
-+ * @flc_dma: DMA address of the Flow Context
 + */
 +struct caam_flc {
 +      u32 flc[16];
 +      u32 sh_desc[MAX_SDLEN];
-+      dma_addr_t flc_dma;
 +} ____cacheline_aligned;
 +
 +enum optype {
@@ -19814,6 +21959,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + *          fd_flt[1] - FLE pointing to input buffer
 + * @fd_flt_dma: DMA address for the frame list table
 + * @flc: Flow Context
++ * @flc_dma: I/O virtual address of Flow Context
 + * @op_type: operation type
 + * @cbk: Callback function to invoke when job is completed
 + * @ctx: arbitrary context attached to the request by the application
@@ -19823,6 +21969,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      struct dpaa2_fl_entry fd_flt[2];
 +      dma_addr_t fd_flt_dma;
 +      struct caam_flc *flc;
++      dma_addr_t flc_dma;
 +      enum optype op_type;
 +      void (*cbk)(void *ctx, u32 err);
 +      void *ctx;
@@ -19839,16 +21986,30 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +#endif        /* _CAAMALG_QI2_H_ */
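
Note how caam_hash_cra_init() in the caamalg_qi2.c hunks above maps the whole
ctx->flc array with one dma_map_single_attrs() call and derives each
per-operation handle as dma_addr + i * sizeof(ctx->flc[i]); moving flc_dma
out of struct caam_flc and into struct caam_request is what makes that
one-mapping, many-handles layout work. A small standalone C sketch of the
stride derivation — struct sizes and the HASH_NUM_OP value are illustrative,
not the driver's:

#include <stdint.h>
#include <stdio.h>

#define HASH_NUM_OP 8                   /* illustrative count of hash op types */

struct flc {                            /* stands in for struct caam_flc */
        uint32_t flc[16];
        uint32_t sh_desc[64];           /* illustrative MAX_SDLEN */
};

int main(void)
{
        static struct flc flcs[HASH_NUM_OP];
        /* models the dma_addr_t returned by the single mapping */
        uintptr_t base = (uintptr_t)flcs;
        uintptr_t flc_dma[HASH_NUM_OP];
        int i;

        /* one mapping, per-entry handles derived by a fixed stride */
        for (i = 0; i < HASH_NUM_OP; i++)
                flc_dma[i] = base + i * sizeof(flcs[i]);

        printf("stride = %zu bytes\n", sizeof(flcs[0]));
        printf("last handle offset = %zu\n",
               (size_t)(flc_dma[HASH_NUM_OP - 1] - base));
        return 0;
}
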
 --- a/drivers/crypto/caam/caamhash.c
 +++ b/drivers/crypto/caam/caamhash.c
-@@ -72,7 +72,7 @@
+@@ -62,6 +62,7 @@
+ #include "error.h"
+ #include "sg_sw_sec4.h"
+ #include "key_gen.h"
++#include "caamhash_desc.h"
+ #define CAAM_CRA_PRIORITY             3000
+@@ -71,14 +72,6 @@
+ #define CAAM_MAX_HASH_BLOCK_SIZE      SHA512_BLOCK_SIZE
  #define CAAM_MAX_HASH_DIGEST_SIZE     SHA512_DIGEST_SIZE
  
- /* length of descriptors text */
+-/* length of descriptors text */
 -#define DESC_AHASH_BASE                       (4 * CAAM_CMD_SZ)
-+#define DESC_AHASH_BASE                       (3 * CAAM_CMD_SZ)
- #define DESC_AHASH_UPDATE_LEN         (6 * CAAM_CMD_SZ)
- #define DESC_AHASH_UPDATE_FIRST_LEN   (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
- #define DESC_AHASH_FINAL_LEN          (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-@@ -103,20 +103,14 @@ struct caam_hash_ctx {
+-#define DESC_AHASH_UPDATE_LEN         (6 * CAAM_CMD_SZ)
+-#define DESC_AHASH_UPDATE_FIRST_LEN   (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+-#define DESC_AHASH_FINAL_LEN          (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+-#define DESC_AHASH_FINUP_LEN          (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+-#define DESC_AHASH_DIGEST_LEN         (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+-
+ #define DESC_HASH_MAX_USED_BYTES      (DESC_AHASH_FINAL_LEN + \
+                                        CAAM_MAX_HASH_KEY_SIZE)
+ #define DESC_HASH_MAX_USED_LEN                (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
+@@ -103,20 +96,15 @@ struct caam_hash_ctx {
        u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
@@ -19858,6 +22019,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        dma_addr_t sh_desc_fin_dma;
        dma_addr_t sh_desc_digest_dma;
 -      dma_addr_t sh_desc_finup_dma;
++      enum dma_data_direction dir;
        struct device *jrdev;
 -      u32 alg_type;
 -      u32 alg_op;
@@ -19870,7 +22032,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  };
  
  /* ahash state */
-@@ -143,6 +137,31 @@ struct caam_export_state {
+@@ -143,6 +131,31 @@ struct caam_export_state {
        int (*finup)(struct ahash_request *req);
  };
  
@@ -19902,7 +22064,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  /* Common job descriptor seq in/out ptr routines */
  
  /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
-@@ -175,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr
+@@ -175,40 +188,31 @@ static inline dma_addr_t map_seq_out_ptr
        return dst_dma;
  }
  
@@ -19955,7 +22117,12 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  }
  
  /* Map state->caam_ctx, and add it to link table */
-@@ -224,89 +234,54 @@ static inline int ctx_map_to_sec4_sg(u32
+-static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
++static inline int ctx_map_to_sec4_sg(struct device *jrdev,
+                                    struct caam_hash_state *state, int ctx_len,
+                                    struct sec4_sg_entry *sec4_sg, u32 flag)
+ {
+@@ -224,124 +228,22 @@ static inline int ctx_map_to_sec4_sg(u32
        return 0;
  }
  
@@ -19988,39 +22155,25 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
 -}
 -
- /*
+-/*
 - * For ahash read data from seqin following state->caam_ctx,
 - * and write resulting class2 context to seqout, which may be state->caam_ctx
 - * or req->result
-+ * For ahash update, final and finup (import_ctx = true)
-+ *     import context, read and write to seqout
-+ * For ahash firsts and digest (import_ctx = false)
-+ *     read and write to seqout
-  */
+- */
 -static inline void ahash_append_load_str(u32 *desc, int digestsize)
-+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
-+                                   struct caam_hash_ctx *ctx, bool import_ctx)
- {
+-{
 -      /* Calculate remaining bytes to read */
 -      append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-+      u32 op = ctx->adata.algtype;
-+      u32 *skip_key_load;
+-
 -      /* Read remaining bytes */
 -      append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
 -                           FIFOLD_TYPE_MSG | KEY_VLF);
-+      init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
 -      /* Store class2 context bytes */
 -      append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
 -                       LDST_SRCDST_BYTE_CONTEXT);
 -}
-+      /* Append key if it has been set; ahash update excluded */
-+      if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
-+              /* Skip key loading if already shared */
-+              skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-+                                          JUMP_COND_SHRD);
+-
 -/*
 - * For ahash update, final and finup, import context, read and write to seqout
 - */
@@ -20033,60 +22186,44 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      /* Import context from software */
 -      append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
 -                 LDST_CLASS_2_CCB | ctx->ctx_len);
-+              append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
-+                                ctx->adata.keylen, CLASS_2 |
-+                                KEY_DEST_MDHA_SPLIT | KEY_ENC);
+-
 -      /* Class 2 operation */
 -      append_operation(desc, op | state | OP_ALG_ENCRYPT);
-+              set_jump_tgt_here(desc, skip_key_load);
+-
 -      /*
 -       * Load from buf and/or src and write to req->result or state->context
 -       */
 -      ahash_append_load_str(desc, digestsize);
--}
-+              op |= OP_ALG_AAI_HMAC_PRECOMP;
-+      }
--/* For ahash firsts and digest, read and write to seqout */
--static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
--                                   int digestsize, struct caam_hash_ctx *ctx)
--{
--      init_sh_desc_key_ahash(desc, ctx);
-+      /* If needed, import context from software */
-+      if (import_ctx)
-+              append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
-+                              LDST_SRCDST_BYTE_CONTEXT);
-       /* Class 2 operation */
-       append_operation(desc, op | state | OP_ALG_ENCRYPT);
-       /*
-        * Load from buf and/or src and write to req->result or state->context
-+       * Calculate remaining bytes to read
-        */
+-}
+-
+-/* For ahash firsts and digest, read and write to seqout */
+-static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
+-                                   int digestsize, struct caam_hash_ctx *ctx)
+-{
+-      init_sh_desc_key_ahash(desc, ctx);
+-
+-      /* Class 2 operation */
+-      append_operation(desc, op | state | OP_ALG_ENCRYPT);
+-
+-      /*
+-       * Load from buf and/or src and write to req->result or state->context
+-       */
 -      ahash_append_load_str(desc, digestsize);
-+      append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-+      /* Read remaining bytes */
-+      append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
-+                           FIFOLD_TYPE_MSG | KEY_VLF);
-+      /* Store class2 context bytes */
-+      append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
-+                       LDST_SRCDST_BYTE_CONTEXT);
- }
+-}
+-
  static int ahash_set_sh_desc(struct crypto_ahash *ahash)
-@@ -314,34 +289,13 @@ static int ahash_set_sh_desc(struct cryp
+ {
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct device *jrdev = ctx->jrdev;
 -      u32 have_key = 0;
++      struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
        u32 *desc;
  
 -      if (ctx->split_key_len)
 -              have_key = OP_ALG_AAI_HMAC_PRECOMP;
--
++      ctx->adata.key_virt = ctx->key;
        /* ahash_update shared descriptor */
        desc = ctx->sh_desc_update;
 -
@@ -20109,13 +22246,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -              dev_err(jrdev, "unable to map shared descriptor\n");
 -              return -ENOMEM;
 -      }
-+      ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
++      cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
++                        ctx->ctx_len, true, ctrlpriv->era);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  #ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash update shdesc@"__stringify(__LINE__)": ",
-@@ -350,17 +304,9 @@ static int ahash_set_sh_desc(struct cryp
+@@ -350,17 +252,10 @@ static int ahash_set_sh_desc(struct cryp
  
        /* ahash_update_first shared descriptor */
        desc = ctx->sh_desc_update_first;
@@ -20130,13 +22268,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -              dev_err(jrdev, "unable to map shared descriptor\n");
 -              return -ENOMEM;
 -      }
-+      ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
++      cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
++                        ctx->ctx_len, false, ctrlpriv->era);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  #ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash update first shdesc@"__stringify(__LINE__)": ",
-@@ -369,53 +315,20 @@ static int ahash_set_sh_desc(struct cryp
+@@ -369,53 +264,22 @@ static int ahash_set_sh_desc(struct cryp
  
        /* ahash_final shared descriptor */
        desc = ctx->sh_desc_fin;
@@ -20150,9 +22289,10 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -              dev_err(jrdev, "unable to map shared descriptor\n");
 -              return -ENOMEM;
 -      }
-+      ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
++      cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
++                        ctx->ctx_len, true, ctrlpriv->era);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  #ifdef DEBUG
        print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -20190,13 +22330,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -              dev_err(jrdev, "unable to map shared descriptor\n");
 -              return -ENOMEM;
 -      }
-+      ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
++      cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
++                        ctx->ctx_len, false, ctrlpriv->era);
 +      dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
-+                                 desc_bytes(desc), DMA_TO_DEVICE);
++                                 desc_bytes(desc), ctx->dir);
  #ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash digest shdesc@"__stringify(__LINE__)": ",
-@@ -426,14 +339,6 @@ static int ahash_set_sh_desc(struct cryp
+@@ -426,14 +290,6 @@ static int ahash_set_sh_desc(struct cryp
        return 0;
  }
  
@@ -20211,7 +22352,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  /* Digest hash size if it is too large */
  static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
                           u32 *keylen, u8 *key_out, u32 digestsize)
-@@ -469,7 +374,7 @@ static int hash_digest_key(struct caam_h
+@@ -469,7 +325,7 @@ static int hash_digest_key(struct caam_h
        }
  
        /* Job descriptor to perform unkeyed hash on key_in */
@@ -20220,7 +22361,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                         OP_ALG_AS_INITFINAL);
        append_seq_in_ptr(desc, src_dma, *keylen, 0);
        append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
-@@ -513,10 +418,7 @@ static int hash_digest_key(struct caam_h
+@@ -513,12 +369,10 @@ static int hash_digest_key(struct caam_h
  static int ahash_setkey(struct crypto_ahash *ahash,
                        const u8 *key, unsigned int keylen)
  {
@@ -20230,8 +22371,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      struct device *jrdev = ctx->jrdev;
        int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
        int digestsize = crypto_ahash_digestsize(ahash);
++      struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
        int ret;
-@@ -539,43 +441,19 @@ static int ahash_setkey(struct crypto_ah
+       u8 *hashed_key = NULL;
+@@ -539,43 +393,29 @@ static int ahash_setkey(struct crypto_ah
                key = hashed_key;
        }
  
@@ -20246,12 +22390,21 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
 -                     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 -#endif
--
++      /*
++       * If DKP is supported, use it in the shared descriptor to generate
++       * the split key.
++       */
++      if (ctrlpriv->era >= 6) {
++              ctx->adata.key_inline = true;
++              ctx->adata.keylen = keylen;
++              ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++                                                    OP_ALG_ALGSEL_MASK);
 -      ret = gen_split_hash_key(ctx, key, keylen);
-+      ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
-+                          CAAM_MAX_HASH_KEY_SIZE);
-       if (ret)
-               goto bad_free_key;
+-      if (ret)
+-              goto bad_free_key;
++              if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
++                      goto bad_free_key;
  
 -      ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
 -                                    DMA_TO_DEVICE);
@@ -20259,13 +22412,18 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -              dev_err(jrdev, "unable to map key i/o memory\n");
 -              ret = -ENOMEM;
 -              goto error_free_key;
--      }
- #ifdef DEBUG
-       print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
-                      DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
++              memcpy(ctx->key, key, keylen);
++      } else {
++              ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
++                                  keylen, CAAM_MAX_HASH_KEY_SIZE);
++              if (ret)
++                      goto bad_free_key;
+       }
+-#ifdef DEBUG
+-      print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+-                     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
 -                     ctx->split_key_pad_len, 1);
-+                     ctx->adata.keylen_pad, 1);
- #endif
+-#endif
  
 -      ret = ahash_set_sh_desc(ahash);
 -      if (ret) {
@@ -20279,7 +22437,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
   bad_free_key:
        kfree(hashed_key);
        crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
-@@ -604,6 +482,8 @@ static inline void ahash_unmap(struct de
+@@ -604,6 +444,8 @@ static inline void ahash_unmap(struct de
                        struct ahash_edesc *edesc,
                        struct ahash_request *req, int dst_len)
  {
@@ -20288,7 +22446,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        if (edesc->src_nents)
                dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
        if (edesc->dst_dma)
-@@ -612,6 +492,12 @@ static inline void ahash_unmap(struct de
+@@ -612,6 +454,12 @@ static inline void ahash_unmap(struct de
        if (edesc->sec4_sg_bytes)
                dma_unmap_single(dev, edesc->sec4_sg_dma,
                                 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
@@ -20301,7 +22459,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  }
  
  static inline void ahash_unmap_ctx(struct device *dev,
-@@ -643,8 +529,7 @@ static void ahash_done(struct device *jr
+@@ -643,8 +491,7 @@ static void ahash_done(struct device *jr
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  #endif
  
@@ -20311,7 +22469,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        if (err)
                caam_jr_strstatus(jrdev, err);
  
-@@ -671,19 +556,19 @@ static void ahash_done_bi(struct device
+@@ -671,19 +518,19 @@ static void ahash_done_bi(struct device
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -20334,7 +22492,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        kfree(edesc);
  
  #ifdef DEBUG
-@@ -713,8 +598,7 @@ static void ahash_done_ctx_src(struct de
+@@ -713,8 +560,7 @@ static void ahash_done_ctx_src(struct de
        dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
  #endif
  
@@ -20344,7 +22502,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        if (err)
                caam_jr_strstatus(jrdev, err);
  
-@@ -741,19 +625,19 @@ static void ahash_done_ctx_dst(struct de
+@@ -741,19 +587,19 @@ static void ahash_done_ctx_dst(struct de
        struct ahash_edesc *edesc;
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -20367,7 +22525,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        kfree(edesc);
  
  #ifdef DEBUG
-@@ -835,13 +719,12 @@ static int ahash_update_ctx(struct ahash
+@@ -835,13 +681,12 @@ static int ahash_update_ctx(struct ahash
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
@@ -20387,7 +22545,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        int in_len = *buflen + req->nbytes, to_hash;
        u32 *desc;
        int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
-@@ -895,10 +778,9 @@ static int ahash_update_ctx(struct ahash
+@@ -890,15 +735,14 @@ static int ahash_update_ctx(struct ahash
+               edesc->src_nents = src_nents;
+               edesc->sec4_sg_bytes = sec4_sg_bytes;
+-              ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
++              ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+                                        edesc->sec4_sg, DMA_BIDIRECTIONAL);
                if (ret)
                        goto unmap_ctx;
  
@@ -20401,7 +22565,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
                if (mapped_nents) {
                        sg_to_sec4_sg_last(req->src, mapped_nents,
-@@ -909,12 +791,10 @@ static int ahash_update_ctx(struct ahash
+@@ -909,12 +753,10 @@ static int ahash_update_ctx(struct ahash
                                                         to_hash - *buflen,
                                                         *next_buflen, 0);
                } else {
@@ -20416,7 +22580,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                desc = edesc->hw_desc;
  
                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-@@ -969,12 +849,9 @@ static int ahash_final_ctx(struct ahash_
+@@ -969,12 +811,9 @@ static int ahash_final_ctx(struct ahash_
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
@@ -20432,7 +22596,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        u32 *desc;
        int sec4_sg_bytes, sec4_sg_src_index;
        int digestsize = crypto_ahash_digestsize(ahash);
-@@ -1001,11 +878,11 @@ static int ahash_final_ctx(struct ahash_
+@@ -994,18 +833,17 @@ static int ahash_final_ctx(struct ahash_
+       desc = edesc->hw_desc;
+       edesc->sec4_sg_bytes = sec4_sg_bytes;
+-      edesc->src_nents = 0;
+-      ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
++      ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+                                edesc->sec4_sg, DMA_TO_DEVICE);
        if (ret)
                goto unmap_ctx;
  
@@ -20449,7 +22621,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
-@@ -1048,12 +925,9 @@ static int ahash_finup_ctx(struct ahash_
+@@ -1048,12 +886,9 @@ static int ahash_finup_ctx(struct ahash_
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
@@ -20465,7 +22637,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        u32 *desc;
        int sec4_sg_src_index;
        int src_nents, mapped_nents;
-@@ -1082,7 +956,7 @@ static int ahash_finup_ctx(struct ahash_
+@@ -1082,7 +917,7 @@ static int ahash_finup_ctx(struct ahash_
  
        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
@@ -20474,7 +22646,13 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                                  flags);
        if (!edesc) {
                dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
-@@ -1098,9 +972,9 @@ static int ahash_finup_ctx(struct ahash_
+@@ -1093,14 +928,14 @@ static int ahash_finup_ctx(struct ahash_
+       edesc->src_nents = src_nents;
+-      ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
++      ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+                                edesc->sec4_sg, DMA_TO_DEVICE);
        if (ret)
                goto unmap_ctx;
  
@@ -20487,7 +22665,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
                                  sec4_sg_src_index, ctx->ctx_len + buflen,
-@@ -1136,15 +1010,18 @@ static int ahash_digest(struct ahash_req
+@@ -1136,15 +971,18 @@ static int ahash_digest(struct ahash_req
  {
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -20508,7 +22686,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        src_nents = sg_nents_for_len(req->src, req->nbytes);
        if (src_nents < 0) {
                dev_err(jrdev, "Invalid number of src SG.\n");
-@@ -1215,10 +1092,10 @@ static int ahash_final_no_ctx(struct aha
+@@ -1215,10 +1053,10 @@ static int ahash_final_no_ctx(struct aha
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
@@ -20523,7 +22701,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        u32 *desc;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
-@@ -1276,13 +1153,12 @@ static int ahash_update_no_ctx(struct ah
+@@ -1246,7 +1084,6 @@ static int ahash_final_no_ctx(struct aha
+               dev_err(jrdev, "unable to map dst\n");
+               goto unmap;
+       }
+-      edesc->src_nents = 0;
+ #ifdef DEBUG
+       print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+@@ -1276,13 +1113,12 @@ static int ahash_update_no_ctx(struct ah
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
@@ -20543,9 +22729,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        int in_len = *buflen + req->nbytes, to_hash;
        int sec4_sg_bytes, src_nents, mapped_nents;
        struct ahash_edesc *edesc;
-@@ -1331,8 +1207,10 @@ static int ahash_update_no_ctx(struct ah
+@@ -1329,10 +1165,11 @@ static int ahash_update_no_ctx(struct ah
+               edesc->src_nents = src_nents;
                edesc->sec4_sg_bytes = sec4_sg_bytes;
-               edesc->dst_dma = 0;
+-              edesc->dst_dma = 0;
  
 -              state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
 -                                                  buf, *buflen);
@@ -20556,7 +22744,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                sg_to_sec4_sg_last(req->src, mapped_nents,
                                   edesc->sec4_sg + 1, 0);
  
-@@ -1342,8 +1220,6 @@ static int ahash_update_no_ctx(struct ah
+@@ -1342,8 +1179,6 @@ static int ahash_update_no_ctx(struct ah
                                                 *next_buflen, 0);
                }
  
@@ -20565,7 +22753,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                desc = edesc->hw_desc;
  
                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-@@ -1403,12 +1279,9 @@ static int ahash_finup_no_ctx(struct aha
+@@ -1403,12 +1238,9 @@ static int ahash_finup_no_ctx(struct aha
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
@@ -20581,7 +22769,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        u32 *desc;
        int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
        int digestsize = crypto_ahash_digestsize(ahash);
-@@ -1450,9 +1323,9 @@ static int ahash_finup_no_ctx(struct aha
+@@ -1450,9 +1282,9 @@ static int ahash_finup_no_ctx(struct aha
        edesc->src_nents = src_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
  
@@ -20594,7 +22782,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
                                  req->nbytes);
-@@ -1496,11 +1369,10 @@ static int ahash_update_first(struct aha
+@@ -1496,11 +1328,10 @@ static int ahash_update_first(struct aha
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct device *jrdev = ctx->jrdev;
@@ -20610,7 +22798,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        int to_hash;
        u32 *desc;
        int src_nents, mapped_nents;
-@@ -1582,6 +1454,7 @@ static int ahash_update_first(struct aha
+@@ -1545,7 +1376,6 @@ static int ahash_update_first(struct aha
+               }
+               edesc->src_nents = src_nents;
+-              edesc->dst_dma = 0;
+               ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
+                                         to_hash);
+@@ -1582,6 +1412,7 @@ static int ahash_update_first(struct aha
                state->final = ahash_final_no_ctx;
                scatterwalk_map_and_copy(next_buf, req->src, 0,
                                         req->nbytes, 0);
@@ -20618,7 +22814,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        }
  #ifdef DEBUG
        print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
-@@ -1688,7 +1561,6 @@ struct caam_hash_template {
+@@ -1688,7 +1519,6 @@ struct caam_hash_template {
        unsigned int blocksize;
        struct ahash_alg template_ahash;
        u32 alg_type;
@@ -20626,7 +22822,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  };
  
  /* ahash descriptors */
-@@ -1714,7 +1586,6 @@ static struct caam_hash_template driver_
+@@ -1714,7 +1544,6 @@ static struct caam_hash_template driver_
                        },
                },
                .alg_type = OP_ALG_ALGSEL_SHA1,
@@ -20634,7 +22830,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        }, {
                .name = "sha224",
                .driver_name = "sha224-caam",
-@@ -1736,7 +1607,6 @@ static struct caam_hash_template driver_
+@@ -1736,7 +1565,6 @@ static struct caam_hash_template driver_
                        },
                },
                .alg_type = OP_ALG_ALGSEL_SHA224,
@@ -20642,7 +22838,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        }, {
                .name = "sha256",
                .driver_name = "sha256-caam",
-@@ -1758,7 +1628,6 @@ static struct caam_hash_template driver_
+@@ -1758,7 +1586,6 @@ static struct caam_hash_template driver_
                        },
                },
                .alg_type = OP_ALG_ALGSEL_SHA256,
@@ -20650,7 +22846,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        }, {
                .name = "sha384",
                .driver_name = "sha384-caam",
-@@ -1780,7 +1649,6 @@ static struct caam_hash_template driver_
+@@ -1780,7 +1607,6 @@ static struct caam_hash_template driver_
                        },
                },
                .alg_type = OP_ALG_ALGSEL_SHA384,
@@ -20658,7 +22854,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        }, {
                .name = "sha512",
                .driver_name = "sha512-caam",
-@@ -1802,7 +1670,6 @@ static struct caam_hash_template driver_
+@@ -1802,7 +1628,6 @@ static struct caam_hash_template driver_
                        },
                },
                .alg_type = OP_ALG_ALGSEL_SHA512,
@@ -20666,7 +22862,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        }, {
                .name = "md5",
                .driver_name = "md5-caam",
-@@ -1824,14 +1691,12 @@ static struct caam_hash_template driver_
+@@ -1824,14 +1649,12 @@ static struct caam_hash_template driver_
                        },
                },
                .alg_type = OP_ALG_ALGSEL_MD5,
@@ -20681,23 +22877,27 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        struct ahash_alg ahash_alg;
  };
  
-@@ -1853,6 +1718,7 @@ static int caam_hash_cra_init(struct cry
+@@ -1853,6 +1676,8 @@ static int caam_hash_cra_init(struct cry
                                         HASH_MSG_LEN + SHA256_DIGEST_SIZE,
                                         HASH_MSG_LEN + 64,
                                         HASH_MSG_LEN + SHA512_DIGEST_SIZE };
 +      dma_addr_t dma_addr;
++      struct caam_drv_private *priv;
  
        /*
         * Get a Job ring from Job Ring driver to ensure in-order
-@@ -1863,11 +1729,31 @@ static int caam_hash_cra_init(struct cry
+@@ -1863,11 +1688,34 @@ static int caam_hash_cra_init(struct cry
                pr_err("Job Ring Device allocation for transform failed\n");
                return PTR_ERR(ctx->jrdev);
        }
 +
++      priv = dev_get_drvdata(ctx->jrdev->parent);
++      ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
++
 +      dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
 +                                      offsetof(struct caam_hash_ctx,
 +                                               sh_desc_update_dma),
-+                                      DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++                                      ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
 +      if (dma_mapping_error(ctx->jrdev, dma_addr)) {
 +              dev_err(ctx->jrdev, "unable to map shared descriptors\n");
 +              caam_jr_free(ctx->jrdev);
@@ -20724,7 +22924,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                                  OP_ALG_ALGSEL_SHIFT];
  
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-@@ -1879,30 +1765,10 @@ static void caam_hash_cra_exit(struct cr
+@@ -1879,30 +1727,10 @@ static void caam_hash_cra_exit(struct cr
  {
        struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
  
@@ -20755,11 +22955,11 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
 +                             offsetof(struct caam_hash_ctx,
 +                                      sh_desc_update_dma),
-+                             DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
++                             ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
        caam_jr_free(ctx->jrdev);
  }
  
-@@ -1961,7 +1827,6 @@ caam_hash_alloc(struct caam_hash_templat
+@@ -1961,7 +1789,6 @@ caam_hash_alloc(struct caam_hash_templat
        alg->cra_type = &crypto_ahash_type;
  
        t_alg->alg_type = template->alg_type;
@@ -20767,6 +22967,169 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        return t_alg;
  }
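
A side note on the flow both drivers share: ahash_init() only installs
function pointers, and each ahash_update()/ahash_finup()/ahash_final() entry
point dispatches through them; the "first"/"no_ctx" handlers re-point the ops
at their context-carrying variants once a running context exists in the
engine. A self-contained C sketch of that dispatch pattern — names are
illustrative, not the driver's:

#include <stdio.h>

struct hstate;
typedef int (*hash_op)(struct hstate *);

struct hstate {
        hash_op update;
        hash_op final;
};

static int update_ctx(struct hstate *s)
{
        puts("update_ctx: running context lives in the engine");
        return 0;
}

static int final_no_ctx(struct hstate *s)
{
        puts("final_no_ctx: finalize straight from the buffer");
        return 0;
}

/* The first update seeds the context, then promotes the handler so every
 * later call takes the context-carrying path. */
static int update_first(struct hstate *s)
{
        puts("update_first: seed context, switch handlers");
        s->update = update_ctx;
        return 0;
}

int main(void)
{
        struct hstate s = { .update = update_first, .final = final_no_ctx };

        s.update(&s);   /* runs update_first, promotes the pointer */
        s.update(&s);   /* now runs update_ctx */
        return s.final(&s);
}
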
+--- /dev/null
++++ b/drivers/crypto/caam/caamhash_desc.c
+@@ -0,0 +1,108 @@
++/*
++ * Shared descriptors for ahash algorithms
++ *
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *     notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright
++ *     notice, this list of conditions and the following disclaimer in the
++ *     documentation and/or other materials provided with the distribution.
++ *     * Neither the names of the above-listed copyright holders nor the
++ *     names of any contributors may be used to endorse or promote products
++ *     derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "compat.h"
++#include "desc_constr.h"
++#include "caamhash_desc.h"
++
++/**
++ * cnstr_shdsc_ahash - ahash shared descriptor
++ * @desc: pointer to buffer used for descriptor construction
++ * @adata: pointer to authentication transform definitions.
++ *         A split key is required for SEC Era < 6; the size of the split key
++ *         is specified in this case.
++ *         Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
++ *         SHA256, SHA384, SHA512}.
++ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
++ * @digestsize: algorithm's digest size
++ * @ctx_len: size of Context Register
++ * @import_ctx: true if previous Context Register needs to be restored
++ *              must be true for ahash update and final
++ *              must be false for ahash first and digest
++ * @era: SEC Era
++ */
++void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
++                     int digestsize, int ctx_len, bool import_ctx, int era)
++{
++      u32 op = adata->algtype;
++
++      init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++      /* Append key if it has been set; ahash update excluded */
++      if (state != OP_ALG_AS_UPDATE && adata->keylen) {
++              u32 *skip_key_load;
++
++              /* Skip key loading if already shared */
++              skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++                                          JUMP_COND_SHRD);
++
++              if (era < 6)
++                      append_key_as_imm(desc, adata->key_virt,
++                                        adata->keylen_pad,
++                                        adata->keylen, CLASS_2 |
++                                        KEY_DEST_MDHA_SPLIT | KEY_ENC);
++              else
++                      append_proto_dkp(desc, adata);
++
++              set_jump_tgt_here(desc, skip_key_load);
++
++              op |= OP_ALG_AAI_HMAC_PRECOMP;
++      }
++
++      /* If needed, import context from software */
++      if (import_ctx)
++              append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
++                              LDST_SRCDST_BYTE_CONTEXT);
++
++      /* Class 2 operation */
++      append_operation(desc, op | state | OP_ALG_ENCRYPT);
++
++      /*
++       * Load from buf and/or src and write to req->result or state->context
++       * Calculate remaining bytes to read
++       */
++      append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++      /* Read remaining bytes */
++      append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
++                           FIFOLD_TYPE_MSG | KEY_VLF);
++      /* Store class2 context bytes */
++      append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
++                       LDST_SRCDST_BYTE_CONTEXT);
++}
++EXPORT_SYMBOL(cnstr_shdsc_ahash);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
++MODULE_AUTHOR("NXP Semiconductors");
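As an illustration of how the constructor above is driven, here is a hedged sketch of rebuilding the "update" shared descriptor the way the caamhash driver does; the caam_hash_ctx layout and helpers are assumed from the surrounding driver, not defined in this file:

	static void ahash_build_update_shdesc(struct caam_hash_ctx *ctx,
					      int era)
	{
		u32 *desc = ctx->sh_desc_update;

		/* update: restore the running context, keep result in it */
		cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE,
				  ctx->ctx_len, ctx->ctx_len, true, era);

		/* push the rewritten descriptor back to the device */
		dma_sync_single_for_device(ctx->jrdev,
					   ctx->sh_desc_update_dma,
					   desc_bytes(desc), ctx->dir);
	}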
+--- /dev/null
++++ b/drivers/crypto/caam/caamhash_desc.h
+@@ -0,0 +1,49 @@
++/*
++ * Shared descriptors for ahash algorithms
++ *
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *     * Redistributions of source code must retain the above copyright
++ *     notice, this list of conditions and the following disclaimer.
++ *     * Redistributions in binary form must reproduce the above copyright
++ *     notice, this list of conditions and the following disclaimer in the
++ *     documentation and/or other materials provided with the distribution.
++ *     * Neither the names of the above-listed copyright holders nor the
++ *     names of any contributors may be used to endorse or promote products
++ *     derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _CAAMHASH_DESC_H_
++#define _CAAMHASH_DESC_H_
++
++/* length of descriptors text */
++#define DESC_AHASH_BASE                       (3 * CAAM_CMD_SZ)
++#define DESC_AHASH_UPDATE_LEN         (6 * CAAM_CMD_SZ)
++#define DESC_AHASH_UPDATE_FIRST_LEN   (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
++#define DESC_AHASH_FINAL_LEN          (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
++#define DESC_AHASH_DIGEST_LEN         (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
++
++void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
++                     int digestsize, int ctx_len, bool import_ctx, int era);
++
++#endif /* _CAAMHASH_DESC_H_ */
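For scale, a caller bounds its per-tfm descriptor buffer with these lengths plus the largest split key it may have to inline; roughly what caamhash.c does (CAAM_MAX_HASH_KEY_SIZE belongs to the caller, not to this header):

	#define DESC_HASH_MAX_USED_BYTES	(DESC_AHASH_FINAL_LEN + \
						 CAAM_MAX_HASH_KEY_SIZE)
	#define DESC_HASH_MAX_USED_LEN		(DESC_HASH_MAX_USED_BYTES / \
						 CAAM_CMD_SZ)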
 --- a/drivers/crypto/caam/caampkc.c
 +++ b/drivers/crypto/caam/caampkc.c
 @@ -18,6 +18,10 @@
@@ -21545,7 +23908,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
  /*
   * Descriptor to instantiate RNG State Handle 0 in normal mode and
-@@ -270,7 +271,7 @@ static int deinstantiate_rng(struct devi
+@@ -274,7 +275,7 @@ static int deinstantiate_rng(struct devi
                /*
                 * If the corresponding bit is set, then it means the state
                 * handle was initialized by us, and thus it needs to be
@@ -21554,7 +23917,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                 */
                if ((1 << sh_idx) & state_handle_mask) {
                        /*
-@@ -303,20 +304,24 @@ static int caam_remove(struct platform_d
+@@ -307,20 +308,24 @@ static int caam_remove(struct platform_d
        struct device *ctrldev;
        struct caam_drv_private *ctrlpriv;
        struct caam_ctrl __iomem *ctrl;
@@ -21587,7 +23950,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
  
        /* Shut down debug views */
-@@ -331,8 +336,8 @@ static int caam_remove(struct platform_d
+@@ -335,8 +340,8 @@ static int caam_remove(struct platform_d
        clk_disable_unprepare(ctrlpriv->caam_ipg);
        clk_disable_unprepare(ctrlpriv->caam_mem);
        clk_disable_unprepare(ctrlpriv->caam_aclk);
@@ -21598,7 +23961,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        return 0;
  }
  
-@@ -366,11 +371,8 @@ static void kick_trng(struct platform_de
+@@ -370,11 +375,8 @@ static void kick_trng(struct platform_de
         */
        val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
              >> RTSDCTL_ENT_DLY_SHIFT;
@@ -21612,7 +23975,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        val = rd_reg32(&r4tst->rtsdctl);
        val = (val & ~RTSDCTL_ENT_DLY_MASK) |
-@@ -382,15 +384,12 @@ static void kick_trng(struct platform_de
+@@ -386,15 +388,12 @@ static void kick_trng(struct platform_de
        wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
        /* read the control register */
        val = rd_reg32(&r4tst->rtmctl);
@@ -21631,7 +23994,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  }
  
  /**
-@@ -411,28 +410,26 @@ int caam_get_era(void)
+@@ -415,28 +414,26 @@ int caam_get_era(void)
  }
  EXPORT_SYMBOL(caam_get_era);
  
@@ -21675,7 +24038,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        struct device *dev;
        struct device_node *nprop, *np;
        struct caam_ctrl __iomem *ctrl;
-@@ -452,9 +449,10 @@ static int caam_probe(struct platform_de
+@@ -456,9 +453,10 @@ static int caam_probe(struct platform_de
  
        dev = &pdev->dev;
        dev_set_drvdata(dev, ctrlpriv);
@@ -21687,7 +24050,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        /* Enable clocking */
        clk = caam_drv_identify_clk(&pdev->dev, "ipg");
        if (IS_ERR(clk)) {
-@@ -483,14 +481,16 @@ static int caam_probe(struct platform_de
+@@ -487,14 +485,16 @@ static int caam_probe(struct platform_de
        }
        ctrlpriv->caam_aclk = clk;
  
@@ -21711,7 +24074,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        ret = clk_prepare_enable(ctrlpriv->caam_ipg);
        if (ret < 0) {
-@@ -511,11 +511,13 @@ static int caam_probe(struct platform_de
+@@ -515,11 +515,13 @@ static int caam_probe(struct platform_de
                goto disable_caam_mem;
        }
  
@@ -21730,7 +24093,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        }
  
        /* Get configuration properties from device tree */
-@@ -542,13 +544,13 @@ static int caam_probe(struct platform_de
+@@ -546,13 +548,13 @@ static int caam_probe(struct platform_de
        else
                BLOCK_OFFSET = PG_SIZE_64K;
  
@@ -21749,7 +24112,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                         BLOCK_OFFSET * DECO_BLOCK_NUMBER
                         );
  
-@@ -557,12 +559,17 @@ static int caam_probe(struct platform_de
+@@ -561,12 +563,17 @@ static int caam_probe(struct platform_de
  
        /*
         * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
@@ -21772,7 +24135,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        /*
         *  Read the Compile Time paramters and SCFGR to determine
-@@ -590,64 +597,67 @@ static int caam_probe(struct platform_de
+@@ -594,64 +601,69 @@ static int caam_probe(struct platform_de
                              JRSTART_JR1_START | JRSTART_JR2_START |
                              JRSTART_JR3_START);
  
@@ -21788,7 +24151,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -                      dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
 -      else
 -              dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
--
++                      ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
++      } else {
++              ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
++      }
++      if (ret) {
++              dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
++              goto iounmap_ctrl;
++      }
 -      /*
 -       * Detect and enable JobRs
 -       * First, find out how many ring spec'ed, allocate references
@@ -21799,14 +24170,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -              if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
 -                  of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
 -                      rspec++;
-+                      ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
-+      } else {
-+              ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-+      }
-+      if (ret) {
-+              dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
-+              goto iounmap_ctrl;
-+      }
++      ctrlpriv->era = caam_get_era();
  
 -      ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
 -                                      sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
@@ -21879,7 +24243,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        }
  
        /* If no QI and no rings specified, quit and go home */
-@@ -662,8 +672,10 @@ static int caam_probe(struct platform_de
+@@ -666,8 +678,10 @@ static int caam_probe(struct platform_de
        /*
         * If SEC has RNG version >= 4 and RNG state handle has not been
         * already instantiated, do RNG instantiation
@@ -21891,12 +24255,14 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                ctrlpriv->rng4_sh_init =
                        rd_reg32(&ctrl->r4tst[0].rdsta);
                /*
-@@ -731,77 +743,46 @@ static int caam_probe(struct platform_de
+@@ -734,78 +748,47 @@ static int caam_probe(struct platform_de
        /* Report "alive" for developer to see */
        dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
-                caam_get_era());
+-               caam_get_era());
 -      dev_info(dev, "job rings = %d, qi = %d\n",
 -               ctrlpriv->total_jobrs, ctrlpriv->qi_present);
++               ctrlpriv->era);
 +      dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
 +               ctrlpriv->total_jobrs, ctrlpriv->qi_present,
 +               caam_dpaa2 ? "yes" : "no");
@@ -22003,7 +24369,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_kek = debugfs_create_blob("kek",
                                                S_IRUSR |
-@@ -809,7 +790,7 @@ static int caam_probe(struct platform_de
+@@ -813,7 +796,7 @@ static int caam_probe(struct platform_de
                                                ctrlpriv->ctl,
                                                &ctrlpriv->ctl_kek_wrap);
  
@@ -22012,7 +24378,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
                                                 S_IRUSR |
-@@ -817,7 +798,7 @@ static int caam_probe(struct platform_de
+@@ -821,7 +804,7 @@ static int caam_probe(struct platform_de
                                                 ctrlpriv->ctl,
                                                 &ctrlpriv->ctl_tkek_wrap);
  
@@ -22021,7 +24387,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
                                                 S_IRUSR |
-@@ -828,13 +809,17 @@ static int caam_probe(struct platform_de
+@@ -832,13 +815,17 @@ static int caam_probe(struct platform_de
        return 0;
  
  caam_remove:
@@ -22040,7 +24406,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  disable_caam_aclk:
        clk_disable_unprepare(ctrlpriv->caam_aclk);
  disable_caam_mem:
-@@ -844,17 +829,6 @@ disable_caam_ipg:
+@@ -848,17 +835,6 @@ disable_caam_ipg:
        return ret;
  }
  
@@ -22122,7 +24488,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  #define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
  #define LDST_SRCDST_WORD_PKHA_A_SZ    (0x10 << LDST_SRCDST_SHIFT)
  #define LDST_SRCDST_WORD_PKHA_B_SZ    (0x11 << LDST_SRCDST_SHIFT)
-@@ -400,7 +395,7 @@ struct sec4_sg_entry {
+@@ -360,6 +355,7 @@ struct sec4_sg_entry {
+ #define FIFOLD_TYPE_PK_N      (0x08 << FIFOLD_TYPE_SHIFT)
+ #define FIFOLD_TYPE_PK_A      (0x0c << FIFOLD_TYPE_SHIFT)
+ #define FIFOLD_TYPE_PK_B      (0x0d << FIFOLD_TYPE_SHIFT)
++#define FIFOLD_TYPE_IFIFO     (0x0f << FIFOLD_TYPE_SHIFT)
+ /* Other types. Need to OR in last/flush bits as desired */
+ #define FIFOLD_TYPE_MSG_MASK  (0x38 << FIFOLD_TYPE_SHIFT)
+@@ -400,7 +396,7 @@ struct sec4_sg_entry {
  #define FIFOST_TYPE_PKHA_N     (0x08 << FIFOST_TYPE_SHIFT)
  #define FIFOST_TYPE_PKHA_A     (0x0c << FIFOST_TYPE_SHIFT)
  #define FIFOST_TYPE_PKHA_B     (0x0d << FIFOST_TYPE_SHIFT)
@@ -22131,7 +24505,57 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
  #define FIFOST_TYPE_PKHA_E_JKEK        (0x22 << FIFOST_TYPE_SHIFT)
  #define FIFOST_TYPE_PKHA_E_TKEK        (0x23 << FIFOST_TYPE_SHIFT)
-@@ -1107,8 +1102,8 @@ struct sec4_sg_entry {
+@@ -413,6 +409,7 @@ struct sec4_sg_entry {
+ #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
+ #define FIFOST_TYPE_RNGSTORE   (0x34 << FIFOST_TYPE_SHIFT)
+ #define FIFOST_TYPE_RNGFIFO    (0x35 << FIFOST_TYPE_SHIFT)
++#define FIFOST_TYPE_METADATA   (0x3e << FIFOST_TYPE_SHIFT)
+ #define FIFOST_TYPE_SKIP       (0x3f << FIFOST_TYPE_SHIFT)
+ /*
+@@ -449,6 +446,18 @@ struct sec4_sg_entry {
+ #define OP_PCLID_DSAVERIFY    (0x16 << OP_PCLID_SHIFT)
+ #define OP_PCLID_RSAENC_PUBKEY  (0x18 << OP_PCLID_SHIFT)
+ #define OP_PCLID_RSADEC_PRVKEY  (0x19 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_MD5      (0x20 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA1     (0x21 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA224   (0x22 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA256   (0x23 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA384   (0x24 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA512   (0x25 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_MD5  (0x60 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA224       (0x62 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA256       (0x63 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA384       (0x64 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA512       (0x65 << OP_PCLID_SHIFT)
+ /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
+ #define OP_PCLID_IPSEC                (0x01 << OP_PCLID_SHIFT)
+@@ -1098,6 +1107,22 @@ struct sec4_sg_entry {
+ /* MacSec protinfos */
+ #define OP_PCL_MACSEC                          0x0001
++/* Derived Key Protocol (DKP) Protinfo */
++#define OP_PCL_DKP_SRC_SHIFT  14
++#define OP_PCL_DKP_SRC_MASK   (3 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_SRC_IMM    (0 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_SRC_SEQ    (1 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_SRC_PTR    (2 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_SRC_SGF    (3 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_DST_SHIFT  12
++#define OP_PCL_DKP_DST_MASK   (3 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_DST_IMM    (0 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_DST_SEQ    (1 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_DST_PTR    (2 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_DST_SGF    (3 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_KEY_SHIFT  0
++#define OP_PCL_DKP_KEY_MASK   (0xfff << OP_PCL_DKP_KEY_SHIFT)
++
+ /* PKI unidirectional protocol protinfo bits */
+ #define OP_PCL_PKPROT_TEST                     0x0008
+ #define OP_PCL_PKPROT_DECRYPT                  0x0004
+@@ -1107,8 +1132,8 @@ struct sec4_sg_entry {
  /* For non-protocol/alg-only op commands */
  #define OP_ALG_TYPE_SHIFT     24
  #define OP_ALG_TYPE_MASK      (0x7 << OP_ALG_TYPE_SHIFT)
@@ -22142,7 +24566,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
  #define OP_ALG_ALGSEL_SHIFT   16
  #define OP_ALG_ALGSEL_MASK    (0xff << OP_ALG_ALGSEL_SHIFT)
-@@ -1249,7 +1244,7 @@ struct sec4_sg_entry {
+@@ -1249,7 +1274,7 @@ struct sec4_sg_entry {
  #define OP_ALG_PKMODE_MOD_PRIMALITY   0x00f
  
  /* PKHA mode copy-memory functions */
@@ -22151,7 +24575,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  #define OP_ALG_PKMODE_SRC_REG_MASK    (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
  #define OP_ALG_PKMODE_DST_REG_SHIFT   10
  #define OP_ALG_PKMODE_DST_REG_MASK    (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
-@@ -1445,7 +1440,7 @@ struct sec4_sg_entry {
+@@ -1445,10 +1470,11 @@ struct sec4_sg_entry {
  #define MATH_SRC1_REG2                (0x02 << MATH_SRC1_SHIFT)
  #define MATH_SRC1_REG3                (0x03 << MATH_SRC1_SHIFT)
  #define MATH_SRC1_IMM         (0x04 << MATH_SRC1_SHIFT)
@@ -22160,7 +24584,19 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  #define MATH_SRC1_INFIFO      (0x0a << MATH_SRC1_SHIFT)
  #define MATH_SRC1_OUTFIFO     (0x0b << MATH_SRC1_SHIFT)
  #define MATH_SRC1_ONE         (0x0c << MATH_SRC1_SHIFT)
-@@ -1629,4 +1624,31 @@ struct sec4_sg_entry {
++#define MATH_SRC1_ZERO                (0x0f << MATH_SRC1_SHIFT)
+ /* Destination selectors */
+ #define MATH_DEST_SHIFT               8
+@@ -1457,6 +1483,7 @@ struct sec4_sg_entry {
+ #define MATH_DEST_REG1                (0x01 << MATH_DEST_SHIFT)
+ #define MATH_DEST_REG2                (0x02 << MATH_DEST_SHIFT)
+ #define MATH_DEST_REG3                (0x03 << MATH_DEST_SHIFT)
++#define MATH_DEST_DPOVRD      (0x07 << MATH_DEST_SHIFT)
+ #define MATH_DEST_SEQINLEN    (0x08 << MATH_DEST_SHIFT)
+ #define MATH_DEST_SEQOUTLEN   (0x09 << MATH_DEST_SHIFT)
+ #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+@@ -1629,4 +1656,31 @@ struct sec4_sg_entry {
  /* Frame Descriptor Command for Replacement Job Descriptor */
  #define FD_CMD_REPLACE_JOB_DESC                               0x20000000
  
@@ -22291,7 +24727,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  }
  
 -static inline void append_data(u32 *desc, void *data, int len)
-+static inline void append_data(u32 * const desc, void *data, int len)
++static inline void append_data(u32 * const desc, const void *data, int len)
  {
        u32 *offset = desc_end(desc);
  
@@ -22344,7 +24780,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  }
  
 -static inline void append_cmd_data(u32 *desc, void *data, int len,
-+static inline void append_cmd_data(u32 * const desc, void *data, int len,
++static inline void append_cmd_data(u32 * const desc, const void *data, int len,
                                   u32 command)
  {
        append_cmd(desc, command | IMMEDIATE | len);
@@ -22431,7 +24867,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
  #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
-+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
++static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
                                         unsigned int len, u32 options) \
  { \
        PRINT_POS; \
@@ -22458,7 +24894,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
   */
  #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
-+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
++static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
                                         unsigned int data_len, \
                                         unsigned int len, u32 options) \
  { \
@@ -22471,7 +24907,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
                                             u32 options) \
  { \
        PRINT_POS; \
-@@ -426,3 +434,66 @@ do { \
+@@ -426,3 +434,107 @@ do { \
        APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
  #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
        APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
@@ -22493,7 +24929,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      unsigned int keylen_pad;
 +      union {
 +              dma_addr_t key_dma;
-+              void *key_virt;
++              const void *key_virt;
 +      };
 +      bool key_inline;
 +};
@@ -22537,10 +24973,51 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +      return (rem_bytes >= 0) ? 0 : -1;
 +}
 +
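The helper ending above (desc_inline_query() in the full header) returns a bitmask telling the caller which data items still fit inline beside the shared-descriptor base, charging a pointer's worth of space for each item that does not. A hedged sketch of a typical caller deciding where a key should live (DESC_JOB_IO_LEN is the caller's job-descriptor overhead; the wrapper is illustrative):

	static int pick_key_placement(struct alginfo *adata,
				      dma_addr_t key_dma,
				      const void *key_virt,
				      unsigned int sd_base_len)
	{
		u32 inl_mask;
		unsigned int data_len = adata->keylen_pad;

		if (desc_inline_query(sd_base_len, DESC_JOB_IO_LEN,
				      &data_len, &inl_mask, 1) < 0)
			return -EINVAL;

		adata->key_inline = !!(inl_mask & 1);
		if (adata->key_inline)
			adata->key_virt = key_virt;
		else
			adata->key_dma = key_dma;

		return 0;
	}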
++/**
++ * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
++ * @desc: pointer to buffer used for descriptor construction
++ * @adata: pointer to authentication transform definitions.
++ *         keylen should be the length of initial key, while keylen_pad
++ *         the length of the derived (split) key.
++ *         Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
++ *         SHA256, SHA384, SHA512}.
++ */
++static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
++{
++      u32 protid;
++
++      /*
++       * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
++       * to OP_PCLID_DKP_{MD5, SHA*}
++       */
++      protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
++               (0x20 << OP_ALG_ALGSEL_SHIFT);
++
++      if (adata->key_inline) {
++              int words;
++
++              append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
++                               OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
++                               adata->keylen);
++              append_data(desc, adata->key_virt, adata->keylen);
++
++              /* Reserve space in descriptor buffer for the derived key */
++              words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
++                       ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
++              if (words)
++                      (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
++      } else {
++              append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
++                               OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
++                               adata->keylen);
++              append_ptr(desc, adata->key_dma);
++      }
++}
++
 +#endif /* DESC_CONSTR_H */
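Putting append_proto_dkp() to work: on Era >= 6 parts a key-setup path can hand the plain HMAC key to the DKP protocol and let the engine derive the split key in place, instead of running the old split-key generation job. A minimal sketch under those assumptions (mapping and error handling omitted; the function name is illustrative):

	static void build_dkp_desc(u32 * const desc, struct alginfo *adata,
				   const void *key, unsigned int keylen,
				   unsigned int keylen_pad)
	{
		adata->algtype = OP_ALG_ALGSEL_SHA256; /* any valid selector */
		adata->keylen = keylen;
		adata->keylen_pad = keylen_pad;
		adata->key_virt = key;
		adata->key_inline = true;

		init_sh_desc(desc, HDR_SHARE_SERIAL);
		/* immediate key in, derived split key back in its place */
		append_proto_dkp(desc, adata);
	}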
 --- /dev/null
 +++ b/drivers/crypto/caam/dpseci.c
-@@ -0,0 +1,859 @@
+@@ -0,0 +1,858 @@
 +/*
 + * Copyright 2013-2016 Freescale Semiconductor Inc.
 + * Copyright 2017 NXP
@@ -22575,8 +25052,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + * POSSIBILITY OF SUCH DAMAGE.
 + */
 +
-+#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
-+#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
++#include <linux/fsl/mc.h>
 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
 +#include "dpseci.h"
 +#include "dpseci_cmd.h"
@@ -22600,7 +25076,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
 +              u16 *token)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_open *cmd_params;
 +      int err;
 +
@@ -22631,7 +25107,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + */
 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +
 +      cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
 +                                        cmd_flags,
@@ -22664,7 +25140,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
 +                const struct dpseci_cfg *cfg, u32 *obj_id)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_create *cmd_params;
 +      int i, err;
 +
@@ -22704,7 +25180,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
 +                 u32 object_id)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_destroy *cmd_params;
 +
 +      cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
@@ -22726,7 +25202,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + */
 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +
 +      cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
 +                                        cmd_flags,
@@ -22744,7 +25220,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + */
 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +
 +      cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
 +                                        cmd_flags,
@@ -22765,7 +25241,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 +                    int *en)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_rsp_is_enabled *rsp_params;
 +      int err;
 +
@@ -22792,7 +25268,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 + */
 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +
 +      cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
 +                                        cmd_flags,
@@ -22814,7 +25290,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 +                        u8 irq_index, u8 *en)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_irq_enable *cmd_params;
 +      struct dpseci_rsp_get_irq_enable *rsp_params;
 +      int err;
@@ -22852,7 +25328,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 +                        u8 irq_index, u8 en)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_irq_enable *cmd_params;
 +
 +      cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
@@ -22881,7 +25357,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 +                      u8 irq_index, u32 *mask)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_irq_mask *cmd_params;
 +      int err;
 +
@@ -22918,7 +25394,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 +                      u8 irq_index, u32 mask)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_irq_mask *cmd_params;
 +
 +      cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
@@ -22946,7 +25422,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 +                        u8 irq_index, u32 *status)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_irq_status *cmd_params;
 +      int err;
 +
@@ -22980,7 +25456,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 +                          u8 irq_index, u32 status)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_irq_status *cmd_params;
 +
 +      cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
@@ -23005,7 +25481,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 +                        struct dpseci_attr *attr)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_rsp_get_attributes *rsp_params;
 +      int err;
 +
@@ -23040,7 +25516,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 +                      u8 queue, const struct dpseci_rx_queue_cfg *cfg)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_queue *cmd_params;
 +
 +      cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
@@ -23073,7 +25549,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 +                      u8 queue, struct dpseci_rx_queue_attr *attr)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_queue *cmd_params;
 +      int err;
 +
@@ -23111,7 +25587,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 +                      u8 queue, struct dpseci_tx_queue_attr *attr)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_queue *cmd_params;
 +      struct dpseci_rsp_get_tx_queue *rsp_params;
 +      int err;
@@ -23144,7 +25620,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 +                      struct dpseci_sec_attr *attr)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_rsp_get_sec_attr *rsp_params;
 +      int err;
 +
@@ -23189,7 +25665,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
 +                          struct dpseci_sec_counters *counters)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_rsp_get_sec_counters *rsp_params;
 +      int err;
 +
@@ -23225,7 +25701,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
 +                         u16 *major_ver, u16 *minor_ver)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_rsp_get_api_version *rsp_params;
 +      int err;
 +
@@ -23257,7 +25733,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
 +                 u8 options, struct opr_cfg *cfg)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_opr *cmd_params;
 +
 +      cmd.header = mc_encode_cmd_header(
@@ -23290,7 +25766,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
 +                 struct opr_cfg *cfg, struct opr_qry *qry)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_opr *cmd_params;
 +      struct dpseci_rsp_get_opr *rsp_params;
 +      int err;
@@ -23339,7 +25815,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
 +      u16 token, const struct dpseci_congestion_notification_cfg *cfg)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_congestion_notification *cmd_params;
 +
 +      cmd.header = mc_encode_cmd_header(
@@ -23374,7 +25850,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
 +      u16 token, struct dpseci_congestion_notification_cfg *cfg)
 +{
-+      struct mc_command cmd = { 0 };
++      struct fsl_mc_command cmd = { 0 };
 +      struct dpseci_cmd_congestion_notification *rsp_params;
 +      int err;
 +
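All of the dpseci accessors above share one shape: encode a header for the command ID, lay a command-specific struct over cmd.params, send the command synchronously through the MC portal, and, for queries, pull fields back out of the response. Schematically, with a made-up command ID for illustration only:

	int dpseci_example_query(struct fsl_mc_io *mc_io, u32 cmd_flags,
				 u16 token, u32 *out)
	{
		struct fsl_mc_command cmd = { 0 };
		int err;

		cmd.header = mc_encode_cmd_header(DPSECI_CMDID_EXAMPLE,
						  cmd_flags, token);
		err = mc_send_command(mc_io, &cmd); /* blocks for the MC */
		if (err)
			return err;

		*out = le32_to_cpu(*(__le32 *)cmd.params); /* response word */
		return 0;
	}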
@@ -24260,15 +26736,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  #endif /* CAAM_ERROR_H */
 --- a/drivers/crypto/caam/intern.h
 +++ b/drivers/crypto/caam/intern.h
-@@ -41,6 +41,7 @@ struct caam_drv_private_jr {
-       struct device           *dev;
-       int ridx;
-       struct caam_job_ring __iomem *rregs;    /* JobR's register space */
-+      struct tasklet_struct irqtask;
-       int irq;                        /* One per queue */
-       /* Number of scatterlist crypt transforms active on the JobR */
-@@ -63,10 +64,9 @@ struct caam_drv_private_jr {
+@@ -64,10 +64,9 @@ struct caam_drv_private_jr {
   * Driver-private storage for a single CAAM block instance
   */
  struct caam_drv_private {
@@ -24282,7 +26750,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        /* Physical-presence section */
        struct caam_ctrl __iomem *ctrl; /* controller region */
-@@ -102,11 +102,6 @@ struct caam_drv_private {
+@@ -84,6 +83,7 @@ struct caam_drv_private {
+       u8 qi_present;          /* Nonzero if QI present in device */
+       int secvio_irq;         /* Security violation interrupt number */
+       int virt_en;            /* Virtualization enabled in CAAM */
++      int era;                /* CAAM Era (internal HW revision) */
+ #define       RNG4_MAX_HANDLES 2
+       /* RNG4 block */
+@@ -103,11 +103,6 @@ struct caam_drv_private {
  #ifdef CONFIG_DEBUG_FS
        struct dentry *dfs_root;
        struct dentry *ctl; /* controller dir */
@@ -24294,7 +26770,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
        struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
  #endif
-@@ -114,4 +109,22 @@ struct caam_drv_private {
+@@ -115,4 +110,22 @@ struct caam_drv_private {
  
  void caam_jr_algapi_init(struct device *dev);
  void caam_jr_algapi_remove(struct device *dev);
@@ -24327,79 +26803,68 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  #include "regs.h"
  #include "jr.h"
  #include "desc.h"
-@@ -73,6 +74,8 @@ static int caam_jr_shutdown(struct devic
+@@ -22,6 +23,14 @@ struct jr_driver_data {
  
-       ret = caam_reset_hw_jr(dev);
+ static struct jr_driver_data driver_data;
  
-+      tasklet_kill(&jrp->irqtask);
++static int jr_driver_probed;
 +
-       /* Release interrupt */
-       free_irq(jrp->irq, dev);
-@@ -128,7 +131,7 @@ static irqreturn_t caam_jr_interrupt(int
-       /*
-        * Check the output ring for ready responses, kick
--       * the threaded irq if jobs done.
-+       * tasklet if jobs done.
-        */
-       irqstate = rd_reg32(&jrp->rregs->jrintstatus);
-       if (!irqstate)
-@@ -150,13 +153,18 @@ static irqreturn_t caam_jr_interrupt(int
-       /* Have valid interrupt at this point, just ACK and trigger */
-       wr_reg32(&jrp->rregs->jrintstatus, irqstate);
--      return IRQ_WAKE_THREAD;
-+      preempt_disable();
-+      tasklet_schedule(&jrp->irqtask);
-+      preempt_enable();
++int caam_jr_driver_probed(void)
++{
++      return jr_driver_probed;
++}
++EXPORT_SYMBOL(caam_jr_driver_probed);
 +
-+      return IRQ_HANDLED;
- }
--static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
-+/* Deferred service handler, run as interrupt-fired tasklet */
-+static void caam_jr_dequeue(unsigned long devarg)
+ static int caam_reset_hw_jr(struct device *dev)
  {
-       int hw_idx, sw_idx, i, head, tail;
--      struct device *dev = st_dev;
-+      struct device *dev = (struct device *)devarg;
        struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
-       void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
-       u32 *userdesc, userstatus;
-@@ -230,8 +238,6 @@ static irqreturn_t caam_jr_threadirq(int
+@@ -118,6 +127,8 @@ static int caam_jr_remove(struct platfor
+               dev_err(jrdev, "Failed to shut down job ring\n");
+       irq_dispose_mapping(jrpriv->irq);
  
-       /* reenable / unmask IRQs */
-       clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
--
--      return IRQ_HANDLED;
++      jr_driver_probed--;
++
+       return ret;
  }
  
- /**
-@@ -389,10 +395,11 @@ static int caam_jr_init(struct device *d
-       jrp = dev_get_drvdata(dev);
-+      tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
-+
-       /* Connect job ring interrupt handler. */
--      error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
--                                   caam_jr_threadirq, IRQF_SHARED,
--                                   dev_name(dev), dev);
-+      error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
-+                          dev_name(dev), dev);
-       if (error) {
-               dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
-                       jrp->ridx, jrp->irq);
-@@ -454,6 +461,7 @@ out_free_inpring:
- out_free_irq:
-       free_irq(jrp->irq, dev);
- out_kill_deq:
-+      tasklet_kill(&jrp->irqtask);
-       return error;
- }
+@@ -281,6 +292,36 @@ struct device *caam_jr_alloc(void)
+ EXPORT_SYMBOL(caam_jr_alloc);
  
-@@ -489,15 +497,28 @@ static int caam_jr_probe(struct platform
+ /**
++ * caam_jridx_alloc() - Alloc a specific job ring based on its index.
++ *
++ * returns :  pointer to the newly allocated physical
++ *          JobR dev can be written to if successful.
++ **/
++struct device *caam_jridx_alloc(int idx)
++{
++      struct caam_drv_private_jr *jrpriv;
++      struct device *dev = ERR_PTR(-ENODEV);
++
++      spin_lock(&driver_data.jr_alloc_lock);
++
++      if (list_empty(&driver_data.jr_list))
++              goto end;
++
++      list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
++              if (jrpriv->ridx == idx) {
++                      atomic_inc(&jrpriv->tfm_count);
++                      dev = jrpriv->dev;
++                      break;
++              }
++      }
++
++end:
++      spin_unlock(&driver_data.jr_alloc_lock);
++      return dev;
++}
++EXPORT_SYMBOL(caam_jridx_alloc);
++
++/**
+  * caam_jr_free() - Free the Job Ring
+  * @rdev     - points to the dev that identifies the Job ring to
+  *             be released.
+@@ -497,15 +538,28 @@ static int caam_jr_probe(struct platform
                return -ENOMEM;
        }
  
@@ -24435,7 +26900,12 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
  
        /* Identify the interrupt */
        jrpriv->irq = irq_of_parse_and_map(nprop, 0);
-@@ -520,7 +541,7 @@ static int caam_jr_probe(struct platform
+@@ -525,10 +579,12 @@ static int caam_jr_probe(struct platform
+       atomic_set(&jrpriv->tfm_count, 0);
++      jr_driver_probed++;
++
        return 0;
  }
  
@@ -24444,6 +26914,18 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
        {
                .compatible = "fsl,sec-v4.0-job-ring",
        },
+--- a/drivers/crypto/caam/jr.h
++++ b/drivers/crypto/caam/jr.h
+@@ -8,7 +8,9 @@
+ #define JR_H
+ /* Prototypes for backend-level services exposed to APIs */
++int caam_jr_driver_probed(void);
+ struct device *caam_jr_alloc(void);
++struct device *caam_jridx_alloc(int idx);
+ void caam_jr_free(struct device *rdev);
+ int caam_jr_enqueue(struct device *dev, u32 *desc,
+                   void (*cbk)(struct device *dev, u32 *desc, u32 status,
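The two new prototypes let a client pin work to one specific ring rather than round-robin over whatever caam_jr_alloc() hands out. A hedged usage sketch (the wrapper is illustrative):

	static struct device *grab_ring0(void)
	{
		struct device *jrdev;

		if (!caam_jr_driver_probed())
			return ERR_PTR(-ENODEV); /* JR driver not up yet */

		jrdev = caam_jridx_alloc(0); /* ERR_PTR(-ENODEV) if absent */
		if (IS_ERR(jrdev))
			return jrdev;

		/* ... caam_jr_enqueue(jrdev, desc, cbk, arg) ... */
		return jrdev; /* balance with caam_jr_free(jrdev) */
	}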
 --- a/drivers/crypto/caam/key_gen.c
 +++ b/drivers/crypto/caam/key_gen.c
 @@ -41,15 +41,29 @@ Split key generation--------------------
@@ -24698,7 +27180,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +}
 --- /dev/null
 +++ b/drivers/crypto/caam/qi.c
-@@ -0,0 +1,797 @@
+@@ -0,0 +1,804 @@
 +/*
 + * CAAM/SEC 4.x QI transport/backend driver
 + * Queue Interface backend functionality
@@ -24810,7 +27292,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +
 +      fd.cmd = 0;
 +      fd.format = qm_fd_compound;
-+      fd.cong_weight = req->fd_sgt[1].length;
++      fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
 +      fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
 +                            DMA_BIDIRECTIONAL);
 +      if (dma_mapping_error(qidev, fd.addr)) {
@@ -25277,8 +27759,15 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 +              return qman_cb_dqrr_stop;
 +
 +      fd = &dqrr->fd;
-+      if (unlikely(fd->status))
-+              dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
++      if (unlikely(fd->status)) {
++              u32 ssrc = fd->status & JRSTA_SSRC_MASK;
++              u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
++
++              if (ssrc != JRSTA_SSRC_CCB_ERROR ||
++                  err_id != JRSTA_CCBERR_ERRID_ICVCHK)
++                      dev_err(qidev, "Error: %#x in CAAM response FD\n",
++                              fd->status);
++      }
 +
 +      if (unlikely(fd->format != qm_fd_compound)) {
 +              dev_err(qidev, "Non-compound FD from CAAM\n");
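The reworked status check above intentionally stays quiet in exactly one error case: a CCB ICV-check failure, which the requester surfaces as an ordinary -EBADMSG on authenticated decryption rather than a device fault. Factored out, the predicate reads roughly:

	/* true when the FD status encodes only an ICV (integrity) mismatch */
	static bool caam_fd_status_is_icv_err(u32 status)
	{
		return (status & JRSTA_SSRC_MASK) == JRSTA_SSRC_CCB_ERROR &&
		       (status & JRSTA_CCBERR_ERRID_MASK) ==
		       JRSTA_CCBERR_ERRID_ICVCHK;
	}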
@@ -26125,6 +28614,23 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
 -      return sg_nents;
 -}
 +#endif /* _SG_SW_SEC4_H_ */
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1241,6 +1241,14 @@ static int ipsec_esp(struct talitos_edes
+       ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+                                sg_count, areq->assoclen, tbl_off, elen);
++      /*
++       * In case of SEC 2.x+, cipher in len must include only the ciphertext,
++       * while extent is used for ICV len.
++       */
++      if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
++          (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
++              desc->ptr[4].len = cpu_to_be16(cryptlen);
++
+       if (ret > 1) {
+               tbl_off += ret;
+               sync_needed = true;
 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c
 +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
 @@ -516,7 +516,7 @@ err: