1 From 9c9579d76ccd6e738ab98c9b4c73c168912cdb8a Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 27 Sep 2017 15:02:01 +0800
4 Subject: [PATCH] crypto: support layerscape
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
9 This is an integrated patch for layerscape sec support.
11 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
12 Signed-off-by: Fabio Estevam <festevam@gmail.com>
13 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
14 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
15 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
16 Signed-off-by: Eric Biggers <ebiggers@google.com>
17 Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
18 Signed-off-by: Xulin Sun <xulin.sun@windriver.com>
19 Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
20 Signed-off-by: Marcus Folkesson <marcus.folkesson@gmail.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Andrew Lutomirski <luto@kernel.org>
23 Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
24 Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
25 Signed-off-by: Marcelo Cerri <marcelo.cerri@canonical.com>
26 Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
27 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
28 Signed-off-by: Laura Abbott <labbott@redhat.com>
29 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
30 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
34 crypto/acompress.c | 169 +
35 crypto/algboss.c | 12 +-
36 crypto/crypto_user.c | 19 +
37 crypto/scompress.c | 356 ++
38 crypto/tcrypt.c | 17 +-
39 crypto/testmgr.c | 1701 ++++----
40 crypto/testmgr.h | 1125 +++---
41 crypto/tls.c | 607 +++
42 drivers/crypto/caam/Kconfig | 72 +-
43 drivers/crypto/caam/Makefile | 15 +-
44 drivers/crypto/caam/caamalg.c | 2125 +++-------
45 drivers/crypto/caam/caamalg_desc.c | 1913 +++++++++
46 drivers/crypto/caam/caamalg_desc.h | 127 +
47 drivers/crypto/caam/caamalg_qi.c | 2877 +++++++++++++
48 drivers/crypto/caam/caamalg_qi2.c | 4428 +++++++++++++++++++++
49 drivers/crypto/caam/caamalg_qi2.h | 265 ++
50 drivers/crypto/caam/caamhash.c | 521 +--
51 drivers/crypto/caam/caampkc.c | 471 ++-
52 drivers/crypto/caam/caampkc.h | 58 +
53 drivers/crypto/caam/caamrng.c | 16 +-
54 drivers/crypto/caam/compat.h | 1 +
55 drivers/crypto/caam/ctrl.c | 356 +-
56 drivers/crypto/caam/ctrl.h | 2 +
57 drivers/crypto/caam/desc.h | 55 +-
58 drivers/crypto/caam/desc_constr.h | 139 +-
59 drivers/crypto/caam/dpseci.c | 859 ++++
60 drivers/crypto/caam/dpseci.h | 395 ++
61 drivers/crypto/caam/dpseci_cmd.h | 261 ++
62 drivers/crypto/caam/error.c | 127 +-
63 drivers/crypto/caam/error.h | 10 +-
64 drivers/crypto/caam/intern.h | 31 +-
65 drivers/crypto/caam/jr.c | 97 +-
66 drivers/crypto/caam/jr.h | 2 +
67 drivers/crypto/caam/key_gen.c | 32 +-
68 drivers/crypto/caam/key_gen.h | 36 +-
69 drivers/crypto/caam/pdb.h | 62 +
70 drivers/crypto/caam/pkc_desc.c | 36 +
71 drivers/crypto/caam/qi.c | 797 ++++
72 drivers/crypto/caam/qi.h | 204 +
73 drivers/crypto/caam/regs.h | 63 +-
74 drivers/crypto/caam/sg_sw_qm.h | 126 +
75 drivers/crypto/caam/sg_sw_qm2.h | 81 +
76 drivers/crypto/caam/sg_sw_sec4.h | 60 +-
77 drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +-
78 drivers/staging/wilc1000/linux_wlan.c | 2 +-
79 drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +-
80 include/crypto/acompress.h | 269 ++
81 include/crypto/internal/acompress.h | 81 +
82 include/crypto/internal/scompress.h | 136 +
83 include/linux/crypto.h | 3 +
84 include/uapi/linux/cryptouser.h | 5 +
85 scripts/spelling.txt | 3 +
86 sound/soc/amd/acp-pcm-dma.c | 2 +-
87 55 files changed, 17310 insertions(+), 3955 deletions(-)
88 create mode 100644 crypto/acompress.c
89 create mode 100644 crypto/scompress.c
90 create mode 100644 crypto/tls.c
91 create mode 100644 drivers/crypto/caam/caamalg_desc.c
92 create mode 100644 drivers/crypto/caam/caamalg_desc.h
93 create mode 100644 drivers/crypto/caam/caamalg_qi.c
94 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
95 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
96 create mode 100644 drivers/crypto/caam/dpseci.c
97 create mode 100644 drivers/crypto/caam/dpseci.h
98 create mode 100644 drivers/crypto/caam/dpseci_cmd.h
99 create mode 100644 drivers/crypto/caam/qi.c
100 create mode 100644 drivers/crypto/caam/qi.h
101 create mode 100644 drivers/crypto/caam/sg_sw_qm.h
102 create mode 100644 drivers/crypto/caam/sg_sw_qm2.h
103 create mode 100644 include/crypto/acompress.h
104 create mode 100644 include/crypto/internal/acompress.h
105 create mode 100644 include/crypto/internal/scompress.h
109 @@ -102,6 +102,15 @@ config CRYPTO_KPP
113 +config CRYPTO_ACOMP2
115 + select CRYPTO_ALGAPI2
119 + select CRYPTO_ALGAPI
120 + select CRYPTO_ACOMP2
123 tristate "RSA algorithm"
124 select CRYPTO_AKCIPHER
125 @@ -138,6 +147,7 @@ config CRYPTO_MANAGER2
126 select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
127 select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
128 select CRYPTO_KPP2 if !CRYPTO_MANAGER_DISABLE_TESTS
129 + select CRYPTO_ACOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
132 tristate "Userspace cryptographic algorithm configuration"
133 @@ -295,6 +305,26 @@ config CRYPTO_ECHAINIV
134 a sequence number xored with a salt. This is the default
138 + tristate "TLS support"
140 + select CRYPTO_BLKCIPHER
141 + select CRYPTO_MANAGER
144 + select CRYPTO_AUTHENC
146 + Support for TLS 1.0 record encryption and decryption
148 + This module adds support for encryption/decryption of TLS 1.0 frames
149 + using blockcipher algorithms. The name of the resulting algorithm is
150 + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
151 + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
152 + accelerated versions will be used automatically if available.
154 + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
155 + operations through AF_ALG or cryptodev interfaces
157 comment "Block modes"
160 --- a/crypto/Makefile
161 +++ b/crypto/Makefile
162 @@ -51,6 +51,9 @@ rsa_generic-y += rsa_helper.o
163 rsa_generic-y += rsa-pkcs1pad.o
164 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
166 +obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
167 +obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
169 cryptomgr-y := algboss.o testmgr.o
171 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
172 @@ -115,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
173 obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
174 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
175 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
176 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
177 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
178 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
179 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
181 +++ b/crypto/acompress.c
184 + * Asynchronous Compression operations
186 + * Copyright (c) 2016, Intel Corporation
187 + * Authors: Weigang Li <weigang.li@intel.com>
188 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
190 + * This program is free software; you can redistribute it and/or modify it
191 + * under the terms of the GNU General Public License as published by the Free
192 + * Software Foundation; either version 2 of the License, or (at your option)
193 + * any later version.
196 +#include <linux/errno.h>
197 +#include <linux/kernel.h>
198 +#include <linux/module.h>
199 +#include <linux/seq_file.h>
200 +#include <linux/slab.h>
201 +#include <linux/string.h>
202 +#include <linux/crypto.h>
203 +#include <crypto/algapi.h>
204 +#include <linux/cryptouser.h>
205 +#include <net/netlink.h>
206 +#include <crypto/internal/acompress.h>
207 +#include <crypto/internal/scompress.h>
208 +#include "internal.h"
210 +static const struct crypto_type crypto_acomp_type;
213 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
215 + struct crypto_report_acomp racomp;
217 + strncpy(racomp.type, "acomp", sizeof(racomp.type));
219 + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
220 + sizeof(struct crypto_report_acomp), &racomp))
221 + goto nla_put_failure;
228 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
234 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
235 + __attribute__ ((unused));
237 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
239 + seq_puts(m, "type : acomp\n");
242 +static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
244 + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
245 + struct acomp_alg *alg = crypto_acomp_alg(acomp);
250 +static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
252 + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
253 + struct acomp_alg *alg = crypto_acomp_alg(acomp);
255 + if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
256 + return crypto_init_scomp_ops_async(tfm);
258 + acomp->compress = alg->compress;
259 + acomp->decompress = alg->decompress;
260 + acomp->dst_free = alg->dst_free;
261 + acomp->reqsize = alg->reqsize;
264 + acomp->base.exit = crypto_acomp_exit_tfm;
267 + return alg->init(acomp);
272 +static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
274 + int extsize = crypto_alg_extsize(alg);
276 + if (alg->cra_type != &crypto_acomp_type)
277 + extsize += sizeof(struct crypto_scomp *);
282 +static const struct crypto_type crypto_acomp_type = {
283 + .extsize = crypto_acomp_extsize,
284 + .init_tfm = crypto_acomp_init_tfm,
285 +#ifdef CONFIG_PROC_FS
286 + .show = crypto_acomp_show,
288 + .report = crypto_acomp_report,
289 + .maskclear = ~CRYPTO_ALG_TYPE_MASK,
290 + .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
291 + .type = CRYPTO_ALG_TYPE_ACOMPRESS,
292 + .tfmsize = offsetof(struct crypto_acomp, base),
295 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
298 + return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
300 +EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
302 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
304 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
305 + struct acomp_req *req;
307 + req = __acomp_request_alloc(acomp);
308 + if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
309 + return crypto_acomp_scomp_alloc_ctx(req);
313 +EXPORT_SYMBOL_GPL(acomp_request_alloc);
315 +void acomp_request_free(struct acomp_req *req)
317 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
318 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
320 + if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
321 + crypto_acomp_scomp_free_ctx(req);
323 + if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
324 + acomp->dst_free(req->dst);
328 + __acomp_request_free(req);
330 +EXPORT_SYMBOL_GPL(acomp_request_free);
332 +int crypto_register_acomp(struct acomp_alg *alg)
334 + struct crypto_alg *base = &alg->base;
336 + base->cra_type = &crypto_acomp_type;
337 + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
338 + base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
340 + return crypto_register_alg(base);
342 +EXPORT_SYMBOL_GPL(crypto_register_acomp);
344 +int crypto_unregister_acomp(struct acomp_alg *alg)
346 + return crypto_unregister_alg(&alg->base);
348 +EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
350 +MODULE_LICENSE("GPL");
351 +MODULE_DESCRIPTION("Asynchronous compression type");
352 --- a/crypto/algboss.c
353 +++ b/crypto/algboss.c
354 @@ -247,17 +247,9 @@ static int cryptomgr_schedule_test(struc
355 memcpy(param->alg, alg->cra_name, sizeof(param->alg));
356 type = alg->cra_flags;
358 - /* This piece of crap needs to disappear into per-type test hooks. */
359 -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
360 - type |= CRYPTO_ALG_TESTED;
362 - if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
363 - CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
364 - ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
365 - CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
366 - alg->cra_ablkcipher.ivsize))
367 + /* Do not test internal algorithms. */
368 + if (type & CRYPTO_ALG_INTERNAL)
369 type |= CRYPTO_ALG_TESTED;
374 --- a/crypto/crypto_user.c
375 +++ b/crypto/crypto_user.c
376 @@ -112,6 +112,21 @@ nla_put_failure:
380 +static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
382 + struct crypto_report_acomp racomp;
384 + strncpy(racomp.type, "acomp", sizeof(racomp.type));
386 + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
387 + sizeof(struct crypto_report_acomp), &racomp))
388 + goto nla_put_failure;
395 static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
397 struct crypto_report_akcipher rakcipher;
398 @@ -186,7 +201,11 @@ static int crypto_report_one(struct cryp
399 goto nla_put_failure;
402 + case CRYPTO_ALG_TYPE_ACOMPRESS:
403 + if (crypto_report_acomp(skb, alg))
404 + goto nla_put_failure;
407 case CRYPTO_ALG_TYPE_AKCIPHER:
408 if (crypto_report_akcipher(skb, alg))
409 goto nla_put_failure;
411 +++ b/crypto/scompress.c
414 + * Synchronous Compression operations
416 + * Copyright 2015 LG Electronics Inc.
417 + * Copyright (c) 2016, Intel Corporation
418 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
420 + * This program is free software; you can redistribute it and/or modify it
421 + * under the terms of the GNU General Public License as published by the Free
422 + * Software Foundation; either version 2 of the License, or (at your option)
423 + * any later version.
426 +#include <linux/errno.h>
427 +#include <linux/kernel.h>
428 +#include <linux/module.h>
429 +#include <linux/seq_file.h>
430 +#include <linux/slab.h>
431 +#include <linux/string.h>
432 +#include <linux/crypto.h>
433 +#include <linux/vmalloc.h>
434 +#include <crypto/algapi.h>
435 +#include <linux/cryptouser.h>
436 +#include <net/netlink.h>
437 +#include <linux/scatterlist.h>
438 +#include <crypto/scatterwalk.h>
439 +#include <crypto/internal/acompress.h>
440 +#include <crypto/internal/scompress.h>
441 +#include "internal.h"
443 +static const struct crypto_type crypto_scomp_type;
444 +static void * __percpu *scomp_src_scratches;
445 +static void * __percpu *scomp_dst_scratches;
446 +static int scomp_scratch_users;
447 +static DEFINE_MUTEX(scomp_lock);
450 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
452 + struct crypto_report_comp rscomp;
454 + strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
456 + if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
457 + sizeof(struct crypto_report_comp), &rscomp))
458 + goto nla_put_failure;
465 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
471 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
472 + __attribute__ ((unused));
474 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
476 + seq_puts(m, "type : scomp\n");
479 +static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
484 +static void crypto_scomp_free_scratches(void * __percpu *scratches)
491 + for_each_possible_cpu(i)
492 + vfree(*per_cpu_ptr(scratches, i));
494 + free_percpu(scratches);
497 +static void * __percpu *crypto_scomp_alloc_scratches(void)
499 + void * __percpu *scratches;
502 + scratches = alloc_percpu(void *);
506 + for_each_possible_cpu(i) {
509 + scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
512 + *per_cpu_ptr(scratches, i) = scratch;
518 + crypto_scomp_free_scratches(scratches);
522 +static void crypto_scomp_free_all_scratches(void)
524 + if (!--scomp_scratch_users) {
525 + crypto_scomp_free_scratches(scomp_src_scratches);
526 + crypto_scomp_free_scratches(scomp_dst_scratches);
527 + scomp_src_scratches = NULL;
528 + scomp_dst_scratches = NULL;
532 +static int crypto_scomp_alloc_all_scratches(void)
534 + if (!scomp_scratch_users++) {
535 + scomp_src_scratches = crypto_scomp_alloc_scratches();
536 + if (!scomp_src_scratches)
538 + scomp_dst_scratches = crypto_scomp_alloc_scratches();
539 + if (!scomp_dst_scratches)
545 +static void crypto_scomp_sg_free(struct scatterlist *sgl)
554 + for_each_sg(sgl, sgl, n, i) {
555 + page = sg_page(sgl);
563 +static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
565 + struct scatterlist *sgl;
569 + n = ((size - 1) >> PAGE_SHIFT) + 1;
571 + sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
575 + sg_init_table(sgl, n);
577 + for (i = 0; i < n; i++) {
578 + page = alloc_page(gfp);
581 + sg_set_page(sgl + i, page, PAGE_SIZE, 0);
587 + sg_mark_end(sgl + i);
588 + crypto_scomp_sg_free(sgl);
592 +static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
594 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
595 + void **tfm_ctx = acomp_tfm_ctx(tfm);
596 + struct crypto_scomp *scomp = *tfm_ctx;
597 + void **ctx = acomp_request_ctx(req);
598 + const int cpu = get_cpu();
599 + u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
600 + u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
603 + if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
608 + if (req->dst && !req->dlen) {
613 + if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
614 + req->dlen = SCOMP_SCRATCH_SIZE;
616 + scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
618 + ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
619 + scratch_dst, &req->dlen, *ctx);
621 + ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
622 + scratch_dst, &req->dlen, *ctx);
625 + req->dst = crypto_scomp_sg_alloc(req->dlen,
626 + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
627 + GFP_KERNEL : GFP_ATOMIC);
631 + scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
639 +static int scomp_acomp_compress(struct acomp_req *req)
641 + return scomp_acomp_comp_decomp(req, 1);
644 +static int scomp_acomp_decompress(struct acomp_req *req)
646 + return scomp_acomp_comp_decomp(req, 0);
649 +static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
651 + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
653 + crypto_free_scomp(*ctx);
656 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
658 + struct crypto_alg *calg = tfm->__crt_alg;
659 + struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
660 + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
661 + struct crypto_scomp *scomp;
663 + if (!crypto_mod_get(calg))
666 + scomp = crypto_create_tfm(calg, &crypto_scomp_type);
667 + if (IS_ERR(scomp)) {
668 + crypto_mod_put(calg);
669 + return PTR_ERR(scomp);
673 + tfm->exit = crypto_exit_scomp_ops_async;
675 + crt->compress = scomp_acomp_compress;
676 + crt->decompress = scomp_acomp_decompress;
677 + crt->dst_free = crypto_scomp_sg_free;
678 + crt->reqsize = sizeof(void *);
683 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
685 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
686 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
687 + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
688 + struct crypto_scomp *scomp = *tfm_ctx;
691 + ctx = crypto_scomp_alloc_ctx(scomp);
702 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
704 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
705 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
706 + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
707 + struct crypto_scomp *scomp = *tfm_ctx;
708 + void *ctx = *req->__ctx;
711 + crypto_scomp_free_ctx(scomp, ctx);
714 +static const struct crypto_type crypto_scomp_type = {
715 + .extsize = crypto_alg_extsize,
716 + .init_tfm = crypto_scomp_init_tfm,
717 +#ifdef CONFIG_PROC_FS
718 + .show = crypto_scomp_show,
720 + .report = crypto_scomp_report,
721 + .maskclear = ~CRYPTO_ALG_TYPE_MASK,
722 + .maskset = CRYPTO_ALG_TYPE_MASK,
723 + .type = CRYPTO_ALG_TYPE_SCOMPRESS,
724 + .tfmsize = offsetof(struct crypto_scomp, base),
727 +int crypto_register_scomp(struct scomp_alg *alg)
729 + struct crypto_alg *base = &alg->base;
732 + mutex_lock(&scomp_lock);
733 + if (crypto_scomp_alloc_all_scratches())
736 + base->cra_type = &crypto_scomp_type;
737 + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
738 + base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
740 + ret = crypto_register_alg(base);
744 + mutex_unlock(&scomp_lock);
748 + crypto_scomp_free_all_scratches();
749 + mutex_unlock(&scomp_lock);
752 +EXPORT_SYMBOL_GPL(crypto_register_scomp);
754 +int crypto_unregister_scomp(struct scomp_alg *alg)
758 + mutex_lock(&scomp_lock);
759 + ret = crypto_unregister_alg(&alg->base);
760 + crypto_scomp_free_all_scratches();
761 + mutex_unlock(&scomp_lock);
765 +EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
767 +MODULE_LICENSE("GPL");
768 +MODULE_DESCRIPTION("Synchronous compression type");
769 --- a/crypto/tcrypt.c
770 +++ b/crypto/tcrypt.c
771 @@ -74,7 +74,7 @@ static char *check[] = {
772 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
773 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
774 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
779 struct tcrypt_result {
780 @@ -1329,6 +1329,10 @@ static int do_test(const char *alg, u32
781 ret += tcrypt_test("hmac(sha3-512)");
785 + ret += tcrypt_test("rsa");
789 ret += tcrypt_test("ansi_cprng");
791 @@ -1390,6 +1394,9 @@ static int do_test(const char *alg, u32
793 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
796 + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
799 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
800 speed_template_16_24_32);
801 @@ -1404,9 +1411,9 @@ static int do_test(const char *alg, u32
802 test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
803 speed_template_32_40_48);
804 test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
805 - speed_template_32_48_64);
806 + speed_template_32_64);
807 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
808 - speed_template_32_48_64);
809 + speed_template_32_64);
810 test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
811 speed_template_16_24_32);
812 test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
813 @@ -1837,9 +1844,9 @@ static int do_test(const char *alg, u32
814 test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
815 speed_template_32_40_48);
816 test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
817 - speed_template_32_48_64);
818 + speed_template_32_64);
819 test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
820 - speed_template_32_48_64);
821 + speed_template_32_64);
822 test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
823 speed_template_16_24_32);
824 test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
825 --- a/crypto/testmgr.c
826 +++ b/crypto/testmgr.c
828 #include <crypto/drbg.h>
829 #include <crypto/akcipher.h>
830 #include <crypto/kpp.h>
831 +#include <crypto/acompress.h>
833 #include "internal.h"
835 @@ -62,7 +63,7 @@ int alg_test(const char *driver, const c
844 @@ -82,47 +83,54 @@ struct tcrypt_result {
846 struct aead_test_suite {
848 - struct aead_testvec *vecs;
849 + const struct aead_testvec *vecs;
854 struct cipher_test_suite {
856 - struct cipher_testvec *vecs;
857 + const struct cipher_testvec *vecs;
862 struct comp_test_suite {
864 - struct comp_testvec *vecs;
865 + const struct comp_testvec *vecs;
870 struct hash_test_suite {
871 - struct hash_testvec *vecs;
872 + const struct hash_testvec *vecs;
876 struct cprng_test_suite {
877 - struct cprng_testvec *vecs;
878 + const struct cprng_testvec *vecs;
882 struct drbg_test_suite {
883 - struct drbg_testvec *vecs;
884 + const struct drbg_testvec *vecs;
888 +struct tls_test_suite {
890 + struct tls_testvec *vecs;
891 + unsigned int count;
895 struct akcipher_test_suite {
896 - struct akcipher_testvec *vecs;
897 + const struct akcipher_testvec *vecs;
901 struct kpp_test_suite {
902 - struct kpp_testvec *vecs;
903 + const struct kpp_testvec *vecs;
907 @@ -139,12 +147,14 @@ struct alg_test_desc {
908 struct hash_test_suite hash;
909 struct cprng_test_suite cprng;
910 struct drbg_test_suite drbg;
911 + struct tls_test_suite tls;
912 struct akcipher_test_suite akcipher;
913 struct kpp_test_suite kpp;
917 -static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
918 +static const unsigned int IDX[8] = {
919 + IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
921 static void hexdump(unsigned char *buf, unsigned int len)
923 @@ -202,7 +212,7 @@ static int wait_async_op(struct tcrypt_r
926 static int ahash_partial_update(struct ahash_request **preq,
927 - struct crypto_ahash *tfm, struct hash_testvec *template,
928 + struct crypto_ahash *tfm, const struct hash_testvec *template,
929 void *hash_buff, int k, int temp, struct scatterlist *sg,
930 const char *algo, char *result, struct tcrypt_result *tresult)
932 @@ -259,11 +269,12 @@ out_nostate:
936 -static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
937 - unsigned int tcount, bool use_digest,
938 - const int align_offset)
939 +static int __test_hash(struct crypto_ahash *tfm,
940 + const struct hash_testvec *template, unsigned int tcount,
941 + bool use_digest, const int align_offset)
943 const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
944 + size_t digest_size = crypto_ahash_digestsize(tfm);
945 unsigned int i, j, k, temp;
946 struct scatterlist sg[8];
948 @@ -274,7 +285,7 @@ static int __test_hash(struct crypto_aha
949 char *xbuf[XBUFSIZE];
952 - result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
953 + result = kmalloc(digest_size, GFP_KERNEL);
956 key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
957 @@ -304,7 +315,7 @@ static int __test_hash(struct crypto_aha
961 - memset(result, 0, MAX_DIGEST_SIZE);
962 + memset(result, 0, digest_size);
965 hash_buff += align_offset;
966 @@ -379,7 +390,7 @@ static int __test_hash(struct crypto_aha
970 - memset(result, 0, MAX_DIGEST_SIZE);
971 + memset(result, 0, digest_size);
974 sg_init_table(sg, template[i].np);
975 @@ -457,7 +468,7 @@ static int __test_hash(struct crypto_aha
979 - memset(result, 0, MAX_DIGEST_SIZE);
980 + memset(result, 0, digest_size);
984 @@ -536,7 +547,8 @@ out_nobuf:
988 -static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
989 +static int test_hash(struct crypto_ahash *tfm,
990 + const struct hash_testvec *template,
991 unsigned int tcount, bool use_digest)
993 unsigned int alignmask;
994 @@ -564,7 +576,7 @@ static int test_hash(struct crypto_ahash
997 static int __test_aead(struct crypto_aead *tfm, int enc,
998 - struct aead_testvec *template, unsigned int tcount,
999 + const struct aead_testvec *template, unsigned int tcount,
1000 const bool diff_dst, const int align_offset)
1002 const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1003 @@ -955,7 +967,7 @@ out_noxbuf:
1006 static int test_aead(struct crypto_aead *tfm, int enc,
1007 - struct aead_testvec *template, unsigned int tcount)
1008 + const struct aead_testvec *template, unsigned int tcount)
1010 unsigned int alignmask;
1012 @@ -987,8 +999,236 @@ static int test_aead(struct crypto_aead
1016 +static int __test_tls(struct crypto_aead *tfm, int enc,
1017 + struct tls_testvec *template, unsigned int tcount,
1018 + const bool diff_dst)
1020 + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1021 + unsigned int i, k, authsize;
1023 + struct aead_request *req;
1024 + struct scatterlist *sg;
1025 + struct scatterlist *sgout;
1026 + const char *e, *d;
1027 + struct tcrypt_result result;
1033 + char *xbuf[XBUFSIZE];
1034 + char *xoutbuf[XBUFSIZE];
1035 + char *axbuf[XBUFSIZE];
1036 + int ret = -ENOMEM;
1038 + if (testmgr_alloc_buf(xbuf))
1041 + if (diff_dst && testmgr_alloc_buf(xoutbuf))
1042 + goto out_nooutbuf;
1044 + if (testmgr_alloc_buf(axbuf))
1047 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
1051 + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
1055 + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
1061 + d = diff_dst ? "-ddst" : "";
1062 + e = enc ? "encryption" : "decryption";
1064 + init_completion(&result.completion);
1066 + req = aead_request_alloc(tfm, GFP_KERNEL);
1068 + pr_err("alg: tls%s: Failed to allocate request for %s\n",
1073 + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1074 + tcrypt_complete, &result);
1076 + for (i = 0; i < tcount; i++) {
1081 + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
1082 + template[i].alen > PAGE_SIZE))
1085 + memcpy(assoc, template[i].assoc, template[i].alen);
1086 + memcpy(input, template[i].input, template[i].ilen);
1088 + if (template[i].iv)
1089 + memcpy(iv, template[i].iv, MAX_IVLEN);
1091 + memset(iv, 0, MAX_IVLEN);
1093 + crypto_aead_clear_flags(tfm, ~0);
1095 + if (template[i].klen > MAX_KEYLEN) {
1096 + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
1097 + d, i, algo, template[i].klen, MAX_KEYLEN);
1101 + memcpy(key, template[i].key, template[i].klen);
1103 + ret = crypto_aead_setkey(tfm, key, template[i].klen);
1104 + if (!ret == template[i].fail) {
1105 + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
1106 + d, i, algo, crypto_aead_get_flags(tfm));
1112 + ret = crypto_aead_setauthsize(tfm, authsize);
1114 + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
1115 + d, authsize, i, algo);
1119 + k = !!template[i].alen;
1120 + sg_init_table(sg, k + 1);
1121 + sg_set_buf(&sg[0], assoc, template[i].alen);
1122 + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
1123 + template[i].ilen));
1127 + sg_init_table(sgout, k + 1);
1128 + sg_set_buf(&sgout[0], assoc, template[i].alen);
1130 + output = xoutbuf[0];
1131 + sg_set_buf(&sgout[k], output,
1132 + (enc ? template[i].rlen : template[i].ilen));
1135 + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
1136 + template[i].ilen, iv);
1138 + aead_request_set_ad(req, template[i].alen);
1140 + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
1144 + if (template[i].novrfy) {
1145 + /* verification was supposed to fail */
1146 + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
1148 + /* so really, we got a bad message */
1153 + case -EINPROGRESS:
1155 + wait_for_completion(&result.completion);
1156 + reinit_completion(&result.completion);
1161 + /* verification failure was expected */
1162 + if (template[i].novrfy)
1164 + /* fall through */
1166 + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
1167 + d, e, i, algo, -ret);
1172 + if (memcmp(q, template[i].result, template[i].rlen)) {
1173 + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
1175 + hexdump(q, template[i].rlen);
1176 + pr_err("should be:\n");
1177 + hexdump(template[i].result, template[i].rlen);
1184 + aead_request_free(req);
1192 + testmgr_free_buf(axbuf);
1195 + testmgr_free_buf(xoutbuf);
1197 + testmgr_free_buf(xbuf);
1202 +static int test_tls(struct crypto_aead *tfm, int enc,
1203 + struct tls_testvec *template, unsigned int tcount)
1206 + /* test 'dst == src' case */
1207 + ret = __test_tls(tfm, enc, template, tcount, false);
1210 + /* test 'dst != src' case */
1211 + return __test_tls(tfm, enc, template, tcount, true);
1214 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
1215 + u32 type, u32 mask)
1217 + struct crypto_aead *tfm;
1220 + tfm = crypto_alloc_aead(driver, type, mask);
1221 + if (IS_ERR(tfm)) {
1222 + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
1223 + driver, PTR_ERR(tfm));
1224 + return PTR_ERR(tfm);
1227 + if (desc->suite.tls.enc.vecs) {
1228 + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
1229 + desc->suite.tls.enc.count);
1234 + if (!err && desc->suite.tls.dec.vecs)
1235 + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
1236 + desc->suite.tls.dec.count);
1239 + crypto_free_aead(tfm);
1243 static int test_cipher(struct crypto_cipher *tfm, int enc,
1244 - struct cipher_testvec *template, unsigned int tcount)
1245 + const struct cipher_testvec *template,
1246 + unsigned int tcount)
1248 const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
1249 unsigned int i, j, k;
1250 @@ -1066,7 +1306,8 @@ out_nobuf:
1253 static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
1254 - struct cipher_testvec *template, unsigned int tcount,
1255 + const struct cipher_testvec *template,
1256 + unsigned int tcount,
1257 const bool diff_dst, const int align_offset)
1260 @@ -1330,7 +1571,8 @@ out_nobuf:
1263 static int test_skcipher(struct crypto_skcipher *tfm, int enc,
1264 - struct cipher_testvec *template, unsigned int tcount)
1265 + const struct cipher_testvec *template,
1266 + unsigned int tcount)
1268 unsigned int alignmask;
1270 @@ -1362,8 +1604,10 @@ static int test_skcipher(struct crypto_s
1274 -static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
1275 - struct comp_testvec *dtemplate, int ctcount, int dtcount)
1276 +static int test_comp(struct crypto_comp *tfm,
1277 + const struct comp_testvec *ctemplate,
1278 + const struct comp_testvec *dtemplate,
1279 + int ctcount, int dtcount)
1281 const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
1283 @@ -1442,7 +1686,154 @@ out:
1287 -static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
1288 +static int test_acomp(struct crypto_acomp *tfm,
1289 + const struct comp_testvec *ctemplate,
1290 + const struct comp_testvec *dtemplate,
1291 + int ctcount, int dtcount)
1293 + const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
1297 + struct scatterlist src, dst;
1298 + struct acomp_req *req;
1299 + struct tcrypt_result result;
1301 + output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
1305 + for (i = 0; i < ctcount; i++) {
1306 + unsigned int dlen = COMP_BUF_SIZE;
1307 + int ilen = ctemplate[i].inlen;
1310 + input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
1316 + memset(output, 0, dlen);
1317 + init_completion(&result.completion);
1318 + sg_init_one(&src, input_vec, ilen);
1319 + sg_init_one(&dst, output, dlen);
1321 + req = acomp_request_alloc(tfm);
1323 + pr_err("alg: acomp: request alloc failed for %s\n",
1330 + acomp_request_set_params(req, &src, &dst, ilen, dlen);
1331 + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1332 + tcrypt_complete, &result);
1334 + ret = wait_async_op(&result, crypto_acomp_compress(req));
1336 + pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
1337 + i + 1, algo, -ret);
1339 + acomp_request_free(req);
1343 + if (req->dlen != ctemplate[i].outlen) {
1344 + pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
1345 + i + 1, algo, req->dlen);
1348 + acomp_request_free(req);
1352 + if (memcmp(output, ctemplate[i].output, req->dlen)) {
1353 + pr_err("alg: acomp: Compression test %d failed for %s\n",
1355 + hexdump(output, req->dlen);
1358 + acomp_request_free(req);
1363 + acomp_request_free(req);
1366 + for (i = 0; i < dtcount; i++) {
1367 + unsigned int dlen = COMP_BUF_SIZE;
1368 + int ilen = dtemplate[i].inlen;
1371 + input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL);
1377 + memset(output, 0, dlen);
1378 + init_completion(&result.completion);
1379 + sg_init_one(&src, input_vec, ilen);
1380 + sg_init_one(&dst, output, dlen);
1382 + req = acomp_request_alloc(tfm);
1384 + pr_err("alg: acomp: request alloc failed for %s\n",
1391 + acomp_request_set_params(req, &src, &dst, ilen, dlen);
1392 + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1393 + tcrypt_complete, &result);
1395 + ret = wait_async_op(&result, crypto_acomp_decompress(req));
1397 + pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
1398 + i + 1, algo, -ret);
1400 + acomp_request_free(req);
1404 + if (req->dlen != dtemplate[i].outlen) {
1405 + pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
1406 + i + 1, algo, req->dlen);
1409 + acomp_request_free(req);
1413 + if (memcmp(output, dtemplate[i].output, req->dlen)) {
1414 + pr_err("alg: acomp: Decompression test %d failed for %s\n",
1416 + hexdump(output, req->dlen);
1419 + acomp_request_free(req);
1424 + acomp_request_free(req);
1434 +static int test_cprng(struct crypto_rng *tfm,
1435 + const struct cprng_testvec *template,
1436 unsigned int tcount)
1438 const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
1439 @@ -1509,7 +1900,7 @@ static int alg_test_aead(const struct al
1440 struct crypto_aead *tfm;
1443 - tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
1444 + tfm = crypto_alloc_aead(driver, type, mask);
1446 printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
1447 "%ld\n", driver, PTR_ERR(tfm));
1448 @@ -1538,7 +1929,7 @@ static int alg_test_cipher(const struct
1449 struct crypto_cipher *tfm;
1452 - tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1453 + tfm = crypto_alloc_cipher(driver, type, mask);
1455 printk(KERN_ERR "alg: cipher: Failed to load transform for "
1456 "%s: %ld\n", driver, PTR_ERR(tfm));
1457 @@ -1567,7 +1958,7 @@ static int alg_test_skcipher(const struc
1458 struct crypto_skcipher *tfm;
1461 - tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1462 + tfm = crypto_alloc_skcipher(driver, type, mask);
1464 printk(KERN_ERR "alg: skcipher: Failed to load transform for "
1465 "%s: %ld\n", driver, PTR_ERR(tfm));
1466 @@ -1593,22 +1984,38 @@ out:
1467 static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
1470 - struct crypto_comp *tfm;
1471 + struct crypto_comp *comp;
1472 + struct crypto_acomp *acomp;
1474 + u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
1476 - tfm = crypto_alloc_comp(driver, type, mask);
1477 - if (IS_ERR(tfm)) {
1478 - printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
1479 - "%ld\n", driver, PTR_ERR(tfm));
1480 - return PTR_ERR(tfm);
1482 + if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
1483 + acomp = crypto_alloc_acomp(driver, type, mask);
1484 + if (IS_ERR(acomp)) {
1485 + pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
1486 + driver, PTR_ERR(acomp));
1487 + return PTR_ERR(acomp);
1489 + err = test_acomp(acomp, desc->suite.comp.comp.vecs,
1490 + desc->suite.comp.decomp.vecs,
1491 + desc->suite.comp.comp.count,
1492 + desc->suite.comp.decomp.count);
1493 + crypto_free_acomp(acomp);
1495 + comp = crypto_alloc_comp(driver, type, mask);
1496 + if (IS_ERR(comp)) {
1497 + pr_err("alg: comp: Failed to load transform for %s: %ld\n",
1498 + driver, PTR_ERR(comp));
1499 + return PTR_ERR(comp);
1502 - err = test_comp(tfm, desc->suite.comp.comp.vecs,
1503 - desc->suite.comp.decomp.vecs,
1504 - desc->suite.comp.comp.count,
1505 - desc->suite.comp.decomp.count);
1506 + err = test_comp(comp, desc->suite.comp.comp.vecs,
1507 + desc->suite.comp.decomp.vecs,
1508 + desc->suite.comp.comp.count,
1509 + desc->suite.comp.decomp.count);
1511 - crypto_free_comp(tfm);
1512 + crypto_free_comp(comp);
1517 @@ -1618,7 +2025,7 @@ static int alg_test_hash(const struct al
1518 struct crypto_ahash *tfm;
1521 - tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1522 + tfm = crypto_alloc_ahash(driver, type, mask);
1524 printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
1525 "%ld\n", driver, PTR_ERR(tfm));
1526 @@ -1646,7 +2053,7 @@ static int alg_test_crc32c(const struct
1530 - tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1531 + tfm = crypto_alloc_shash(driver, type, mask);
1533 printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
1534 "%ld\n", driver, PTR_ERR(tfm));
1535 @@ -1688,7 +2095,7 @@ static int alg_test_cprng(const struct a
1536 struct crypto_rng *rng;
1539 - rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1540 + rng = crypto_alloc_rng(driver, type, mask);
1542 printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
1543 "%ld\n", driver, PTR_ERR(rng));
1544 @@ -1703,7 +2110,7 @@ static int alg_test_cprng(const struct a
1548 -static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1549 +static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
1550 const char *driver, u32 type, u32 mask)
1553 @@ -1715,7 +2122,7 @@ static int drbg_cavs_test(struct drbg_te
1557 - drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1558 + drng = crypto_alloc_rng(driver, type, mask);
1560 printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
1562 @@ -1777,7 +2184,7 @@ static int alg_test_drbg(const struct al
1566 - struct drbg_testvec *template = desc->suite.drbg.vecs;
1567 + const struct drbg_testvec *template = desc->suite.drbg.vecs;
1568 unsigned int tcount = desc->suite.drbg.count;
1570 if (0 == memcmp(driver, "drbg_pr_", 8))
1571 @@ -1796,7 +2203,7 @@ static int alg_test_drbg(const struct al
1575 -static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
1576 +static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
1579 struct kpp_request *req;
1580 @@ -1888,7 +2295,7 @@ free_req:
1583 static int test_kpp(struct crypto_kpp *tfm, const char *alg,
1584 - struct kpp_testvec *vecs, unsigned int tcount)
1585 + const struct kpp_testvec *vecs, unsigned int tcount)
1589 @@ -1909,7 +2316,7 @@ static int alg_test_kpp(const struct alg
1590 struct crypto_kpp *tfm;
1593 - tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
1594 + tfm = crypto_alloc_kpp(driver, type, mask);
1596 pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
1597 driver, PTR_ERR(tfm));
1598 @@ -1924,7 +2331,7 @@ static int alg_test_kpp(const struct alg
1601 static int test_akcipher_one(struct crypto_akcipher *tfm,
1602 - struct akcipher_testvec *vecs)
1603 + const struct akcipher_testvec *vecs)
1605 char *xbuf[XBUFSIZE];
1606 struct akcipher_request *req;
1607 @@ -2044,7 +2451,8 @@ free_xbuf:
1610 static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
1611 - struct akcipher_testvec *vecs, unsigned int tcount)
1612 + const struct akcipher_testvec *vecs,
1613 + unsigned int tcount)
1616 crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
1617 @@ -2068,7 +2476,7 @@ static int alg_test_akcipher(const struc
1618 struct crypto_akcipher *tfm;
1621 - tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1622 + tfm = crypto_alloc_akcipher(driver, type, mask);
1624 pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
1625 driver, PTR_ERR(tfm));
1626 @@ -2088,112 +2496,23 @@ static int alg_test_null(const struct al
1630 +#define __VECS(tv) { .vecs = tv, .count = ARRAY_SIZE(tv) }
1632 /* Please keep this list sorted by algorithm name. */
1633 static const struct alg_test_desc alg_test_descs[] = {
1635 - .alg = "__cbc-cast5-avx",
1636 - .test = alg_test_null,
1638 - .alg = "__cbc-cast6-avx",
1639 - .test = alg_test_null,
1641 - .alg = "__cbc-serpent-avx",
1642 - .test = alg_test_null,
1644 - .alg = "__cbc-serpent-avx2",
1645 - .test = alg_test_null,
1647 - .alg = "__cbc-serpent-sse2",
1648 - .test = alg_test_null,
1650 - .alg = "__cbc-twofish-avx",
1651 - .test = alg_test_null,
1653 - .alg = "__driver-cbc-aes-aesni",
1654 - .test = alg_test_null,
1655 - .fips_allowed = 1,
1657 - .alg = "__driver-cbc-camellia-aesni",
1658 - .test = alg_test_null,
1660 - .alg = "__driver-cbc-camellia-aesni-avx2",
1661 - .test = alg_test_null,
1663 - .alg = "__driver-cbc-cast5-avx",
1664 - .test = alg_test_null,
1666 - .alg = "__driver-cbc-cast6-avx",
1667 - .test = alg_test_null,
1669 - .alg = "__driver-cbc-serpent-avx",
1670 - .test = alg_test_null,
1672 - .alg = "__driver-cbc-serpent-avx2",
1673 - .test = alg_test_null,
1675 - .alg = "__driver-cbc-serpent-sse2",
1676 - .test = alg_test_null,
1678 - .alg = "__driver-cbc-twofish-avx",
1679 - .test = alg_test_null,
1681 - .alg = "__driver-ecb-aes-aesni",
1682 - .test = alg_test_null,
1683 - .fips_allowed = 1,
1685 - .alg = "__driver-ecb-camellia-aesni",
1686 - .test = alg_test_null,
1688 - .alg = "__driver-ecb-camellia-aesni-avx2",
1689 - .test = alg_test_null,
1691 - .alg = "__driver-ecb-cast5-avx",
1692 - .test = alg_test_null,
1694 - .alg = "__driver-ecb-cast6-avx",
1695 - .test = alg_test_null,
1697 - .alg = "__driver-ecb-serpent-avx",
1698 - .test = alg_test_null,
1700 - .alg = "__driver-ecb-serpent-avx2",
1701 - .test = alg_test_null,
1703 - .alg = "__driver-ecb-serpent-sse2",
1704 - .test = alg_test_null,
1706 - .alg = "__driver-ecb-twofish-avx",
1707 - .test = alg_test_null,
1709 - .alg = "__driver-gcm-aes-aesni",
1710 - .test = alg_test_null,
1711 - .fips_allowed = 1,
1713 - .alg = "__ghash-pclmulqdqni",
1714 - .test = alg_test_null,
1715 - .fips_allowed = 1,
1717 .alg = "ansi_cprng",
1718 .test = alg_test_cprng,
1721 - .vecs = ansi_cprng_aes_tv_template,
1722 - .count = ANSI_CPRNG_AES_TEST_VECTORS
1724 + .cprng = __VECS(ansi_cprng_aes_tv_template)
1727 .alg = "authenc(hmac(md5),ecb(cipher_null))",
1728 .test = alg_test_aead,
1732 - .vecs = hmac_md5_ecb_cipher_null_enc_tv_template,
1733 - .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS
1736 - .vecs = hmac_md5_ecb_cipher_null_dec_tv_template,
1737 - .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS
1739 + .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template),
1740 + .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template)
1744 @@ -2201,12 +2520,7 @@ static const struct alg_test_desc alg_te
1745 .test = alg_test_aead,
1750 - hmac_sha1_aes_cbc_enc_tv_temp,
1752 - HMAC_SHA1_AES_CBC_ENC_TEST_VEC
1754 + .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp)
1758 @@ -2214,12 +2528,7 @@ static const struct alg_test_desc alg_te
1759 .test = alg_test_aead,
1764 - hmac_sha1_des_cbc_enc_tv_temp,
1766 - HMAC_SHA1_DES_CBC_ENC_TEST_VEC
1768 + .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp)
1772 @@ -2228,12 +2537,7 @@ static const struct alg_test_desc alg_te
1778 - hmac_sha1_des3_ede_cbc_enc_tv_temp,
1780 - HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC
1782 + .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp)
1786 @@ -2245,18 +2549,8 @@ static const struct alg_test_desc alg_te
1787 .test = alg_test_aead,
1792 - hmac_sha1_ecb_cipher_null_enc_tv_temp,
1794 - HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC
1798 - hmac_sha1_ecb_cipher_null_dec_tv_temp,
1800 - HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC
1802 + .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp),
1803 + .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp)
1807 @@ -2268,12 +2562,7 @@ static const struct alg_test_desc alg_te
1808 .test = alg_test_aead,
1813 - hmac_sha224_des_cbc_enc_tv_temp,
1815 - HMAC_SHA224_DES_CBC_ENC_TEST_VEC
1817 + .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp)
1821 @@ -2282,12 +2571,7 @@ static const struct alg_test_desc alg_te
1827 - hmac_sha224_des3_ede_cbc_enc_tv_temp,
1829 - HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC
1831 + .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp)
1835 @@ -2296,12 +2580,7 @@ static const struct alg_test_desc alg_te
1841 - hmac_sha256_aes_cbc_enc_tv_temp,
1843 - HMAC_SHA256_AES_CBC_ENC_TEST_VEC
1845 + .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp)
1849 @@ -2309,12 +2588,7 @@ static const struct alg_test_desc alg_te
1850 .test = alg_test_aead,
1855 - hmac_sha256_des_cbc_enc_tv_temp,
1857 - HMAC_SHA256_DES_CBC_ENC_TEST_VEC
1859 + .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp)
1863 @@ -2323,12 +2597,7 @@ static const struct alg_test_desc alg_te
1869 - hmac_sha256_des3_ede_cbc_enc_tv_temp,
1871 - HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC
1873 + .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp)
1877 @@ -2344,12 +2613,7 @@ static const struct alg_test_desc alg_te
1878 .test = alg_test_aead,
1883 - hmac_sha384_des_cbc_enc_tv_temp,
1885 - HMAC_SHA384_DES_CBC_ENC_TEST_VEC
1887 + .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp)
1891 @@ -2358,12 +2622,7 @@ static const struct alg_test_desc alg_te
1897 - hmac_sha384_des3_ede_cbc_enc_tv_temp,
1899 - HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC
1901 + .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp)
1905 @@ -2380,12 +2639,7 @@ static const struct alg_test_desc alg_te
1906 .test = alg_test_aead,
1911 - hmac_sha512_aes_cbc_enc_tv_temp,
1913 - HMAC_SHA512_AES_CBC_ENC_TEST_VEC
1915 + .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp)
1919 @@ -2393,12 +2647,7 @@ static const struct alg_test_desc alg_te
1920 .test = alg_test_aead,
1925 - hmac_sha512_des_cbc_enc_tv_temp,
1927 - HMAC_SHA512_DES_CBC_ENC_TEST_VEC
1929 + .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp)
1933 @@ -2407,12 +2656,7 @@ static const struct alg_test_desc alg_te
1939 - hmac_sha512_des3_ede_cbc_enc_tv_temp,
1941 - HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC
1943 + .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp)
1947 @@ -2429,14 +2673,8 @@ static const struct alg_test_desc alg_te
1952 - .vecs = aes_cbc_enc_tv_template,
1953 - .count = AES_CBC_ENC_TEST_VECTORS
1956 - .vecs = aes_cbc_dec_tv_template,
1957 - .count = AES_CBC_DEC_TEST_VECTORS
1959 + .enc = __VECS(aes_cbc_enc_tv_template),
1960 + .dec = __VECS(aes_cbc_dec_tv_template)
1964 @@ -2444,14 +2682,8 @@ static const struct alg_test_desc alg_te
1965 .test = alg_test_skcipher,
1969 - .vecs = anubis_cbc_enc_tv_template,
1970 - .count = ANUBIS_CBC_ENC_TEST_VECTORS
1973 - .vecs = anubis_cbc_dec_tv_template,
1974 - .count = ANUBIS_CBC_DEC_TEST_VECTORS
1976 + .enc = __VECS(anubis_cbc_enc_tv_template),
1977 + .dec = __VECS(anubis_cbc_dec_tv_template)
1981 @@ -2459,14 +2691,8 @@ static const struct alg_test_desc alg_te
1982 .test = alg_test_skcipher,
1986 - .vecs = bf_cbc_enc_tv_template,
1987 - .count = BF_CBC_ENC_TEST_VECTORS
1990 - .vecs = bf_cbc_dec_tv_template,
1991 - .count = BF_CBC_DEC_TEST_VECTORS
1993 + .enc = __VECS(bf_cbc_enc_tv_template),
1994 + .dec = __VECS(bf_cbc_dec_tv_template)
1998 @@ -2474,14 +2700,8 @@ static const struct alg_test_desc alg_te
1999 .test = alg_test_skcipher,
2003 - .vecs = camellia_cbc_enc_tv_template,
2004 - .count = CAMELLIA_CBC_ENC_TEST_VECTORS
2007 - .vecs = camellia_cbc_dec_tv_template,
2008 - .count = CAMELLIA_CBC_DEC_TEST_VECTORS
2010 + .enc = __VECS(camellia_cbc_enc_tv_template),
2011 + .dec = __VECS(camellia_cbc_dec_tv_template)
2015 @@ -2489,14 +2709,8 @@ static const struct alg_test_desc alg_te
2016 .test = alg_test_skcipher,
2020 - .vecs = cast5_cbc_enc_tv_template,
2021 - .count = CAST5_CBC_ENC_TEST_VECTORS
2024 - .vecs = cast5_cbc_dec_tv_template,
2025 - .count = CAST5_CBC_DEC_TEST_VECTORS
2027 + .enc = __VECS(cast5_cbc_enc_tv_template),
2028 + .dec = __VECS(cast5_cbc_dec_tv_template)
2032 @@ -2504,14 +2718,8 @@ static const struct alg_test_desc alg_te
2033 .test = alg_test_skcipher,
2037 - .vecs = cast6_cbc_enc_tv_template,
2038 - .count = CAST6_CBC_ENC_TEST_VECTORS
2041 - .vecs = cast6_cbc_dec_tv_template,
2042 - .count = CAST6_CBC_DEC_TEST_VECTORS
2044 + .enc = __VECS(cast6_cbc_enc_tv_template),
2045 + .dec = __VECS(cast6_cbc_dec_tv_template)
2049 @@ -2519,14 +2727,8 @@ static const struct alg_test_desc alg_te
2050 .test = alg_test_skcipher,
2054 - .vecs = des_cbc_enc_tv_template,
2055 - .count = DES_CBC_ENC_TEST_VECTORS
2058 - .vecs = des_cbc_dec_tv_template,
2059 - .count = DES_CBC_DEC_TEST_VECTORS
2061 + .enc = __VECS(des_cbc_enc_tv_template),
2062 + .dec = __VECS(des_cbc_dec_tv_template)
2066 @@ -2535,14 +2737,8 @@ static const struct alg_test_desc alg_te
2071 - .vecs = des3_ede_cbc_enc_tv_template,
2072 - .count = DES3_EDE_CBC_ENC_TEST_VECTORS
2075 - .vecs = des3_ede_cbc_dec_tv_template,
2076 - .count = DES3_EDE_CBC_DEC_TEST_VECTORS
2078 + .enc = __VECS(des3_ede_cbc_enc_tv_template),
2079 + .dec = __VECS(des3_ede_cbc_dec_tv_template)
2083 @@ -2550,14 +2746,8 @@ static const struct alg_test_desc alg_te
2084 .test = alg_test_skcipher,
2088 - .vecs = serpent_cbc_enc_tv_template,
2089 - .count = SERPENT_CBC_ENC_TEST_VECTORS
2092 - .vecs = serpent_cbc_dec_tv_template,
2093 - .count = SERPENT_CBC_DEC_TEST_VECTORS
2095 + .enc = __VECS(serpent_cbc_enc_tv_template),
2096 + .dec = __VECS(serpent_cbc_dec_tv_template)
2100 @@ -2565,30 +2755,25 @@ static const struct alg_test_desc alg_te
2101 .test = alg_test_skcipher,
2105 - .vecs = tf_cbc_enc_tv_template,
2106 - .count = TF_CBC_ENC_TEST_VECTORS
2109 - .vecs = tf_cbc_dec_tv_template,
2110 - .count = TF_CBC_DEC_TEST_VECTORS
2112 + .enc = __VECS(tf_cbc_enc_tv_template),
2113 + .dec = __VECS(tf_cbc_dec_tv_template)
2117 + .alg = "cbcmac(aes)",
2118 + .fips_allowed = 1,
2119 + .test = alg_test_hash,
2121 + .hash = __VECS(aes_cbcmac_tv_template)
2125 .test = alg_test_aead,
2130 - .vecs = aes_ccm_enc_tv_template,
2131 - .count = AES_CCM_ENC_TEST_VECTORS
2134 - .vecs = aes_ccm_dec_tv_template,
2135 - .count = AES_CCM_DEC_TEST_VECTORS
2137 + .enc = __VECS(aes_ccm_enc_tv_template),
2138 + .dec = __VECS(aes_ccm_dec_tv_template)
2142 @@ -2596,14 +2781,8 @@ static const struct alg_test_desc alg_te
2143 .test = alg_test_skcipher,
2147 - .vecs = chacha20_enc_tv_template,
2148 - .count = CHACHA20_ENC_TEST_VECTORS
2151 - .vecs = chacha20_enc_tv_template,
2152 - .count = CHACHA20_ENC_TEST_VECTORS
2154 + .enc = __VECS(chacha20_enc_tv_template),
2155 + .dec = __VECS(chacha20_enc_tv_template),
2159 @@ -2611,20 +2790,14 @@ static const struct alg_test_desc alg_te
2161 .test = alg_test_hash,
2164 - .vecs = aes_cmac128_tv_template,
2165 - .count = CMAC_AES_TEST_VECTORS
2167 + .hash = __VECS(aes_cmac128_tv_template)
2170 .alg = "cmac(des3_ede)",
2172 .test = alg_test_hash,
2175 - .vecs = des3_ede_cmac64_tv_template,
2176 - .count = CMAC_DES3_EDE_TEST_VECTORS
2178 + .hash = __VECS(des3_ede_cmac64_tv_template)
2181 .alg = "compress_null",
2182 @@ -2633,94 +2806,30 @@ static const struct alg_test_desc alg_te
2184 .test = alg_test_hash,
2187 - .vecs = crc32_tv_template,
2188 - .count = CRC32_TEST_VECTORS
2190 + .hash = __VECS(crc32_tv_template)
2194 .test = alg_test_crc32c,
2198 - .vecs = crc32c_tv_template,
2199 - .count = CRC32C_TEST_VECTORS
2201 + .hash = __VECS(crc32c_tv_template)
2205 .test = alg_test_hash,
2209 - .vecs = crct10dif_tv_template,
2210 - .count = CRCT10DIF_TEST_VECTORS
2212 + .hash = __VECS(crct10dif_tv_template)
2215 - .alg = "cryptd(__driver-cbc-aes-aesni)",
2216 - .test = alg_test_null,
2217 - .fips_allowed = 1,
2219 - .alg = "cryptd(__driver-cbc-camellia-aesni)",
2220 - .test = alg_test_null,
2222 - .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)",
2223 - .test = alg_test_null,
2225 - .alg = "cryptd(__driver-cbc-serpent-avx2)",
2226 - .test = alg_test_null,
2228 - .alg = "cryptd(__driver-ecb-aes-aesni)",
2229 - .test = alg_test_null,
2230 - .fips_allowed = 1,
2232 - .alg = "cryptd(__driver-ecb-camellia-aesni)",
2233 - .test = alg_test_null,
2235 - .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)",
2236 - .test = alg_test_null,
2238 - .alg = "cryptd(__driver-ecb-cast5-avx)",
2239 - .test = alg_test_null,
2241 - .alg = "cryptd(__driver-ecb-cast6-avx)",
2242 - .test = alg_test_null,
2244 - .alg = "cryptd(__driver-ecb-serpent-avx)",
2245 - .test = alg_test_null,
2247 - .alg = "cryptd(__driver-ecb-serpent-avx2)",
2248 - .test = alg_test_null,
2250 - .alg = "cryptd(__driver-ecb-serpent-sse2)",
2251 - .test = alg_test_null,
2253 - .alg = "cryptd(__driver-ecb-twofish-avx)",
2254 - .test = alg_test_null,
2256 - .alg = "cryptd(__driver-gcm-aes-aesni)",
2257 - .test = alg_test_null,
2258 - .fips_allowed = 1,
2260 - .alg = "cryptd(__ghash-pclmulqdqni)",
2261 - .test = alg_test_null,
2262 - .fips_allowed = 1,
2265 .test = alg_test_skcipher,
2270 - .vecs = aes_ctr_enc_tv_template,
2271 - .count = AES_CTR_ENC_TEST_VECTORS
2274 - .vecs = aes_ctr_dec_tv_template,
2275 - .count = AES_CTR_DEC_TEST_VECTORS
2277 + .enc = __VECS(aes_ctr_enc_tv_template),
2278 + .dec = __VECS(aes_ctr_dec_tv_template)
2282 @@ -2728,14 +2837,8 @@ static const struct alg_test_desc alg_te
2283 .test = alg_test_skcipher,
2287 - .vecs = bf_ctr_enc_tv_template,
2288 - .count = BF_CTR_ENC_TEST_VECTORS
2291 - .vecs = bf_ctr_dec_tv_template,
2292 - .count = BF_CTR_DEC_TEST_VECTORS
2294 + .enc = __VECS(bf_ctr_enc_tv_template),
2295 + .dec = __VECS(bf_ctr_dec_tv_template)
2299 @@ -2743,14 +2846,8 @@ static const struct alg_test_desc alg_te
2300 .test = alg_test_skcipher,
2304 - .vecs = camellia_ctr_enc_tv_template,
2305 - .count = CAMELLIA_CTR_ENC_TEST_VECTORS
2308 - .vecs = camellia_ctr_dec_tv_template,
2309 - .count = CAMELLIA_CTR_DEC_TEST_VECTORS
2311 + .enc = __VECS(camellia_ctr_enc_tv_template),
2312 + .dec = __VECS(camellia_ctr_dec_tv_template)
2316 @@ -2758,14 +2855,8 @@ static const struct alg_test_desc alg_te
2317 .test = alg_test_skcipher,
2321 - .vecs = cast5_ctr_enc_tv_template,
2322 - .count = CAST5_CTR_ENC_TEST_VECTORS
2325 - .vecs = cast5_ctr_dec_tv_template,
2326 - .count = CAST5_CTR_DEC_TEST_VECTORS
2328 + .enc = __VECS(cast5_ctr_enc_tv_template),
2329 + .dec = __VECS(cast5_ctr_dec_tv_template)
2333 @@ -2773,14 +2864,8 @@ static const struct alg_test_desc alg_te
2334 .test = alg_test_skcipher,
2338 - .vecs = cast6_ctr_enc_tv_template,
2339 - .count = CAST6_CTR_ENC_TEST_VECTORS
2342 - .vecs = cast6_ctr_dec_tv_template,
2343 - .count = CAST6_CTR_DEC_TEST_VECTORS
2345 + .enc = __VECS(cast6_ctr_enc_tv_template),
2346 + .dec = __VECS(cast6_ctr_dec_tv_template)
2350 @@ -2788,29 +2873,18 @@ static const struct alg_test_desc alg_te
2351 .test = alg_test_skcipher,
2355 - .vecs = des_ctr_enc_tv_template,
2356 - .count = DES_CTR_ENC_TEST_VECTORS
2359 - .vecs = des_ctr_dec_tv_template,
2360 - .count = DES_CTR_DEC_TEST_VECTORS
2362 + .enc = __VECS(des_ctr_enc_tv_template),
2363 + .dec = __VECS(des_ctr_dec_tv_template)
2367 .alg = "ctr(des3_ede)",
2368 .test = alg_test_skcipher,
2369 + .fips_allowed = 1,
2373 - .vecs = des3_ede_ctr_enc_tv_template,
2374 - .count = DES3_EDE_CTR_ENC_TEST_VECTORS
2377 - .vecs = des3_ede_ctr_dec_tv_template,
2378 - .count = DES3_EDE_CTR_DEC_TEST_VECTORS
2380 + .enc = __VECS(des3_ede_ctr_enc_tv_template),
2381 + .dec = __VECS(des3_ede_ctr_dec_tv_template)
2385 @@ -2818,14 +2892,8 @@ static const struct alg_test_desc alg_te
2386 .test = alg_test_skcipher,
2390 - .vecs = serpent_ctr_enc_tv_template,
2391 - .count = SERPENT_CTR_ENC_TEST_VECTORS
2394 - .vecs = serpent_ctr_dec_tv_template,
2395 - .count = SERPENT_CTR_DEC_TEST_VECTORS
2397 + .enc = __VECS(serpent_ctr_enc_tv_template),
2398 + .dec = __VECS(serpent_ctr_dec_tv_template)
2402 @@ -2833,14 +2901,8 @@ static const struct alg_test_desc alg_te
2403 .test = alg_test_skcipher,
2407 - .vecs = tf_ctr_enc_tv_template,
2408 - .count = TF_CTR_ENC_TEST_VECTORS
2411 - .vecs = tf_ctr_dec_tv_template,
2412 - .count = TF_CTR_DEC_TEST_VECTORS
2414 + .enc = __VECS(tf_ctr_enc_tv_template),
2415 + .dec = __VECS(tf_ctr_dec_tv_template)
2419 @@ -2848,14 +2910,8 @@ static const struct alg_test_desc alg_te
2420 .test = alg_test_skcipher,
2424 - .vecs = cts_mode_enc_tv_template,
2425 - .count = CTS_MODE_ENC_TEST_VECTORS
2428 - .vecs = cts_mode_dec_tv_template,
2429 - .count = CTS_MODE_DEC_TEST_VECTORS
2431 + .enc = __VECS(cts_mode_enc_tv_template),
2432 + .dec = __VECS(cts_mode_dec_tv_template)
2436 @@ -2864,14 +2920,8 @@ static const struct alg_test_desc alg_te
2441 - .vecs = deflate_comp_tv_template,
2442 - .count = DEFLATE_COMP_TEST_VECTORS
2445 - .vecs = deflate_decomp_tv_template,
2446 - .count = DEFLATE_DECOMP_TEST_VECTORS
2448 + .comp = __VECS(deflate_comp_tv_template),
2449 + .decomp = __VECS(deflate_decomp_tv_template)
2453 @@ -2879,10 +2929,7 @@ static const struct alg_test_desc alg_te
2454 .test = alg_test_kpp,
2458 - .vecs = dh_tv_template,
2459 - .count = DH_TEST_VECTORS
2461 + .kpp = __VECS(dh_tv_template)
2464 .alg = "digest_null",
2465 @@ -2892,30 +2939,21 @@ static const struct alg_test_desc alg_te
2466 .test = alg_test_drbg,
2470 - .vecs = drbg_nopr_ctr_aes128_tv_template,
2471 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
2473 + .drbg = __VECS(drbg_nopr_ctr_aes128_tv_template)
2476 .alg = "drbg_nopr_ctr_aes192",
2477 .test = alg_test_drbg,
2481 - .vecs = drbg_nopr_ctr_aes192_tv_template,
2482 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
2484 + .drbg = __VECS(drbg_nopr_ctr_aes192_tv_template)
2487 .alg = "drbg_nopr_ctr_aes256",
2488 .test = alg_test_drbg,
2492 - .vecs = drbg_nopr_ctr_aes256_tv_template,
2493 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
2495 + .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
2499 @@ -2930,11 +2968,7 @@ static const struct alg_test_desc alg_te
2500 .test = alg_test_drbg,
2504 - .vecs = drbg_nopr_hmac_sha256_tv_template,
2506 - ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
2508 + .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
2511 /* covered by drbg_nopr_hmac_sha256 test */
2512 @@ -2954,10 +2988,7 @@ static const struct alg_test_desc alg_te
2513 .test = alg_test_drbg,
2517 - .vecs = drbg_nopr_sha256_tv_template,
2518 - .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
2520 + .drbg = __VECS(drbg_nopr_sha256_tv_template)
2523 /* covered by drbg_nopr_sha256 test */
2524 @@ -2973,10 +3004,7 @@ static const struct alg_test_desc alg_te
2525 .test = alg_test_drbg,
2529 - .vecs = drbg_pr_ctr_aes128_tv_template,
2530 - .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
2532 + .drbg = __VECS(drbg_pr_ctr_aes128_tv_template)
2535 /* covered by drbg_pr_ctr_aes128 test */
2536 @@ -2996,10 +3024,7 @@ static const struct alg_test_desc alg_te
2537 .test = alg_test_drbg,
2541 - .vecs = drbg_pr_hmac_sha256_tv_template,
2542 - .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
2544 + .drbg = __VECS(drbg_pr_hmac_sha256_tv_template)
2547 /* covered by drbg_pr_hmac_sha256 test */
2548 @@ -3019,10 +3044,7 @@ static const struct alg_test_desc alg_te
2549 .test = alg_test_drbg,
2553 - .vecs = drbg_pr_sha256_tv_template,
2554 - .count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
2556 + .drbg = __VECS(drbg_pr_sha256_tv_template)
2559 /* covered by drbg_pr_sha256 test */
2560 @@ -3034,23 +3056,13 @@ static const struct alg_test_desc alg_te
2562 .test = alg_test_null,
2564 - .alg = "ecb(__aes-aesni)",
2565 - .test = alg_test_null,
2566 - .fips_allowed = 1,
2569 .test = alg_test_skcipher,
2574 - .vecs = aes_enc_tv_template,
2575 - .count = AES_ENC_TEST_VECTORS
2578 - .vecs = aes_dec_tv_template,
2579 - .count = AES_DEC_TEST_VECTORS
2581 + .enc = __VECS(aes_enc_tv_template),
2582 + .dec = __VECS(aes_dec_tv_template)
2586 @@ -3058,14 +3070,8 @@ static const struct alg_test_desc alg_te
2587 .test = alg_test_skcipher,
2591 - .vecs = anubis_enc_tv_template,
2592 - .count = ANUBIS_ENC_TEST_VECTORS
2595 - .vecs = anubis_dec_tv_template,
2596 - .count = ANUBIS_DEC_TEST_VECTORS
2598 + .enc = __VECS(anubis_enc_tv_template),
2599 + .dec = __VECS(anubis_dec_tv_template)
2603 @@ -3073,14 +3079,8 @@ static const struct alg_test_desc alg_te
2604 .test = alg_test_skcipher,
2608 - .vecs = arc4_enc_tv_template,
2609 - .count = ARC4_ENC_TEST_VECTORS
2612 - .vecs = arc4_dec_tv_template,
2613 - .count = ARC4_DEC_TEST_VECTORS
2615 + .enc = __VECS(arc4_enc_tv_template),
2616 + .dec = __VECS(arc4_dec_tv_template)
2620 @@ -3088,14 +3088,8 @@ static const struct alg_test_desc alg_te
2621 .test = alg_test_skcipher,
2625 - .vecs = bf_enc_tv_template,
2626 - .count = BF_ENC_TEST_VECTORS
2629 - .vecs = bf_dec_tv_template,
2630 - .count = BF_DEC_TEST_VECTORS
2632 + .enc = __VECS(bf_enc_tv_template),
2633 + .dec = __VECS(bf_dec_tv_template)
2637 @@ -3103,14 +3097,8 @@ static const struct alg_test_desc alg_te
2638 .test = alg_test_skcipher,
2642 - .vecs = camellia_enc_tv_template,
2643 - .count = CAMELLIA_ENC_TEST_VECTORS
2646 - .vecs = camellia_dec_tv_template,
2647 - .count = CAMELLIA_DEC_TEST_VECTORS
2649 + .enc = __VECS(camellia_enc_tv_template),
2650 + .dec = __VECS(camellia_dec_tv_template)
2654 @@ -3118,14 +3106,8 @@ static const struct alg_test_desc alg_te
2655 .test = alg_test_skcipher,
2659 - .vecs = cast5_enc_tv_template,
2660 - .count = CAST5_ENC_TEST_VECTORS
2663 - .vecs = cast5_dec_tv_template,
2664 - .count = CAST5_DEC_TEST_VECTORS
2666 + .enc = __VECS(cast5_enc_tv_template),
2667 + .dec = __VECS(cast5_dec_tv_template)
2671 @@ -3133,14 +3115,8 @@ static const struct alg_test_desc alg_te
2672 .test = alg_test_skcipher,
2676 - .vecs = cast6_enc_tv_template,
2677 - .count = CAST6_ENC_TEST_VECTORS
2680 - .vecs = cast6_dec_tv_template,
2681 - .count = CAST6_DEC_TEST_VECTORS
2683 + .enc = __VECS(cast6_enc_tv_template),
2684 + .dec = __VECS(cast6_dec_tv_template)
2688 @@ -3151,14 +3127,8 @@ static const struct alg_test_desc alg_te
2689 .test = alg_test_skcipher,
2693 - .vecs = des_enc_tv_template,
2694 - .count = DES_ENC_TEST_VECTORS
2697 - .vecs = des_dec_tv_template,
2698 - .count = DES_DEC_TEST_VECTORS
2700 + .enc = __VECS(des_enc_tv_template),
2701 + .dec = __VECS(des_dec_tv_template)
2705 @@ -3167,14 +3137,8 @@ static const struct alg_test_desc alg_te
2710 - .vecs = des3_ede_enc_tv_template,
2711 - .count = DES3_EDE_ENC_TEST_VECTORS
2714 - .vecs = des3_ede_dec_tv_template,
2715 - .count = DES3_EDE_DEC_TEST_VECTORS
2717 + .enc = __VECS(des3_ede_enc_tv_template),
2718 + .dec = __VECS(des3_ede_dec_tv_template)
2722 @@ -3197,14 +3161,8 @@ static const struct alg_test_desc alg_te
2723 .test = alg_test_skcipher,
2727 - .vecs = khazad_enc_tv_template,
2728 - .count = KHAZAD_ENC_TEST_VECTORS
2731 - .vecs = khazad_dec_tv_template,
2732 - .count = KHAZAD_DEC_TEST_VECTORS
2734 + .enc = __VECS(khazad_enc_tv_template),
2735 + .dec = __VECS(khazad_dec_tv_template)
2739 @@ -3212,14 +3170,8 @@ static const struct alg_test_desc alg_te
2740 .test = alg_test_skcipher,
2744 - .vecs = seed_enc_tv_template,
2745 - .count = SEED_ENC_TEST_VECTORS
2748 - .vecs = seed_dec_tv_template,
2749 - .count = SEED_DEC_TEST_VECTORS
2751 + .enc = __VECS(seed_enc_tv_template),
2752 + .dec = __VECS(seed_dec_tv_template)
2756 @@ -3227,14 +3179,8 @@ static const struct alg_test_desc alg_te
2757 .test = alg_test_skcipher,
2761 - .vecs = serpent_enc_tv_template,
2762 - .count = SERPENT_ENC_TEST_VECTORS
2765 - .vecs = serpent_dec_tv_template,
2766 - .count = SERPENT_DEC_TEST_VECTORS
2768 + .enc = __VECS(serpent_enc_tv_template),
2769 + .dec = __VECS(serpent_dec_tv_template)
2773 @@ -3242,14 +3188,8 @@ static const struct alg_test_desc alg_te
2774 .test = alg_test_skcipher,
2778 - .vecs = tea_enc_tv_template,
2779 - .count = TEA_ENC_TEST_VECTORS
2782 - .vecs = tea_dec_tv_template,
2783 - .count = TEA_DEC_TEST_VECTORS
2785 + .enc = __VECS(tea_enc_tv_template),
2786 + .dec = __VECS(tea_dec_tv_template)
2790 @@ -3257,14 +3197,8 @@ static const struct alg_test_desc alg_te
2791 .test = alg_test_skcipher,
2795 - .vecs = tnepres_enc_tv_template,
2796 - .count = TNEPRES_ENC_TEST_VECTORS
2799 - .vecs = tnepres_dec_tv_template,
2800 - .count = TNEPRES_DEC_TEST_VECTORS
2802 + .enc = __VECS(tnepres_enc_tv_template),
2803 + .dec = __VECS(tnepres_dec_tv_template)
2807 @@ -3272,14 +3206,8 @@ static const struct alg_test_desc alg_te
2808 .test = alg_test_skcipher,
2812 - .vecs = tf_enc_tv_template,
2813 - .count = TF_ENC_TEST_VECTORS
2816 - .vecs = tf_dec_tv_template,
2817 - .count = TF_DEC_TEST_VECTORS
2819 + .enc = __VECS(tf_enc_tv_template),
2820 + .dec = __VECS(tf_dec_tv_template)
2824 @@ -3287,14 +3215,8 @@ static const struct alg_test_desc alg_te
2825 .test = alg_test_skcipher,
2829 - .vecs = xeta_enc_tv_template,
2830 - .count = XETA_ENC_TEST_VECTORS
2833 - .vecs = xeta_dec_tv_template,
2834 - .count = XETA_DEC_TEST_VECTORS
2836 + .enc = __VECS(xeta_enc_tv_template),
2837 + .dec = __VECS(xeta_dec_tv_template)
2841 @@ -3302,14 +3224,8 @@ static const struct alg_test_desc alg_te
2842 .test = alg_test_skcipher,
2846 - .vecs = xtea_enc_tv_template,
2847 - .count = XTEA_ENC_TEST_VECTORS
2850 - .vecs = xtea_dec_tv_template,
2851 - .count = XTEA_DEC_TEST_VECTORS
2853 + .enc = __VECS(xtea_enc_tv_template),
2854 + .dec = __VECS(xtea_dec_tv_template)
2858 @@ -3317,10 +3233,7 @@ static const struct alg_test_desc alg_te
2859 .test = alg_test_kpp,
2863 - .vecs = ecdh_tv_template,
2864 - .count = ECDH_TEST_VECTORS
2866 + .kpp = __VECS(ecdh_tv_template)
2870 @@ -3328,14 +3241,8 @@ static const struct alg_test_desc alg_te
2875 - .vecs = aes_gcm_enc_tv_template,
2876 - .count = AES_GCM_ENC_TEST_VECTORS
2879 - .vecs = aes_gcm_dec_tv_template,
2880 - .count = AES_GCM_DEC_TEST_VECTORS
2882 + .enc = __VECS(aes_gcm_enc_tv_template),
2883 + .dec = __VECS(aes_gcm_dec_tv_template)
2887 @@ -3343,136 +3250,94 @@ static const struct alg_test_desc alg_te
2888 .test = alg_test_hash,
2892 - .vecs = ghash_tv_template,
2893 - .count = GHASH_TEST_VECTORS
2895 + .hash = __VECS(ghash_tv_template)
2898 .alg = "hmac(crc32)",
2899 .test = alg_test_hash,
2902 - .vecs = bfin_crc_tv_template,
2903 - .count = BFIN_CRC_TEST_VECTORS
2905 + .hash = __VECS(bfin_crc_tv_template)
2909 .test = alg_test_hash,
2912 - .vecs = hmac_md5_tv_template,
2913 - .count = HMAC_MD5_TEST_VECTORS
2915 + .hash = __VECS(hmac_md5_tv_template)
2918 .alg = "hmac(rmd128)",
2919 .test = alg_test_hash,
2922 - .vecs = hmac_rmd128_tv_template,
2923 - .count = HMAC_RMD128_TEST_VECTORS
2925 + .hash = __VECS(hmac_rmd128_tv_template)
2928 .alg = "hmac(rmd160)",
2929 .test = alg_test_hash,
2932 - .vecs = hmac_rmd160_tv_template,
2933 - .count = HMAC_RMD160_TEST_VECTORS
2935 + .hash = __VECS(hmac_rmd160_tv_template)
2938 .alg = "hmac(sha1)",
2939 .test = alg_test_hash,
2943 - .vecs = hmac_sha1_tv_template,
2944 - .count = HMAC_SHA1_TEST_VECTORS
2946 + .hash = __VECS(hmac_sha1_tv_template)
2949 .alg = "hmac(sha224)",
2950 .test = alg_test_hash,
2954 - .vecs = hmac_sha224_tv_template,
2955 - .count = HMAC_SHA224_TEST_VECTORS
2957 + .hash = __VECS(hmac_sha224_tv_template)
2960 .alg = "hmac(sha256)",
2961 .test = alg_test_hash,
2965 - .vecs = hmac_sha256_tv_template,
2966 - .count = HMAC_SHA256_TEST_VECTORS
2968 + .hash = __VECS(hmac_sha256_tv_template)
2971 .alg = "hmac(sha3-224)",
2972 .test = alg_test_hash,
2976 - .vecs = hmac_sha3_224_tv_template,
2977 - .count = HMAC_SHA3_224_TEST_VECTORS
2979 + .hash = __VECS(hmac_sha3_224_tv_template)
2982 .alg = "hmac(sha3-256)",
2983 .test = alg_test_hash,
2987 - .vecs = hmac_sha3_256_tv_template,
2988 - .count = HMAC_SHA3_256_TEST_VECTORS
2990 + .hash = __VECS(hmac_sha3_256_tv_template)
2993 .alg = "hmac(sha3-384)",
2994 .test = alg_test_hash,
2998 - .vecs = hmac_sha3_384_tv_template,
2999 - .count = HMAC_SHA3_384_TEST_VECTORS
3001 + .hash = __VECS(hmac_sha3_384_tv_template)
3004 .alg = "hmac(sha3-512)",
3005 .test = alg_test_hash,
3009 - .vecs = hmac_sha3_512_tv_template,
3010 - .count = HMAC_SHA3_512_TEST_VECTORS
3012 + .hash = __VECS(hmac_sha3_512_tv_template)
3015 .alg = "hmac(sha384)",
3016 .test = alg_test_hash,
3020 - .vecs = hmac_sha384_tv_template,
3021 - .count = HMAC_SHA384_TEST_VECTORS
3023 + .hash = __VECS(hmac_sha384_tv_template)
3026 .alg = "hmac(sha512)",
3027 .test = alg_test_hash,
3031 - .vecs = hmac_sha512_tv_template,
3032 - .count = HMAC_SHA512_TEST_VECTORS
3034 + .hash = __VECS(hmac_sha512_tv_template)
3037 .alg = "jitterentropy_rng",
3038 @@ -3484,14 +3349,8 @@ static const struct alg_test_desc alg_te
3043 - .vecs = aes_kw_enc_tv_template,
3044 - .count = ARRAY_SIZE(aes_kw_enc_tv_template)
3047 - .vecs = aes_kw_dec_tv_template,
3048 - .count = ARRAY_SIZE(aes_kw_dec_tv_template)
3050 + .enc = __VECS(aes_kw_enc_tv_template),
3051 + .dec = __VECS(aes_kw_dec_tv_template)
3055 @@ -3499,14 +3358,8 @@ static const struct alg_test_desc alg_te
3056 .test = alg_test_skcipher,
3060 - .vecs = aes_lrw_enc_tv_template,
3061 - .count = AES_LRW_ENC_TEST_VECTORS
3064 - .vecs = aes_lrw_dec_tv_template,
3065 - .count = AES_LRW_DEC_TEST_VECTORS
3067 + .enc = __VECS(aes_lrw_enc_tv_template),
3068 + .dec = __VECS(aes_lrw_dec_tv_template)
3072 @@ -3514,14 +3367,8 @@ static const struct alg_test_desc alg_te
3073 .test = alg_test_skcipher,
3077 - .vecs = camellia_lrw_enc_tv_template,
3078 - .count = CAMELLIA_LRW_ENC_TEST_VECTORS
3081 - .vecs = camellia_lrw_dec_tv_template,
3082 - .count = CAMELLIA_LRW_DEC_TEST_VECTORS
3084 + .enc = __VECS(camellia_lrw_enc_tv_template),
3085 + .dec = __VECS(camellia_lrw_dec_tv_template)
3089 @@ -3529,14 +3376,8 @@ static const struct alg_test_desc alg_te
3090 .test = alg_test_skcipher,
3094 - .vecs = cast6_lrw_enc_tv_template,
3095 - .count = CAST6_LRW_ENC_TEST_VECTORS
3098 - .vecs = cast6_lrw_dec_tv_template,
3099 - .count = CAST6_LRW_DEC_TEST_VECTORS
3101 + .enc = __VECS(cast6_lrw_enc_tv_template),
3102 + .dec = __VECS(cast6_lrw_dec_tv_template)
3106 @@ -3544,14 +3385,8 @@ static const struct alg_test_desc alg_te
3107 .test = alg_test_skcipher,
3111 - .vecs = serpent_lrw_enc_tv_template,
3112 - .count = SERPENT_LRW_ENC_TEST_VECTORS
3115 - .vecs = serpent_lrw_dec_tv_template,
3116 - .count = SERPENT_LRW_DEC_TEST_VECTORS
3118 + .enc = __VECS(serpent_lrw_enc_tv_template),
3119 + .dec = __VECS(serpent_lrw_dec_tv_template)
3123 @@ -3559,14 +3394,8 @@ static const struct alg_test_desc alg_te
3124 .test = alg_test_skcipher,
3128 - .vecs = tf_lrw_enc_tv_template,
3129 - .count = TF_LRW_ENC_TEST_VECTORS
3132 - .vecs = tf_lrw_dec_tv_template,
3133 - .count = TF_LRW_DEC_TEST_VECTORS
3135 + .enc = __VECS(tf_lrw_enc_tv_template),
3136 + .dec = __VECS(tf_lrw_dec_tv_template)
3140 @@ -3575,14 +3404,8 @@ static const struct alg_test_desc alg_te
3145 - .vecs = lz4_comp_tv_template,
3146 - .count = LZ4_COMP_TEST_VECTORS
3149 - .vecs = lz4_decomp_tv_template,
3150 - .count = LZ4_DECOMP_TEST_VECTORS
3152 + .comp = __VECS(lz4_comp_tv_template),
3153 + .decomp = __VECS(lz4_decomp_tv_template)
3157 @@ -3591,14 +3414,8 @@ static const struct alg_test_desc alg_te
3162 - .vecs = lz4hc_comp_tv_template,
3163 - .count = LZ4HC_COMP_TEST_VECTORS
3166 - .vecs = lz4hc_decomp_tv_template,
3167 - .count = LZ4HC_DECOMP_TEST_VECTORS
3169 + .comp = __VECS(lz4hc_comp_tv_template),
3170 + .decomp = __VECS(lz4hc_decomp_tv_template)
3174 @@ -3607,42 +3424,27 @@ static const struct alg_test_desc alg_te
3179 - .vecs = lzo_comp_tv_template,
3180 - .count = LZO_COMP_TEST_VECTORS
3183 - .vecs = lzo_decomp_tv_template,
3184 - .count = LZO_DECOMP_TEST_VECTORS
3186 + .comp = __VECS(lzo_comp_tv_template),
3187 + .decomp = __VECS(lzo_decomp_tv_template)
3192 .test = alg_test_hash,
3195 - .vecs = md4_tv_template,
3196 - .count = MD4_TEST_VECTORS
3198 + .hash = __VECS(md4_tv_template)
3202 .test = alg_test_hash,
3205 - .vecs = md5_tv_template,
3206 - .count = MD5_TEST_VECTORS
3208 + .hash = __VECS(md5_tv_template)
3211 .alg = "michael_mic",
3212 .test = alg_test_hash,
3215 - .vecs = michael_mic_tv_template,
3216 - .count = MICHAEL_MIC_TEST_VECTORS
3218 + .hash = __VECS(michael_mic_tv_template)
3222 @@ -3650,14 +3452,8 @@ static const struct alg_test_desc alg_te
3227 - .vecs = aes_ofb_enc_tv_template,
3228 - .count = AES_OFB_ENC_TEST_VECTORS
3231 - .vecs = aes_ofb_dec_tv_template,
3232 - .count = AES_OFB_DEC_TEST_VECTORS
3234 + .enc = __VECS(aes_ofb_enc_tv_template),
3235 + .dec = __VECS(aes_ofb_dec_tv_template)
3239 @@ -3665,24 +3461,15 @@ static const struct alg_test_desc alg_te
3240 .test = alg_test_skcipher,
3244 - .vecs = fcrypt_pcbc_enc_tv_template,
3245 - .count = FCRYPT_ENC_TEST_VECTORS
3248 - .vecs = fcrypt_pcbc_dec_tv_template,
3249 - .count = FCRYPT_DEC_TEST_VECTORS
3251 + .enc = __VECS(fcrypt_pcbc_enc_tv_template),
3252 + .dec = __VECS(fcrypt_pcbc_dec_tv_template)
3257 .test = alg_test_hash,
3260 - .vecs = poly1305_tv_template,
3261 - .count = POLY1305_TEST_VECTORS
3263 + .hash = __VECS(poly1305_tv_template)
3266 .alg = "rfc3686(ctr(aes))",
3267 @@ -3690,14 +3477,8 @@ static const struct alg_test_desc alg_te
3272 - .vecs = aes_ctr_rfc3686_enc_tv_template,
3273 - .count = AES_CTR_3686_ENC_TEST_VECTORS
3276 - .vecs = aes_ctr_rfc3686_dec_tv_template,
3277 - .count = AES_CTR_3686_DEC_TEST_VECTORS
3279 + .enc = __VECS(aes_ctr_rfc3686_enc_tv_template),
3280 + .dec = __VECS(aes_ctr_rfc3686_dec_tv_template)
3284 @@ -3706,14 +3487,8 @@ static const struct alg_test_desc alg_te
3289 - .vecs = aes_gcm_rfc4106_enc_tv_template,
3290 - .count = AES_GCM_4106_ENC_TEST_VECTORS
3293 - .vecs = aes_gcm_rfc4106_dec_tv_template,
3294 - .count = AES_GCM_4106_DEC_TEST_VECTORS
3296 + .enc = __VECS(aes_gcm_rfc4106_enc_tv_template),
3297 + .dec = __VECS(aes_gcm_rfc4106_dec_tv_template)
3301 @@ -3722,14 +3497,8 @@ static const struct alg_test_desc alg_te
3306 - .vecs = aes_ccm_rfc4309_enc_tv_template,
3307 - .count = AES_CCM_4309_ENC_TEST_VECTORS
3310 - .vecs = aes_ccm_rfc4309_dec_tv_template,
3311 - .count = AES_CCM_4309_DEC_TEST_VECTORS
3313 + .enc = __VECS(aes_ccm_rfc4309_enc_tv_template),
3314 + .dec = __VECS(aes_ccm_rfc4309_dec_tv_template)
3318 @@ -3737,14 +3506,8 @@ static const struct alg_test_desc alg_te
3319 .test = alg_test_aead,
3323 - .vecs = aes_gcm_rfc4543_enc_tv_template,
3324 - .count = AES_GCM_4543_ENC_TEST_VECTORS
3327 - .vecs = aes_gcm_rfc4543_dec_tv_template,
3328 - .count = AES_GCM_4543_DEC_TEST_VECTORS
3330 + .enc = __VECS(aes_gcm_rfc4543_enc_tv_template),
3331 + .dec = __VECS(aes_gcm_rfc4543_dec_tv_template),
3335 @@ -3752,14 +3515,8 @@ static const struct alg_test_desc alg_te
3336 .test = alg_test_aead,
3340 - .vecs = rfc7539_enc_tv_template,
3341 - .count = RFC7539_ENC_TEST_VECTORS
3344 - .vecs = rfc7539_dec_tv_template,
3345 - .count = RFC7539_DEC_TEST_VECTORS
3347 + .enc = __VECS(rfc7539_enc_tv_template),
3348 + .dec = __VECS(rfc7539_dec_tv_template),
3352 @@ -3767,71 +3524,47 @@ static const struct alg_test_desc alg_te
3353 .test = alg_test_aead,
3357 - .vecs = rfc7539esp_enc_tv_template,
3358 - .count = RFC7539ESP_ENC_TEST_VECTORS
3361 - .vecs = rfc7539esp_dec_tv_template,
3362 - .count = RFC7539ESP_DEC_TEST_VECTORS
3364 + .enc = __VECS(rfc7539esp_enc_tv_template),
3365 + .dec = __VECS(rfc7539esp_dec_tv_template),
3370 .test = alg_test_hash,
3373 - .vecs = rmd128_tv_template,
3374 - .count = RMD128_TEST_VECTORS
3376 + .hash = __VECS(rmd128_tv_template)
3380 .test = alg_test_hash,
3383 - .vecs = rmd160_tv_template,
3384 - .count = RMD160_TEST_VECTORS
3386 + .hash = __VECS(rmd160_tv_template)
3390 .test = alg_test_hash,
3393 - .vecs = rmd256_tv_template,
3394 - .count = RMD256_TEST_VECTORS
3396 + .hash = __VECS(rmd256_tv_template)
3400 .test = alg_test_hash,
3403 - .vecs = rmd320_tv_template,
3404 - .count = RMD320_TEST_VECTORS
3406 + .hash = __VECS(rmd320_tv_template)
3410 .test = alg_test_akcipher,
3414 - .vecs = rsa_tv_template,
3415 - .count = RSA_TEST_VECTORS
3417 + .akcipher = __VECS(rsa_tv_template)
3421 .test = alg_test_skcipher,
3425 - .vecs = salsa20_stream_enc_tv_template,
3426 - .count = SALSA20_STREAM_ENC_TEST_VECTORS
3428 + .enc = __VECS(salsa20_stream_enc_tv_template)
3432 @@ -3839,162 +3572,120 @@ static const struct alg_test_desc alg_te
3433 .test = alg_test_hash,
3437 - .vecs = sha1_tv_template,
3438 - .count = SHA1_TEST_VECTORS
3440 + .hash = __VECS(sha1_tv_template)
3444 .test = alg_test_hash,
3448 - .vecs = sha224_tv_template,
3449 - .count = SHA224_TEST_VECTORS
3451 + .hash = __VECS(sha224_tv_template)
3455 .test = alg_test_hash,
3459 - .vecs = sha256_tv_template,
3460 - .count = SHA256_TEST_VECTORS
3462 + .hash = __VECS(sha256_tv_template)
3466 .test = alg_test_hash,
3470 - .vecs = sha3_224_tv_template,
3471 - .count = SHA3_224_TEST_VECTORS
3473 + .hash = __VECS(sha3_224_tv_template)
3477 .test = alg_test_hash,
3481 - .vecs = sha3_256_tv_template,
3482 - .count = SHA3_256_TEST_VECTORS
3484 + .hash = __VECS(sha3_256_tv_template)
3488 .test = alg_test_hash,
3492 - .vecs = sha3_384_tv_template,
3493 - .count = SHA3_384_TEST_VECTORS
3495 + .hash = __VECS(sha3_384_tv_template)
3499 .test = alg_test_hash,
3503 - .vecs = sha3_512_tv_template,
3504 - .count = SHA3_512_TEST_VECTORS
3506 + .hash = __VECS(sha3_512_tv_template)
3510 .test = alg_test_hash,
3514 - .vecs = sha384_tv_template,
3515 - .count = SHA384_TEST_VECTORS
3517 + .hash = __VECS(sha384_tv_template)
3521 .test = alg_test_hash,
3525 - .vecs = sha512_tv_template,
3526 - .count = SHA512_TEST_VECTORS
3528 + .hash = __VECS(sha512_tv_template)
3532 .test = alg_test_hash,
3535 - .vecs = tgr128_tv_template,
3536 - .count = TGR128_TEST_VECTORS
3538 + .hash = __VECS(tgr128_tv_template)
3542 .test = alg_test_hash,
3545 - .vecs = tgr160_tv_template,
3546 - .count = TGR160_TEST_VECTORS
3548 + .hash = __VECS(tgr160_tv_template)
3552 .test = alg_test_hash,
3555 - .vecs = tgr192_tv_template,
3556 - .count = TGR192_TEST_VECTORS
3557 + .hash = __VECS(tgr192_tv_template)
3560 + .alg = "tls10(hmac(sha1),cbc(aes))",
3561 + .test = alg_test_tls,
3564 + .enc = __VECS(tls_enc_tv_template),
3565 + .dec = __VECS(tls_dec_tv_template)
3570 .test = alg_test_hash,
3573 - .vecs = aes_vmac128_tv_template,
3574 - .count = VMAC_AES_TEST_VECTORS
3576 + .hash = __VECS(aes_vmac128_tv_template)
3580 .test = alg_test_hash,
3583 - .vecs = wp256_tv_template,
3584 - .count = WP256_TEST_VECTORS
3586 + .hash = __VECS(wp256_tv_template)
3590 .test = alg_test_hash,
3593 - .vecs = wp384_tv_template,
3594 - .count = WP384_TEST_VECTORS
3596 + .hash = __VECS(wp384_tv_template)
3600 .test = alg_test_hash,
3603 - .vecs = wp512_tv_template,
3604 - .count = WP512_TEST_VECTORS
3606 + .hash = __VECS(wp512_tv_template)
3610 .test = alg_test_hash,
3613 - .vecs = aes_xcbc128_tv_template,
3614 - .count = XCBC_AES_TEST_VECTORS
3616 + .hash = __VECS(aes_xcbc128_tv_template)
3620 @@ -4002,14 +3693,8 @@ static const struct alg_test_desc alg_te
3625 - .vecs = aes_xts_enc_tv_template,
3626 - .count = AES_XTS_ENC_TEST_VECTORS
3629 - .vecs = aes_xts_dec_tv_template,
3630 - .count = AES_XTS_DEC_TEST_VECTORS
3632 + .enc = __VECS(aes_xts_enc_tv_template),
3633 + .dec = __VECS(aes_xts_dec_tv_template)
3637 @@ -4017,14 +3702,8 @@ static const struct alg_test_desc alg_te
3638 .test = alg_test_skcipher,
3642 - .vecs = camellia_xts_enc_tv_template,
3643 - .count = CAMELLIA_XTS_ENC_TEST_VECTORS
3646 - .vecs = camellia_xts_dec_tv_template,
3647 - .count = CAMELLIA_XTS_DEC_TEST_VECTORS
3649 + .enc = __VECS(camellia_xts_enc_tv_template),
3650 + .dec = __VECS(camellia_xts_dec_tv_template)
3654 @@ -4032,14 +3711,8 @@ static const struct alg_test_desc alg_te
3655 .test = alg_test_skcipher,
3659 - .vecs = cast6_xts_enc_tv_template,
3660 - .count = CAST6_XTS_ENC_TEST_VECTORS
3663 - .vecs = cast6_xts_dec_tv_template,
3664 - .count = CAST6_XTS_DEC_TEST_VECTORS
3666 + .enc = __VECS(cast6_xts_enc_tv_template),
3667 + .dec = __VECS(cast6_xts_dec_tv_template)
3671 @@ -4047,14 +3720,8 @@ static const struct alg_test_desc alg_te
3672 .test = alg_test_skcipher,
3676 - .vecs = serpent_xts_enc_tv_template,
3677 - .count = SERPENT_XTS_ENC_TEST_VECTORS
3680 - .vecs = serpent_xts_dec_tv_template,
3681 - .count = SERPENT_XTS_DEC_TEST_VECTORS
3683 + .enc = __VECS(serpent_xts_enc_tv_template),
3684 + .dec = __VECS(serpent_xts_dec_tv_template)
3688 @@ -4062,14 +3729,8 @@ static const struct alg_test_desc alg_te
3689 .test = alg_test_skcipher,
3693 - .vecs = tf_xts_enc_tv_template,
3694 - .count = TF_XTS_ENC_TEST_VECTORS
3697 - .vecs = tf_xts_dec_tv_template,
3698 - .count = TF_XTS_DEC_TEST_VECTORS
3700 + .enc = __VECS(tf_xts_enc_tv_template),
3701 + .dec = __VECS(tf_xts_dec_tv_template)
3705 --- a/crypto/testmgr.h
3706 +++ b/crypto/testmgr.h
3709 struct hash_testvec {
3710 /* only used with keyed hash algorithms */
3715 + const char *plaintext;
3716 + const char *digest;
3717 unsigned char tap[MAX_TAP];
3718 unsigned short psize;
3720 @@ -63,11 +63,11 @@ struct hash_testvec {
3723 struct cipher_testvec {
3731 + const char *iv_out;
3732 + const char *input;
3733 + const char *result;
3734 unsigned short tap[MAX_TAP];
3736 unsigned char also_non_np;
3737 @@ -80,11 +80,11 @@ struct cipher_testvec {
3740 struct aead_testvec {
3748 + const char *input;
3749 + const char *assoc;
3750 + const char *result;
3751 unsigned char tap[MAX_TAP];
3752 unsigned char atap[MAX_TAP];
3754 @@ -99,10 +99,10 @@ struct aead_testvec {
3757 struct cprng_testvec {
3765 + const char *result;
3767 unsigned short dtlen;
3768 unsigned short vlen;
3769 @@ -111,24 +111,38 @@ struct cprng_testvec {
3772 struct drbg_testvec {
3773 - unsigned char *entropy;
3774 + const unsigned char *entropy;
3776 - unsigned char *entpra;
3777 - unsigned char *entprb;
3778 + const unsigned char *entpra;
3779 + const unsigned char *entprb;
3781 - unsigned char *addtla;
3782 - unsigned char *addtlb;
3783 + const unsigned char *addtla;
3784 + const unsigned char *addtlb;
3786 - unsigned char *pers;
3787 + const unsigned char *pers;
3789 - unsigned char *expected;
3790 + const unsigned char *expected;
3794 +struct tls_testvec {
3795 + char *key; /* wrapped keys for encryption and authentication */
3796 + char *iv; /* initialization vector */
3797 + char *input; /* input data */
3798 + char *assoc; /* associated data: seq num, type, version, input len */
3799 + char *result; /* result data */
3800 + unsigned char fail; /* the test failure is expected */
3801 + unsigned char novrfy; /* dec verification failure expected */
3802 + unsigned char klen; /* key length */
3803 + unsigned short ilen; /* input data length */
3804 + unsigned short alen; /* associated data length */
3805 + unsigned short rlen; /* result length */
3808 struct akcipher_testvec {
3809 - unsigned char *key;
3812 + const unsigned char *key;
3813 + const unsigned char *m;
3814 + const unsigned char *c;
3815 unsigned int key_len;
3816 unsigned int m_size;
3817 unsigned int c_size;
3818 @@ -136,27 +150,227 @@ struct akcipher_testvec {
3821 struct kpp_testvec {
3822 - unsigned char *secret;
3823 - unsigned char *b_public;
3824 - unsigned char *expected_a_public;
3825 - unsigned char *expected_ss;
3826 + const unsigned char *secret;
3827 + const unsigned char *b_public;
3828 + const unsigned char *expected_a_public;
3829 + const unsigned char *expected_ss;
3830 unsigned short secret_size;
3831 unsigned short b_public_size;
3832 unsigned short expected_a_public_size;
3833 unsigned short expected_ss_size;
3836 -static char zeroed_string[48];
3837 +static const char zeroed_string[48];
3840 - * RSA test vectors. Borrowed from openSSL.
3841 + * TLS1.0 synthetic test vectors
3843 -#ifdef CONFIG_CRYPTO_FIPS
3844 -#define RSA_TEST_VECTORS 2
3845 +static struct tls_testvec tls_enc_tv_template[] = {
3847 +#ifdef __LITTLE_ENDIAN
3848 + .key = "\x08\x00" /* rta length */
3849 + "\x01\x00" /* rta type */
3851 + .key = "\x00\x08" /* rta length */
3852 + "\x00\x01" /* rta type */
3854 + "\x00\x00\x00\x10" /* enc key length */
3855 + "authenticationkey20benckeyis16_bytes",
3856 + .klen = 8 + 20 + 16,
3857 + .iv = "iv0123456789abcd",
3858 + .input = "Single block msg",
3860 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3861 + "\x00\x03\x01\x00\x10",
3863 + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3864 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3865 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3866 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3867 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3868 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3869 + .rlen = 16 + 20 + 12,
3871 +#ifdef __LITTLE_ENDIAN
3872 + .key = "\x08\x00" /* rta length */
3873 + "\x01\x00" /* rta type */
3875 + .key = "\x00\x08" /* rta length */
3876 + "\x00\x01" /* rta type */
3878 + "\x00\x00\x00\x10" /* enc key length */
3879 + "authenticationkey20benckeyis16_bytes",
3880 + .klen = 8 + 20 + 16,
3881 + .iv = "iv0123456789abcd",
3884 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3885 + "\x00\x03\x01\x00\x00",
3887 + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
3888 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
3889 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
3890 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
3893 +#ifdef __LITTLE_ENDIAN
3894 + .key = "\x08\x00" /* rta length */
3895 + "\x01\x00" /* rta type */
3897 + .key = "\x00\x08" /* rta length */
3898 + "\x00\x01" /* rta type */
3900 + "\x00\x00\x00\x10" /* enc key length */
3901 + "authenticationkey20benckeyis16_bytes",
3902 + .klen = 8 + 20 + 16,
3903 + .iv = "iv0123456789abcd",
3904 + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
3905 + " plaintext285 bytes plaintext285 bytes plaintext285"
3906 + " bytes plaintext285 bytes plaintext285 bytes"
3907 + " plaintext285 bytes plaintext285 bytes plaintext285"
3908 + " bytes plaintext285 bytes plaintext285 bytes"
3909 + " plaintext285 bytes plaintext285 bytes plaintext285"
3910 + " bytes plaintext285 bytes plaintext",
3912 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3913 + "\x00\x03\x01\x01\x1d",
3915 + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
3916 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
3917 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
3918 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
3919 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
3920 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
3921 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
3922 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
3923 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
3924 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
3925 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
3926 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
3927 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
3928 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
3929 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
3930 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
3931 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
3932 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
3933 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
3934 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
3935 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
3936 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
3937 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
3938 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
3939 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
3940 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
3941 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
3942 + .rlen = 285 + 20 + 15,
3946 +static struct tls_testvec tls_dec_tv_template[] = {
3948 +#ifdef __LITTLE_ENDIAN
3949 + .key = "\x08\x00" /* rta length */
3950 + "\x01\x00" /* rta type */
3952 + .key = "\x00\x08" /* rta length */
3953 + "\x00\x01" /* rta type */
3955 + "\x00\x00\x00\x10" /* enc key length */
3956 + "authenticationkey20benckeyis16_bytes",
3957 + .klen = 8 + 20 + 16,
3958 + .iv = "iv0123456789abcd",
3959 + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3960 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3961 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3962 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3963 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3964 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3965 + .ilen = 16 + 20 + 12,
3966 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3967 + "\x00\x03\x01\x00\x30",
3969 + .result = "Single block msg",
3972 +#ifdef __LITTLE_ENDIAN
3973 + .key = "\x08\x00" /* rta length */
3974 + "\x01\x00" /* rta type */
3976 -#define RSA_TEST_VECTORS 5
3977 + .key = "\x00\x08" /* rta length */
3978 + "\x00\x01" /* rta type */
3980 -static struct akcipher_testvec rsa_tv_template[] = {
3981 + "\x00\x00\x00\x10" /* enc key length */
3982 + "authenticationkey20benckeyis16_bytes",
3983 + .klen = 8 + 20 + 16,
3984 + .iv = "iv0123456789abcd",
3985 + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
3986 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
3987 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
3988 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
3990 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3991 + "\x00\x03\x01\x00\x20",
3996 +#ifdef __LITTLE_ENDIAN
3997 + .key = "\x08\x00" /* rta length */
3998 + "\x01\x00" /* rta type */
4000 + .key = "\x00\x08" /* rta length */
4001 + "\x00\x01" /* rta type */
4003 + "\x00\x00\x00\x10" /* enc key length */
4004 + "authenticationkey20benckeyis16_bytes",
4005 + .klen = 8 + 20 + 16,
4006 + .iv = "iv0123456789abcd",
4007 + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
4008 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
4009 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
4010 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
4011 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
4012 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
4013 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
4014 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
4015 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
4016 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
4017 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
4018 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
4019 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
4020 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
4021 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
4022 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
4023 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
4024 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
4025 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
4026 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
4027 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
4028 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
4029 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
4030 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
4031 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
4032 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
4033 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
4035 + .ilen = 285 + 20 + 15,
4036 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
4037 + "\x00\x03\x01\x01\x40",
4039 + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
4040 + " plaintext285 bytes plaintext285 bytes plaintext285"
4041 + " bytes plaintext285 bytes plaintext285 bytes"
4042 + " plaintext285 bytes plaintext285 bytes plaintext285"
4043 + " bytes plaintext285 bytes plaintext285 bytes"
4044 + " plaintext285 bytes plaintext285 bytes plaintext",
4050 + * RSA test vectors. Borrowed from openSSL.
4052 +static const struct akcipher_testvec rsa_tv_template[] = {
4054 #ifndef CONFIG_CRYPTO_FIPS
4056 @@ -340,6 +554,7 @@ static struct akcipher_testvec rsa_tv_te
4059 .public_key_vec = true,
4060 +#ifndef CONFIG_CRYPTO_FIPS
4063 "\x30\x82\x09\x29" /* sequence of 2345 bytes */
4064 @@ -538,12 +753,11 @@ static struct akcipher_testvec rsa_tv_te
4072 -#define DH_TEST_VECTORS 2
4074 -struct kpp_testvec dh_tv_template[] = {
4075 +static const struct kpp_testvec dh_tv_template[] = {
4078 #ifdef __LITTLE_ENDIAN
4079 @@ -760,12 +974,7 @@ struct kpp_testvec dh_tv_template[] = {
4083 -#ifdef CONFIG_CRYPTO_FIPS
4084 -#define ECDH_TEST_VECTORS 1
4086 -#define ECDH_TEST_VECTORS 2
4088 -struct kpp_testvec ecdh_tv_template[] = {
4089 +static const struct kpp_testvec ecdh_tv_template[] = {
4091 #ifndef CONFIG_CRYPTO_FIPS
4093 @@ -856,9 +1065,7 @@ struct kpp_testvec ecdh_tv_template[] =
4095 * MD4 test vectors from RFC1320
4097 -#define MD4_TEST_VECTORS 7
4099 -static struct hash_testvec md4_tv_template [] = {
4100 +static const struct hash_testvec md4_tv_template[] = {
4103 .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
4104 @@ -899,8 +1106,7 @@ static struct hash_testvec md4_tv_templa
4108 -#define SHA3_224_TEST_VECTORS 3
4109 -static struct hash_testvec sha3_224_tv_template[] = {
4110 +static const struct hash_testvec sha3_224_tv_template[] = {
4113 .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
4114 @@ -925,8 +1131,7 @@ static struct hash_testvec sha3_224_tv_t
4118 -#define SHA3_256_TEST_VECTORS 3
4119 -static struct hash_testvec sha3_256_tv_template[] = {
4120 +static const struct hash_testvec sha3_256_tv_template[] = {
4123 .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
4124 @@ -952,8 +1157,7 @@ static struct hash_testvec sha3_256_tv_t
4128 -#define SHA3_384_TEST_VECTORS 3
4129 -static struct hash_testvec sha3_384_tv_template[] = {
4130 +static const struct hash_testvec sha3_384_tv_template[] = {
4133 .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
4134 @@ -985,8 +1189,7 @@ static struct hash_testvec sha3_384_tv_t
4138 -#define SHA3_512_TEST_VECTORS 3
4139 -static struct hash_testvec sha3_512_tv_template[] = {
4140 +static const struct hash_testvec sha3_512_tv_template[] = {
4143 .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
4144 @@ -1027,9 +1230,7 @@ static struct hash_testvec sha3_512_tv_t
4146 * MD5 test vectors from RFC1321
4148 -#define MD5_TEST_VECTORS 7
4150 -static struct hash_testvec md5_tv_template[] = {
4151 +static const struct hash_testvec md5_tv_template[] = {
4153 .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
4154 "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
4155 @@ -1073,9 +1274,7 @@ static struct hash_testvec md5_tv_templa
4157 * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
4159 -#define RMD128_TEST_VECTORS 10
4161 -static struct hash_testvec rmd128_tv_template[] = {
4162 +static const struct hash_testvec rmd128_tv_template[] = {
4164 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
4165 "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
4166 @@ -1137,9 +1336,7 @@ static struct hash_testvec rmd128_tv_tem
4168 * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
4170 -#define RMD160_TEST_VECTORS 10
4172 -static struct hash_testvec rmd160_tv_template[] = {
4173 +static const struct hash_testvec rmd160_tv_template[] = {
4175 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
4176 "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
4177 @@ -1201,9 +1398,7 @@ static struct hash_testvec rmd160_tv_tem
4179 * RIPEMD-256 test vectors
4181 -#define RMD256_TEST_VECTORS 8
4183 -static struct hash_testvec rmd256_tv_template[] = {
4184 +static const struct hash_testvec rmd256_tv_template[] = {
4186 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
4187 "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
4188 @@ -1269,9 +1464,7 @@ static struct hash_testvec rmd256_tv_tem
4190 * RIPEMD-320 test vectors
4192 -#define RMD320_TEST_VECTORS 8
4194 -static struct hash_testvec rmd320_tv_template[] = {
4195 +static const struct hash_testvec rmd320_tv_template[] = {
4197 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
4198 "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
4199 @@ -1334,36 +1527,49 @@ static struct hash_testvec rmd320_tv_tem
4203 -#define CRCT10DIF_TEST_VECTORS 3
4204 -static struct hash_testvec crct10dif_tv_template[] = {
4205 +static const struct hash_testvec crct10dif_tv_template[] = {
4207 - .plaintext = "abc",
4209 -#ifdef __LITTLE_ENDIAN
4210 - .digest = "\x3b\x44",
4212 - .digest = "\x44\x3b",
4215 - .plaintext = "1234567890123456789012345678901234567890"
4216 - "123456789012345678901234567890123456789",
4218 -#ifdef __LITTLE_ENDIAN
4219 - .digest = "\x70\x4b",
4221 - .digest = "\x4b\x70",
4225 - "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
4227 -#ifdef __LITTLE_ENDIAN
4228 - .digest = "\xe3\x9c",
4230 - .digest = "\x9c\xe3",
4234 + .plaintext = "abc",
4236 + .digest = (u8 *)(u16 []){ 0x443b },
4238 + .plaintext = "1234567890123456789012345678901234567890"
4239 + "123456789012345678901234567890123456789",
4241 + .digest = (u8 *)(u16 []){ 0x4b70 },
4243 + .tap = { 63, 16 },
4245 + .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
4248 + .digest = (u8 *)(u16 []){ 0x9ce3 },
4250 + .tap = { 1, 2, 28, 7, 6, 5, 4, 3 },
4252 + .plaintext = "1234567890123456789012345678901234567890"
4253 + "1234567890123456789012345678901234567890"
4254 + "1234567890123456789012345678901234567890"
4255 + "1234567890123456789012345678901234567890"
4256 + "1234567890123456789012345678901234567890"
4257 + "1234567890123456789012345678901234567890"
4258 + "1234567890123456789012345678901234567890"
4259 + "123456789012345678901234567890123456789",
4261 + .digest = (u8 *)(u16 []){ 0x44c6 },
4263 + .plaintext = "1234567890123456789012345678901234567890"
4264 + "1234567890123456789012345678901234567890"
4265 + "1234567890123456789012345678901234567890"
4266 + "1234567890123456789012345678901234567890"
4267 + "1234567890123456789012345678901234567890"
4268 + "1234567890123456789012345678901234567890"
4269 + "1234567890123456789012345678901234567890"
4270 + "123456789012345678901234567890123456789",
4272 + .digest = (u8 *)(u16 []){ 0x44c6 },
4274 + .tap = { 1, 255, 57, 6 },
4278 @@ -1371,9 +1577,7 @@ static struct hash_testvec crct10dif_tv_
4279 * SHA1 test vectors from from FIPS PUB 180-1
4280 * Long vector from CAVS 5.0
4282 -#define SHA1_TEST_VECTORS 6
4284 -static struct hash_testvec sha1_tv_template[] = {
4285 +static const struct hash_testvec sha1_tv_template[] = {
4289 @@ -1563,9 +1767,7 @@ static struct hash_testvec sha1_tv_templ
4291 * SHA224 test vectors from from FIPS PUB 180-2
4293 -#define SHA224_TEST_VECTORS 5
4295 -static struct hash_testvec sha224_tv_template[] = {
4296 +static const struct hash_testvec sha224_tv_template[] = {
4300 @@ -1737,9 +1939,7 @@ static struct hash_testvec sha224_tv_tem
4302 * SHA256 test vectors from from NIST
4304 -#define SHA256_TEST_VECTORS 5
4306 -static struct hash_testvec sha256_tv_template[] = {
4307 +static const struct hash_testvec sha256_tv_template[] = {
4311 @@ -1910,9 +2110,7 @@ static struct hash_testvec sha256_tv_tem
4313 * SHA384 test vectors from from NIST and kerneli
4315 -#define SHA384_TEST_VECTORS 6
4317 -static struct hash_testvec sha384_tv_template[] = {
4318 +static const struct hash_testvec sha384_tv_template[] = {
4322 @@ -2104,9 +2302,7 @@ static struct hash_testvec sha384_tv_tem
4324 * SHA512 test vectors from from NIST and kerneli
4326 -#define SHA512_TEST_VECTORS 6
4328 -static struct hash_testvec sha512_tv_template[] = {
4329 +static const struct hash_testvec sha512_tv_template[] = {
4333 @@ -2313,9 +2509,7 @@ static struct hash_testvec sha512_tv_tem
4334 * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE
4337 -#define WP512_TEST_VECTORS 8
4339 -static struct hash_testvec wp512_tv_template[] = {
4340 +static const struct hash_testvec wp512_tv_template[] = {
4344 @@ -2411,9 +2605,7 @@ static struct hash_testvec wp512_tv_temp
4348 -#define WP384_TEST_VECTORS 8
4350 -static struct hash_testvec wp384_tv_template[] = {
4351 +static const struct hash_testvec wp384_tv_template[] = {
4355 @@ -2493,9 +2685,7 @@ static struct hash_testvec wp384_tv_temp
4359 -#define WP256_TEST_VECTORS 8
4361 -static struct hash_testvec wp256_tv_template[] = {
4362 +static const struct hash_testvec wp256_tv_template[] = {
4366 @@ -2562,9 +2752,7 @@ static struct hash_testvec wp256_tv_temp
4368 * TIGER test vectors from Tiger website
4370 -#define TGR192_TEST_VECTORS 6
4372 -static struct hash_testvec tgr192_tv_template[] = {
4373 +static const struct hash_testvec tgr192_tv_template[] = {
4377 @@ -2607,9 +2795,7 @@ static struct hash_testvec tgr192_tv_tem
4381 -#define TGR160_TEST_VECTORS 6
4383 -static struct hash_testvec tgr160_tv_template[] = {
4384 +static const struct hash_testvec tgr160_tv_template[] = {
4388 @@ -2652,9 +2838,7 @@ static struct hash_testvec tgr160_tv_tem
4392 -#define TGR128_TEST_VECTORS 6
4394 -static struct hash_testvec tgr128_tv_template[] = {
4395 +static const struct hash_testvec tgr128_tv_template[] = {
4399 @@ -2691,9 +2875,7 @@ static struct hash_testvec tgr128_tv_tem
4403 -#define GHASH_TEST_VECTORS 6
4405 -static struct hash_testvec ghash_tv_template[] =
4406 +static const struct hash_testvec ghash_tv_template[] =
4409 .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
4410 @@ -2808,9 +2990,7 @@ static struct hash_testvec ghash_tv_temp
4411 * HMAC-MD5 test vectors from RFC2202
4412 * (These need to be fixed to not use strlen).
4414 -#define HMAC_MD5_TEST_VECTORS 7
4416 -static struct hash_testvec hmac_md5_tv_template[] =
4417 +static const struct hash_testvec hmac_md5_tv_template[] =
4420 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4421 @@ -2890,9 +3070,7 @@ static struct hash_testvec hmac_md5_tv_t
4423 * HMAC-RIPEMD128 test vectors from RFC2286
4425 -#define HMAC_RMD128_TEST_VECTORS 7
4427 -static struct hash_testvec hmac_rmd128_tv_template[] = {
4428 +static const struct hash_testvec hmac_rmd128_tv_template[] = {
4430 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4432 @@ -2971,9 +3149,7 @@ static struct hash_testvec hmac_rmd128_t
4434 * HMAC-RIPEMD160 test vectors from RFC2286
4436 -#define HMAC_RMD160_TEST_VECTORS 7
4438 -static struct hash_testvec hmac_rmd160_tv_template[] = {
4439 +static const struct hash_testvec hmac_rmd160_tv_template[] = {
4441 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4443 @@ -3052,9 +3228,7 @@ static struct hash_testvec hmac_rmd160_t
4445 * HMAC-SHA1 test vectors from RFC2202
4447 -#define HMAC_SHA1_TEST_VECTORS 7
4449 -static struct hash_testvec hmac_sha1_tv_template[] = {
4450 +static const struct hash_testvec hmac_sha1_tv_template[] = {
4452 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4454 @@ -3135,9 +3309,7 @@ static struct hash_testvec hmac_sha1_tv_
4456 * SHA224 HMAC test vectors from RFC4231
4458 -#define HMAC_SHA224_TEST_VECTORS 4
4460 -static struct hash_testvec hmac_sha224_tv_template[] = {
4461 +static const struct hash_testvec hmac_sha224_tv_template[] = {
4463 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4464 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4465 @@ -3250,9 +3422,7 @@ static struct hash_testvec hmac_sha224_t
4466 * HMAC-SHA256 test vectors from
4467 * draft-ietf-ipsec-ciph-sha-256-01.txt
4469 -#define HMAC_SHA256_TEST_VECTORS 10
4471 -static struct hash_testvec hmac_sha256_tv_template[] = {
4472 +static const struct hash_testvec hmac_sha256_tv_template[] = {
4474 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
4475 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
4476 @@ -3387,9 +3557,7 @@ static struct hash_testvec hmac_sha256_t
4480 -#define CMAC_AES_TEST_VECTORS 6
4482 -static struct hash_testvec aes_cmac128_tv_template[] = {
4483 +static const struct hash_testvec aes_cmac128_tv_template[] = {
4484 { /* From NIST Special Publication 800-38B, AES-128 */
4485 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4486 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4487 @@ -3464,9 +3632,67 @@ static struct hash_testvec aes_cmac128_t
4491 -#define CMAC_DES3_EDE_TEST_VECTORS 4
4492 +static const struct hash_testvec aes_cbcmac_tv_template[] = {
4494 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4495 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4496 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4497 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
4498 + .digest = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
4499 + "\xa8\x9e\xca\xf3\x24\x66\xef\x97",
4503 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4504 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4505 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4506 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4507 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4508 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4510 + .digest = "\x9d\x0d\xd0\x63\xfb\xcb\x24\x43"
4511 + "\xf8\xf2\x76\x03\xac\x39\xb0\x9d",
4517 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4518 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4519 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4520 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4521 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4522 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4523 + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4524 + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4525 + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4526 + "\xad\x2b\x41\x7b\xe6\x6c\x37",
4527 + .digest = "\xc0\x71\x73\xb8\xa0\x2c\x11\x7c"
4528 + "\xaf\xdc\xb2\xf8\x89\x32\xa3\x3a",
4532 + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
4533 + "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
4534 + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
4535 + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
4536 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4537 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4538 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4539 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4540 + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4541 + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4542 + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4543 + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10"
4545 + .digest = "\x6a\x4e\xdb\x21\x47\x51\xdf\x4f"
4546 + "\xa8\x4d\x4c\x10\x3b\x72\x7d\xd6",
4552 -static struct hash_testvec des3_ede_cmac64_tv_template[] = {
4553 +static const struct hash_testvec des3_ede_cmac64_tv_template[] = {
4555 * From NIST Special Publication 800-38B, Three Key TDEA
4556 * Corrected test vectors from:
4557 @@ -3512,9 +3738,7 @@ static struct hash_testvec des3_ede_cmac
4561 -#define XCBC_AES_TEST_VECTORS 6
4563 -static struct hash_testvec aes_xcbc128_tv_template[] = {
4564 +static const struct hash_testvec aes_xcbc128_tv_template[] = {
4566 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4567 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4568 @@ -3580,36 +3804,35 @@ static struct hash_testvec aes_xcbc128_t
4572 -#define VMAC_AES_TEST_VECTORS 11
4573 -static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4574 - '\x02', '\x03', '\x02', '\x02',
4575 - '\x02', '\x04', '\x01', '\x07',
4576 - '\x04', '\x01', '\x04', '\x03',};
4577 -static char vmac_string2[128] = {'a', 'b', 'c',};
4578 -static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4579 - 'a', 'b', 'c', 'a', 'b', 'c',
4580 - 'a', 'b', 'c', 'a', 'b', 'c',
4581 - 'a', 'b', 'c', 'a', 'b', 'c',
4582 - 'a', 'b', 'c', 'a', 'b', 'c',
4583 - 'a', 'b', 'c', 'a', 'b', 'c',
4584 - 'a', 'b', 'c', 'a', 'b', 'c',
4585 - 'a', 'b', 'c', 'a', 'b', 'c',
4588 -static char vmac_string4[17] = {'b', 'c', 'e', 'f',
4589 - 'i', 'j', 'l', 'm',
4590 - 'o', 'p', 'r', 's',
4591 - 't', 'u', 'w', 'x', 'z'};
4593 -static char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4594 - 'o', 'l', 'k', ']', '%',
4595 - '9', '2', '7', '!', 'A'};
4597 -static char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4598 - 'i', '!', '#', 'w', '0',
4599 - 'z', '/', '4', 'A', 'n'};
4600 +static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4601 + '\x02', '\x03', '\x02', '\x02',
4602 + '\x02', '\x04', '\x01', '\x07',
4603 + '\x04', '\x01', '\x04', '\x03',};
4604 +static const char vmac_string2[128] = {'a', 'b', 'c',};
4605 +static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4606 + 'a', 'b', 'c', 'a', 'b', 'c',
4607 + 'a', 'b', 'c', 'a', 'b', 'c',
4608 + 'a', 'b', 'c', 'a', 'b', 'c',
4609 + 'a', 'b', 'c', 'a', 'b', 'c',
4610 + 'a', 'b', 'c', 'a', 'b', 'c',
4611 + 'a', 'b', 'c', 'a', 'b', 'c',
4612 + 'a', 'b', 'c', 'a', 'b', 'c',
4615 +static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
4616 + 'i', 'j', 'l', 'm',
4617 + 'o', 'p', 'r', 's',
4618 + 't', 'u', 'w', 'x', 'z'};
4620 +static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4621 + 'o', 'l', 'k', ']', '%',
4622 + '9', '2', '7', '!', 'A'};
4624 +static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4625 + 'i', '!', '#', 'w', '0',
4626 + 'z', '/', '4', 'A', 'n'};
4628 -static struct hash_testvec aes_vmac128_tv_template[] = {
4629 +static const struct hash_testvec aes_vmac128_tv_template[] = {
4631 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4632 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4633 @@ -3687,9 +3910,7 @@ static struct hash_testvec aes_vmac128_t
4634 * SHA384 HMAC test vectors from RFC4231
4637 -#define HMAC_SHA384_TEST_VECTORS 4
4639 -static struct hash_testvec hmac_sha384_tv_template[] = {
4640 +static const struct hash_testvec hmac_sha384_tv_template[] = {
4642 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4643 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4644 @@ -3787,9 +4008,7 @@ static struct hash_testvec hmac_sha384_t
4645 * SHA512 HMAC test vectors from RFC4231
4648 -#define HMAC_SHA512_TEST_VECTORS 4
4650 -static struct hash_testvec hmac_sha512_tv_template[] = {
4651 +static const struct hash_testvec hmac_sha512_tv_template[] = {
4653 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4654 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4655 @@ -3894,9 +4113,7 @@ static struct hash_testvec hmac_sha512_t
4659 -#define HMAC_SHA3_224_TEST_VECTORS 4
4661 -static struct hash_testvec hmac_sha3_224_tv_template[] = {
4662 +static const struct hash_testvec hmac_sha3_224_tv_template[] = {
4664 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4665 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4666 @@ -3985,9 +4202,7 @@ static struct hash_testvec hmac_sha3_224
4670 -#define HMAC_SHA3_256_TEST_VECTORS 4
4672 -static struct hash_testvec hmac_sha3_256_tv_template[] = {
4673 +static const struct hash_testvec hmac_sha3_256_tv_template[] = {
4675 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4676 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4677 @@ -4076,9 +4291,7 @@ static struct hash_testvec hmac_sha3_256
4681 -#define HMAC_SHA3_384_TEST_VECTORS 4
4683 -static struct hash_testvec hmac_sha3_384_tv_template[] = {
4684 +static const struct hash_testvec hmac_sha3_384_tv_template[] = {
4686 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4687 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4688 @@ -4175,9 +4388,7 @@ static struct hash_testvec hmac_sha3_384
4692 -#define HMAC_SHA3_512_TEST_VECTORS 4
4694 -static struct hash_testvec hmac_sha3_512_tv_template[] = {
4695 +static const struct hash_testvec hmac_sha3_512_tv_template[] = {
4697 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4698 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4699 @@ -4286,9 +4497,7 @@ static struct hash_testvec hmac_sha3_512
4700 * Poly1305 test vectors from RFC7539 A.3.
4703 -#define POLY1305_TEST_VECTORS 11
4705 -static struct hash_testvec poly1305_tv_template[] = {
4706 +static const struct hash_testvec poly1305_tv_template[] = {
4707 { /* Test Vector #1 */
4708 .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
4709 "\x00\x00\x00\x00\x00\x00\x00\x00"
4710 @@ -4533,20 +4742,7 @@ static struct hash_testvec poly1305_tv_t
4714 -#define DES_ENC_TEST_VECTORS 11
4715 -#define DES_DEC_TEST_VECTORS 5
4716 -#define DES_CBC_ENC_TEST_VECTORS 6
4717 -#define DES_CBC_DEC_TEST_VECTORS 5
4718 -#define DES_CTR_ENC_TEST_VECTORS 2
4719 -#define DES_CTR_DEC_TEST_VECTORS 2
4720 -#define DES3_EDE_ENC_TEST_VECTORS 4
4721 -#define DES3_EDE_DEC_TEST_VECTORS 4
4722 -#define DES3_EDE_CBC_ENC_TEST_VECTORS 2
4723 -#define DES3_EDE_CBC_DEC_TEST_VECTORS 2
4724 -#define DES3_EDE_CTR_ENC_TEST_VECTORS 2
4725 -#define DES3_EDE_CTR_DEC_TEST_VECTORS 2
4727 -static struct cipher_testvec des_enc_tv_template[] = {
4728 +static const struct cipher_testvec des_enc_tv_template[] = {
4729 { /* From Applied Cryptography */
4730 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4732 @@ -4720,7 +4916,7 @@ static struct cipher_testvec des_enc_tv_
4736 -static struct cipher_testvec des_dec_tv_template[] = {
4737 +static const struct cipher_testvec des_dec_tv_template[] = {
4738 { /* From Applied Cryptography */
4739 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4741 @@ -4830,7 +5026,7 @@ static struct cipher_testvec des_dec_tv_
4745 -static struct cipher_testvec des_cbc_enc_tv_template[] = {
4746 +static const struct cipher_testvec des_cbc_enc_tv_template[] = {
4747 { /* From OpenSSL */
4748 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4750 @@ -4956,7 +5152,7 @@ static struct cipher_testvec des_cbc_enc
4754 -static struct cipher_testvec des_cbc_dec_tv_template[] = {
4755 +static const struct cipher_testvec des_cbc_dec_tv_template[] = {
4757 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4759 @@ -5065,7 +5261,7 @@ static struct cipher_testvec des_cbc_dec
4763 -static struct cipher_testvec des_ctr_enc_tv_template[] = {
4764 +static const struct cipher_testvec des_ctr_enc_tv_template[] = {
4765 { /* Generated with Crypto++ */
4766 .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4768 @@ -5211,7 +5407,7 @@ static struct cipher_testvec des_ctr_enc
4772 -static struct cipher_testvec des_ctr_dec_tv_template[] = {
4773 +static const struct cipher_testvec des_ctr_dec_tv_template[] = {
4774 { /* Generated with Crypto++ */
4775 .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4777 @@ -5357,7 +5553,7 @@ static struct cipher_testvec des_ctr_dec
4781 -static struct cipher_testvec des3_ede_enc_tv_template[] = {
4782 +static const struct cipher_testvec des3_ede_enc_tv_template[] = {
4783 { /* These are from openssl */
4784 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4785 "\x55\x55\x55\x55\x55\x55\x55\x55"
4786 @@ -5522,7 +5718,7 @@ static struct cipher_testvec des3_ede_en
4790 -static struct cipher_testvec des3_ede_dec_tv_template[] = {
4791 +static const struct cipher_testvec des3_ede_dec_tv_template[] = {
4792 { /* These are from openssl */
4793 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4794 "\x55\x55\x55\x55\x55\x55\x55\x55"
4795 @@ -5687,7 +5883,7 @@ static struct cipher_testvec des3_ede_de
4799 -static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4800 +static const struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4801 { /* Generated from openssl */
4802 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4803 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4804 @@ -5867,7 +6063,7 @@ static struct cipher_testvec des3_ede_cb
4808 -static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4809 +static const struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4810 { /* Generated from openssl */
4811 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4812 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4813 @@ -6047,7 +6243,7 @@ static struct cipher_testvec des3_ede_cb
4817 -static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4818 +static const struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4819 { /* Generated with Crypto++ */
4820 .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4821 "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4822 @@ -6325,7 +6521,7 @@ static struct cipher_testvec des3_ede_ct
4826 -static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4827 +static const struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4828 { /* Generated with Crypto++ */
4829 .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4830 "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4831 @@ -6606,14 +6802,7 @@ static struct cipher_testvec des3_ede_ct
4833 * Blowfish test vectors.
4835 -#define BF_ENC_TEST_VECTORS 7
4836 -#define BF_DEC_TEST_VECTORS 7
4837 -#define BF_CBC_ENC_TEST_VECTORS 2
4838 -#define BF_CBC_DEC_TEST_VECTORS 2
4839 -#define BF_CTR_ENC_TEST_VECTORS 2
4840 -#define BF_CTR_DEC_TEST_VECTORS 2
4842 -static struct cipher_testvec bf_enc_tv_template[] = {
4843 +static const struct cipher_testvec bf_enc_tv_template[] = {
4844 { /* DES test vectors from OpenSSL */
4845 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
4847 @@ -6805,7 +6994,7 @@ static struct cipher_testvec bf_enc_tv_t
4851 -static struct cipher_testvec bf_dec_tv_template[] = {
4852 +static const struct cipher_testvec bf_dec_tv_template[] = {
4853 { /* DES test vectors from OpenSSL */
4854 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
4856 @@ -6997,7 +7186,7 @@ static struct cipher_testvec bf_dec_tv_t
4860 -static struct cipher_testvec bf_cbc_enc_tv_template[] = {
4861 +static const struct cipher_testvec bf_cbc_enc_tv_template[] = {
4862 { /* From OpenSSL */
4863 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4864 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4865 @@ -7154,7 +7343,7 @@ static struct cipher_testvec bf_cbc_enc_
4869 -static struct cipher_testvec bf_cbc_dec_tv_template[] = {
4870 +static const struct cipher_testvec bf_cbc_dec_tv_template[] = {
4871 { /* From OpenSSL */
4872 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4873 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4874 @@ -7311,7 +7500,7 @@ static struct cipher_testvec bf_cbc_dec_
4878 -static struct cipher_testvec bf_ctr_enc_tv_template[] = {
4879 +static const struct cipher_testvec bf_ctr_enc_tv_template[] = {
4880 { /* Generated with Crypto++ */
4881 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4882 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4883 @@ -7723,7 +7912,7 @@ static struct cipher_testvec bf_ctr_enc_
4887 -static struct cipher_testvec bf_ctr_dec_tv_template[] = {
4888 +static const struct cipher_testvec bf_ctr_dec_tv_template[] = {
4889 { /* Generated with Crypto++ */
4890 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4891 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4892 @@ -8138,18 +8327,7 @@ static struct cipher_testvec bf_ctr_dec_
4894 * Twofish test vectors.
4896 -#define TF_ENC_TEST_VECTORS 4
4897 -#define TF_DEC_TEST_VECTORS 4
4898 -#define TF_CBC_ENC_TEST_VECTORS 5
4899 -#define TF_CBC_DEC_TEST_VECTORS 5
4900 -#define TF_CTR_ENC_TEST_VECTORS 2
4901 -#define TF_CTR_DEC_TEST_VECTORS 2
4902 -#define TF_LRW_ENC_TEST_VECTORS 8
4903 -#define TF_LRW_DEC_TEST_VECTORS 8
4904 -#define TF_XTS_ENC_TEST_VECTORS 5
4905 -#define TF_XTS_DEC_TEST_VECTORS 5
4907 -static struct cipher_testvec tf_enc_tv_template[] = {
4908 +static const struct cipher_testvec tf_enc_tv_template[] = {
4910 .key = zeroed_string,
4912 @@ -8317,7 +8495,7 @@ static struct cipher_testvec tf_enc_tv_t
4916 -static struct cipher_testvec tf_dec_tv_template[] = {
4917 +static const struct cipher_testvec tf_dec_tv_template[] = {
4919 .key = zeroed_string,
4921 @@ -8485,7 +8663,7 @@ static struct cipher_testvec tf_dec_tv_t
4925 -static struct cipher_testvec tf_cbc_enc_tv_template[] = {
4926 +static const struct cipher_testvec tf_cbc_enc_tv_template[] = {
4927 { /* Generated with Nettle */
4928 .key = zeroed_string,
4930 @@ -8668,7 +8846,7 @@ static struct cipher_testvec tf_cbc_enc_
4934 -static struct cipher_testvec tf_cbc_dec_tv_template[] = {
4935 +static const struct cipher_testvec tf_cbc_dec_tv_template[] = {
4936 { /* Reverse of the first four above */
4937 .key = zeroed_string,
4939 @@ -8851,7 +9029,7 @@ static struct cipher_testvec tf_cbc_dec_
4943 -static struct cipher_testvec tf_ctr_enc_tv_template[] = {
4944 +static const struct cipher_testvec tf_ctr_enc_tv_template[] = {
4945 { /* Generated with Crypto++ */
4946 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4947 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4948 @@ -9262,7 +9440,7 @@ static struct cipher_testvec tf_ctr_enc_
4952 -static struct cipher_testvec tf_ctr_dec_tv_template[] = {
4953 +static const struct cipher_testvec tf_ctr_dec_tv_template[] = {
4954 { /* Generated with Crypto++ */
4955 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4956 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4957 @@ -9673,7 +9851,7 @@ static struct cipher_testvec tf_ctr_dec_
4961 -static struct cipher_testvec tf_lrw_enc_tv_template[] = {
4962 +static const struct cipher_testvec tf_lrw_enc_tv_template[] = {
4963 /* Generated from AES-LRW test vectors */
4965 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
4966 @@ -9925,7 +10103,7 @@ static struct cipher_testvec tf_lrw_enc_
4970 -static struct cipher_testvec tf_lrw_dec_tv_template[] = {
4971 +static const struct cipher_testvec tf_lrw_dec_tv_template[] = {
4972 /* Generated from AES-LRW test vectors */
4973 /* same as enc vectors with input and result reversed */
4975 @@ -10178,7 +10356,7 @@ static struct cipher_testvec tf_lrw_dec_
4979 -static struct cipher_testvec tf_xts_enc_tv_template[] = {
4980 +static const struct cipher_testvec tf_xts_enc_tv_template[] = {
4981 /* Generated from AES-XTS test vectors */
4983 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
4984 @@ -10520,7 +10698,7 @@ static struct cipher_testvec tf_xts_enc_
4988 -static struct cipher_testvec tf_xts_dec_tv_template[] = {
4989 +static const struct cipher_testvec tf_xts_dec_tv_template[] = {
4990 /* Generated from AES-XTS test vectors */
4991 /* same as enc vectors with input and result reversed */
4993 @@ -10867,25 +11045,7 @@ static struct cipher_testvec tf_xts_dec_
4994 * Serpent test vectors. These are backwards because Serpent writes
4995 * octet sequences in right-to-left mode.
4997 -#define SERPENT_ENC_TEST_VECTORS 5
4998 -#define SERPENT_DEC_TEST_VECTORS 5
5000 -#define TNEPRES_ENC_TEST_VECTORS 4
5001 -#define TNEPRES_DEC_TEST_VECTORS 4
5003 -#define SERPENT_CBC_ENC_TEST_VECTORS 1
5004 -#define SERPENT_CBC_DEC_TEST_VECTORS 1
5006 -#define SERPENT_CTR_ENC_TEST_VECTORS 2
5007 -#define SERPENT_CTR_DEC_TEST_VECTORS 2
5009 -#define SERPENT_LRW_ENC_TEST_VECTORS 8
5010 -#define SERPENT_LRW_DEC_TEST_VECTORS 8
5012 -#define SERPENT_XTS_ENC_TEST_VECTORS 5
5013 -#define SERPENT_XTS_DEC_TEST_VECTORS 5
5015 -static struct cipher_testvec serpent_enc_tv_template[] = {
5016 +static const struct cipher_testvec serpent_enc_tv_template[] = {
5018 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
5019 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5020 @@ -11061,7 +11221,7 @@ static struct cipher_testvec serpent_enc
5024 -static struct cipher_testvec tnepres_enc_tv_template[] = {
5025 +static const struct cipher_testvec tnepres_enc_tv_template[] = {
5026 { /* KeySize=128, PT=0, I=1 */
5027 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
5028 "\x00\x00\x00\x00\x00\x00\x00\x00",
5029 @@ -11111,7 +11271,7 @@ static struct cipher_testvec tnepres_enc
5033 -static struct cipher_testvec serpent_dec_tv_template[] = {
5034 +static const struct cipher_testvec serpent_dec_tv_template[] = {
5036 .input = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
5037 "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
5038 @@ -11287,7 +11447,7 @@ static struct cipher_testvec serpent_dec
5042 -static struct cipher_testvec tnepres_dec_tv_template[] = {
5043 +static const struct cipher_testvec tnepres_dec_tv_template[] = {
5045 .input = "\x41\xcc\x6b\x31\x59\x31\x45\x97"
5046 "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
5047 @@ -11328,7 +11488,7 @@ static struct cipher_testvec tnepres_dec
5051 -static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5052 +static const struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5053 { /* Generated with Crypto++ */
5054 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5055 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5056 @@ -11469,7 +11629,7 @@ static struct cipher_testvec serpent_cbc
5060 -static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5061 +static const struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5062 { /* Generated with Crypto++ */
5063 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5064 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5065 @@ -11610,7 +11770,7 @@ static struct cipher_testvec serpent_cbc
5069 -static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5070 +static const struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5071 { /* Generated with Crypto++ */
5072 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5073 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5074 @@ -12021,7 +12181,7 @@ static struct cipher_testvec serpent_ctr
5078 -static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5079 +static const struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5080 { /* Generated with Crypto++ */
5081 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5082 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5083 @@ -12432,7 +12592,7 @@ static struct cipher_testvec serpent_ctr
5087 -static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5088 +static const struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5089 /* Generated from AES-LRW test vectors */
5091 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5092 @@ -12684,7 +12844,7 @@ static struct cipher_testvec serpent_lrw
5096 -static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5097 +static const struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5098 /* Generated from AES-LRW test vectors */
5099 /* same as enc vectors with input and result reversed */
5101 @@ -12937,7 +13097,7 @@ static struct cipher_testvec serpent_lrw
5105 -static struct cipher_testvec serpent_xts_enc_tv_template[] = {
5106 +static const struct cipher_testvec serpent_xts_enc_tv_template[] = {
5107 /* Generated from AES-XTS test vectors */
5109 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5110 @@ -13279,7 +13439,7 @@ static struct cipher_testvec serpent_xts
5114 -static struct cipher_testvec serpent_xts_dec_tv_template[] = {
5115 +static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
5116 /* Generated from AES-XTS test vectors */
5117 /* same as enc vectors with input and result reversed */
5119 @@ -13623,18 +13783,7 @@ static struct cipher_testvec serpent_xts
5122 /* Cast6 test vectors from RFC 2612 */
5123 -#define CAST6_ENC_TEST_VECTORS 4
5124 -#define CAST6_DEC_TEST_VECTORS 4
5125 -#define CAST6_CBC_ENC_TEST_VECTORS 1
5126 -#define CAST6_CBC_DEC_TEST_VECTORS 1
5127 -#define CAST6_CTR_ENC_TEST_VECTORS 2
5128 -#define CAST6_CTR_DEC_TEST_VECTORS 2
5129 -#define CAST6_LRW_ENC_TEST_VECTORS 1
5130 -#define CAST6_LRW_DEC_TEST_VECTORS 1
5131 -#define CAST6_XTS_ENC_TEST_VECTORS 1
5132 -#define CAST6_XTS_DEC_TEST_VECTORS 1
5134 -static struct cipher_testvec cast6_enc_tv_template[] = {
5135 +static const struct cipher_testvec cast6_enc_tv_template[] = {
5137 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5138 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5139 @@ -13805,7 +13954,7 @@ static struct cipher_testvec cast6_enc_t
5143 -static struct cipher_testvec cast6_dec_tv_template[] = {
5144 +static const struct cipher_testvec cast6_dec_tv_template[] = {
5146 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5147 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5148 @@ -13976,7 +14125,7 @@ static struct cipher_testvec cast6_dec_t
5152 -static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5153 +static const struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5154 { /* Generated from TF test vectors */
5155 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5156 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5157 @@ -14117,7 +14266,7 @@ static struct cipher_testvec cast6_cbc_e
5161 -static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5162 +static const struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5163 { /* Generated from TF test vectors */
5164 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5165 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5166 @@ -14258,7 +14407,7 @@ static struct cipher_testvec cast6_cbc_d
5170 -static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5171 +static const struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5172 { /* Generated from TF test vectors */
5173 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5174 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5175 @@ -14415,7 +14564,7 @@ static struct cipher_testvec cast6_ctr_e
5179 -static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5180 +static const struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5181 { /* Generated from TF test vectors */
5182 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5183 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5184 @@ -14572,7 +14721,7 @@ static struct cipher_testvec cast6_ctr_d
5188 -static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5189 +static const struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5190 { /* Generated from TF test vectors */
5191 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5192 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5193 @@ -14719,7 +14868,7 @@ static struct cipher_testvec cast6_lrw_e
5197 -static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5198 +static const struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5199 { /* Generated from TF test vectors */
5200 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5201 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5202 @@ -14866,7 +15015,7 @@ static struct cipher_testvec cast6_lrw_d
5206 -static struct cipher_testvec cast6_xts_enc_tv_template[] = {
5207 +static const struct cipher_testvec cast6_xts_enc_tv_template[] = {
5208 { /* Generated from TF test vectors */
5209 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
5210 "\x23\x53\x60\x28\x74\x71\x35\x26"
5211 @@ -15015,7 +15164,7 @@ static struct cipher_testvec cast6_xts_e
5215 -static struct cipher_testvec cast6_xts_dec_tv_template[] = {
5216 +static const struct cipher_testvec cast6_xts_dec_tv_template[] = {
5217 { /* Generated from TF test vectors */
5218 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
5219 "\x23\x53\x60\x28\x74\x71\x35\x26"
5220 @@ -15168,39 +15317,7 @@ static struct cipher_testvec cast6_xts_d
5224 -#define AES_ENC_TEST_VECTORS 4
5225 -#define AES_DEC_TEST_VECTORS 4
5226 -#define AES_CBC_ENC_TEST_VECTORS 5
5227 -#define AES_CBC_DEC_TEST_VECTORS 5
5228 -#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
5229 -#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
5230 -#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2
5231 -#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC 2
5232 -#define HMAC_SHA1_AES_CBC_ENC_TEST_VEC 7
5233 -#define HMAC_SHA256_AES_CBC_ENC_TEST_VEC 7
5234 -#define HMAC_SHA512_AES_CBC_ENC_TEST_VEC 7
5235 -#define AES_LRW_ENC_TEST_VECTORS 8
5236 -#define AES_LRW_DEC_TEST_VECTORS 8
5237 -#define AES_XTS_ENC_TEST_VECTORS 5
5238 -#define AES_XTS_DEC_TEST_VECTORS 5
5239 -#define AES_CTR_ENC_TEST_VECTORS 5
5240 -#define AES_CTR_DEC_TEST_VECTORS 5
5241 -#define AES_OFB_ENC_TEST_VECTORS 1
5242 -#define AES_OFB_DEC_TEST_VECTORS 1
5243 -#define AES_CTR_3686_ENC_TEST_VECTORS 7
5244 -#define AES_CTR_3686_DEC_TEST_VECTORS 6
5245 -#define AES_GCM_ENC_TEST_VECTORS 9
5246 -#define AES_GCM_DEC_TEST_VECTORS 8
5247 -#define AES_GCM_4106_ENC_TEST_VECTORS 23
5248 -#define AES_GCM_4106_DEC_TEST_VECTORS 23
5249 -#define AES_GCM_4543_ENC_TEST_VECTORS 1
5250 -#define AES_GCM_4543_DEC_TEST_VECTORS 2
5251 -#define AES_CCM_ENC_TEST_VECTORS 8
5252 -#define AES_CCM_DEC_TEST_VECTORS 7
5253 -#define AES_CCM_4309_ENC_TEST_VECTORS 7
5254 -#define AES_CCM_4309_DEC_TEST_VECTORS 10
5256 -static struct cipher_testvec aes_enc_tv_template[] = {
5257 +static const struct cipher_testvec aes_enc_tv_template[] = {
5258 { /* From FIPS-197 */
5259 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
5260 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5261 @@ -15372,7 +15489,7 @@ static struct cipher_testvec aes_enc_tv_
5265 -static struct cipher_testvec aes_dec_tv_template[] = {
5266 +static const struct cipher_testvec aes_dec_tv_template[] = {
5267 { /* From FIPS-197 */
5268 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
5269 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5270 @@ -15544,7 +15661,7 @@ static struct cipher_testvec aes_dec_tv_
5274 -static struct cipher_testvec aes_cbc_enc_tv_template[] = {
5275 +static const struct cipher_testvec aes_cbc_enc_tv_template[] = {
5276 { /* From RFC 3602 */
5277 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5278 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5279 @@ -15766,7 +15883,7 @@ static struct cipher_testvec aes_cbc_enc
5283 -static struct cipher_testvec aes_cbc_dec_tv_template[] = {
5284 +static const struct cipher_testvec aes_cbc_dec_tv_template[] = {
5285 { /* From RFC 3602 */
5286 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5287 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5288 @@ -15988,7 +16105,7 @@ static struct cipher_testvec aes_cbc_dec
5292 -static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5293 +static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5294 { /* Input data from RFC 2410 Case 1 */
5295 #ifdef __LITTLE_ENDIAN
5296 .key = "\x08\x00" /* rta length */
5297 @@ -16030,7 +16147,7 @@ static struct aead_testvec hmac_md5_ecb_
5301 -static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5302 +static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5304 #ifdef __LITTLE_ENDIAN
5305 .key = "\x08\x00" /* rta length */
5306 @@ -16072,7 +16189,7 @@ static struct aead_testvec hmac_md5_ecb_
5310 -static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5311 +static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5312 { /* RFC 3602 Case 1 */
5313 #ifdef __LITTLE_ENDIAN
5314 .key = "\x08\x00" /* rta length */
5315 @@ -16341,7 +16458,7 @@ static struct aead_testvec hmac_sha1_aes
5319 -static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5320 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5321 { /* Input data from RFC 2410 Case 1 */
5322 #ifdef __LITTLE_ENDIAN
5323 .key = "\x08\x00" /* rta length */
5324 @@ -16387,7 +16504,7 @@ static struct aead_testvec hmac_sha1_ecb
5328 -static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5329 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5331 #ifdef __LITTLE_ENDIAN
5332 .key = "\x08\x00" /* rta length */
5333 @@ -16433,7 +16550,7 @@ static struct aead_testvec hmac_sha1_ecb
5337 -static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5338 +static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5339 { /* RFC 3602 Case 1 */
5340 #ifdef __LITTLE_ENDIAN
5341 .key = "\x08\x00" /* rta length */
5342 @@ -16716,7 +16833,7 @@ static struct aead_testvec hmac_sha256_a
5346 -static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5347 +static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5348 { /* RFC 3602 Case 1 */
5349 #ifdef __LITTLE_ENDIAN
5350 .key = "\x08\x00" /* rta length */
5351 @@ -17055,9 +17172,7 @@ static struct aead_testvec hmac_sha512_a
5355 -#define HMAC_SHA1_DES_CBC_ENC_TEST_VEC 1
5357 -static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5358 +static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5359 { /*Generated with cryptopp*/
5360 #ifdef __LITTLE_ENDIAN
5361 .key = "\x08\x00" /* rta length */
5362 @@ -17116,9 +17231,7 @@ static struct aead_testvec hmac_sha1_des
5366 -#define HMAC_SHA224_DES_CBC_ENC_TEST_VEC 1
5368 -static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5369 +static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5370 { /*Generated with cryptopp*/
5371 #ifdef __LITTLE_ENDIAN
5372 .key = "\x08\x00" /* rta length */
5373 @@ -17177,9 +17290,7 @@ static struct aead_testvec hmac_sha224_d
5377 -#define HMAC_SHA256_DES_CBC_ENC_TEST_VEC 1
5379 -static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5380 +static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5381 { /*Generated with cryptopp*/
5382 #ifdef __LITTLE_ENDIAN
5383 .key = "\x08\x00" /* rta length */
5384 @@ -17240,9 +17351,7 @@ static struct aead_testvec hmac_sha256_d
5388 -#define HMAC_SHA384_DES_CBC_ENC_TEST_VEC 1
5390 -static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5391 +static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5392 { /*Generated with cryptopp*/
5393 #ifdef __LITTLE_ENDIAN
5394 .key = "\x08\x00" /* rta length */
5395 @@ -17307,9 +17416,7 @@ static struct aead_testvec hmac_sha384_d
5399 -#define HMAC_SHA512_DES_CBC_ENC_TEST_VEC 1
5401 -static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5402 +static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5403 { /*Generated with cryptopp*/
5404 #ifdef __LITTLE_ENDIAN
5405 .key = "\x08\x00" /* rta length */
5406 @@ -17378,9 +17485,7 @@ static struct aead_testvec hmac_sha512_d
5410 -#define HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC 1
5412 -static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5413 +static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5414 { /*Generated with cryptopp*/
5415 #ifdef __LITTLE_ENDIAN
5416 .key = "\x08\x00" /* rta length */
5417 @@ -17441,9 +17546,7 @@ static struct aead_testvec hmac_sha1_des
5421 -#define HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC 1
5423 -static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5424 +static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5425 { /*Generated with cryptopp*/
5426 #ifdef __LITTLE_ENDIAN
5427 .key = "\x08\x00" /* rta length */
5428 @@ -17504,9 +17607,7 @@ static struct aead_testvec hmac_sha224_d
5432 -#define HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC 1
5434 -static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5435 +static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5436 { /*Generated with cryptopp*/
5437 #ifdef __LITTLE_ENDIAN
5438 .key = "\x08\x00" /* rta length */
5439 @@ -17569,9 +17670,7 @@ static struct aead_testvec hmac_sha256_d
5443 -#define HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC 1
5445 -static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5446 +static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5447 { /*Generated with cryptopp*/
5448 #ifdef __LITTLE_ENDIAN
5449 .key = "\x08\x00" /* rta length */
5450 @@ -17638,9 +17737,7 @@ static struct aead_testvec hmac_sha384_d
5454 -#define HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC 1
5456 -static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5457 +static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5458 { /*Generated with cryptopp*/
5459 #ifdef __LITTLE_ENDIAN
5460 .key = "\x08\x00" /* rta length */
5461 @@ -17711,7 +17808,7 @@ static struct aead_testvec hmac_sha512_d
5465 -static struct cipher_testvec aes_lrw_enc_tv_template[] = {
5466 +static const struct cipher_testvec aes_lrw_enc_tv_template[] = {
5467 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5468 { /* LRW-32-AES 1 */
5469 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5470 @@ -17964,7 +18061,7 @@ static struct cipher_testvec aes_lrw_enc
5474 -static struct cipher_testvec aes_lrw_dec_tv_template[] = {
5475 +static const struct cipher_testvec aes_lrw_dec_tv_template[] = {
5476 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5477 /* same as enc vectors with input and result reversed */
5478 { /* LRW-32-AES 1 */
5479 @@ -18218,7 +18315,7 @@ static struct cipher_testvec aes_lrw_dec
5483 -static struct cipher_testvec aes_xts_enc_tv_template[] = {
5484 +static const struct cipher_testvec aes_xts_enc_tv_template[] = {
5485 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5487 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5488 @@ -18561,7 +18658,7 @@ static struct cipher_testvec aes_xts_enc
5492 -static struct cipher_testvec aes_xts_dec_tv_template[] = {
5493 +static const struct cipher_testvec aes_xts_dec_tv_template[] = {
5494 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5496 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5497 @@ -18905,7 +19002,7 @@ static struct cipher_testvec aes_xts_dec
5501 -static struct cipher_testvec aes_ctr_enc_tv_template[] = {
5502 +static const struct cipher_testvec aes_ctr_enc_tv_template[] = {
5503 { /* From NIST Special Publication 800-38A, Appendix F.5 */
5504 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5505 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5506 @@ -19260,7 +19357,7 @@ static struct cipher_testvec aes_ctr_enc
5510 -static struct cipher_testvec aes_ctr_dec_tv_template[] = {
5511 +static const struct cipher_testvec aes_ctr_dec_tv_template[] = {
5512 { /* From NIST Special Publication 800-38A, Appendix F.5 */
5513 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5514 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5515 @@ -19615,7 +19712,7 @@ static struct cipher_testvec aes_ctr_dec
5519 -static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5520 +static const struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5521 { /* From RFC 3686 */
5522 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5523 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5524 @@ -20747,7 +20844,7 @@ static struct cipher_testvec aes_ctr_rfc
5528 -static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5529 +static const struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5530 { /* From RFC 3686 */
5531 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5532 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5533 @@ -20838,7 +20935,7 @@ static struct cipher_testvec aes_ctr_rfc
5537 -static struct cipher_testvec aes_ofb_enc_tv_template[] = {
5538 +static const struct cipher_testvec aes_ofb_enc_tv_template[] = {
5539 /* From NIST Special Publication 800-38A, Appendix F.5 */
5541 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5542 @@ -20867,7 +20964,7 @@ static struct cipher_testvec aes_ofb_enc
5546 -static struct cipher_testvec aes_ofb_dec_tv_template[] = {
5547 +static const struct cipher_testvec aes_ofb_dec_tv_template[] = {
5548 /* From NIST Special Publication 800-38A, Appendix F.5 */
5550 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5551 @@ -20896,7 +20993,7 @@ static struct cipher_testvec aes_ofb_dec
5555 -static struct aead_testvec aes_gcm_enc_tv_template[] = {
5556 +static const struct aead_testvec aes_gcm_enc_tv_template[] = {
5557 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5558 .key = zeroed_string,
5560 @@ -21056,7 +21153,7 @@ static struct aead_testvec aes_gcm_enc_t
5564 -static struct aead_testvec aes_gcm_dec_tv_template[] = {
5565 +static const struct aead_testvec aes_gcm_dec_tv_template[] = {
5566 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5567 .key = zeroed_string,
5569 @@ -21258,7 +21355,7 @@ static struct aead_testvec aes_gcm_dec_t
5573 -static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5574 +static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5575 { /* Generated using Crypto++ */
5576 .key = zeroed_string,
5578 @@ -21871,7 +21968,7 @@ static struct aead_testvec aes_gcm_rfc41
5582 -static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5583 +static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5584 { /* Generated using Crypto++ */
5585 .key = zeroed_string,
5587 @@ -22485,7 +22582,7 @@ static struct aead_testvec aes_gcm_rfc41
5591 -static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5592 +static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5593 { /* From draft-mcgrew-gcm-test-01 */
5594 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5595 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5596 @@ -22516,7 +22613,7 @@ static struct aead_testvec aes_gcm_rfc45
5600 -static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5601 +static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5602 { /* From draft-mcgrew-gcm-test-01 */
5603 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5604 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5605 @@ -22575,7 +22672,7 @@ static struct aead_testvec aes_gcm_rfc45
5609 -static struct aead_testvec aes_ccm_enc_tv_template[] = {
5610 +static const struct aead_testvec aes_ccm_enc_tv_template[] = {
5611 { /* From RFC 3610 */
5612 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5613 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5614 @@ -22859,7 +22956,7 @@ static struct aead_testvec aes_ccm_enc_t
5618 -static struct aead_testvec aes_ccm_dec_tv_template[] = {
5619 +static const struct aead_testvec aes_ccm_dec_tv_template[] = {
5620 { /* From RFC 3610 */
5621 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5622 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5623 @@ -23191,7 +23288,7 @@ static struct aead_testvec aes_ccm_dec_t
5624 * These vectors are copied/generated from the ones for rfc4106 with
5625 * the key truncated by one byte..
5627 -static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5628 +static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5629 { /* Generated using Crypto++ */
5630 .key = zeroed_string,
5632 @@ -23804,7 +23901,7 @@ static struct aead_testvec aes_ccm_rfc43
5636 -static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
5637 +static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
5638 { /* Generated using Crypto++ */
5639 .key = zeroed_string,
5641 @@ -24420,9 +24517,7 @@ static struct aead_testvec aes_ccm_rfc43
5643 * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5.
5645 -#define RFC7539_ENC_TEST_VECTORS 2
5646 -#define RFC7539_DEC_TEST_VECTORS 2
5647 -static struct aead_testvec rfc7539_enc_tv_template[] = {
5648 +static const struct aead_testvec rfc7539_enc_tv_template[] = {
5650 .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
5651 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5652 @@ -24554,7 +24649,7 @@ static struct aead_testvec rfc7539_enc_t
5656 -static struct aead_testvec rfc7539_dec_tv_template[] = {
5657 +static const struct aead_testvec rfc7539_dec_tv_template[] = {
5659 .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
5660 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5661 @@ -24689,9 +24784,7 @@ static struct aead_testvec rfc7539_dec_t
5663 * draft-irtf-cfrg-chacha20-poly1305
5665 -#define RFC7539ESP_DEC_TEST_VECTORS 1
5666 -#define RFC7539ESP_ENC_TEST_VECTORS 1
5667 -static struct aead_testvec rfc7539esp_enc_tv_template[] = {
5668 +static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
5670 .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5671 "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5672 @@ -24779,7 +24872,7 @@ static struct aead_testvec rfc7539esp_en
5676 -static struct aead_testvec rfc7539esp_dec_tv_template[] = {
5677 +static const struct aead_testvec rfc7539esp_dec_tv_template[] = {
5679 .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5680 "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5681 @@ -24875,7 +24968,7 @@ static struct aead_testvec rfc7539esp_de
5682 * semiblock of the ciphertext from the test vector. For decryption, iv is
5683 * the first semiblock of the ciphertext.
5685 -static struct cipher_testvec aes_kw_enc_tv_template[] = {
5686 +static const struct cipher_testvec aes_kw_enc_tv_template[] = {
5688 .key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
5689 "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
5690 @@ -24890,7 +24983,7 @@ static struct cipher_testvec aes_kw_enc_
5694 -static struct cipher_testvec aes_kw_dec_tv_template[] = {
5695 +static const struct cipher_testvec aes_kw_dec_tv_template[] = {
5697 .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
5698 "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
5699 @@ -24913,9 +25006,7 @@ static struct cipher_testvec aes_kw_dec_
5700 * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
5701 * Only AES-128 is supported at this time.
5703 -#define ANSI_CPRNG_AES_TEST_VECTORS 6
5705 -static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5706 +static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5708 .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
5709 "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
5710 @@ -25011,7 +25102,7 @@ static struct cprng_testvec ansi_cprng_a
5711 * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5712 * w/o personalization string, w/ and w/o additional input string).
5714 -static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5715 +static const struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5717 .entropy = (unsigned char *)
5718 "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
5719 @@ -25169,7 +25260,7 @@ static struct drbg_testvec drbg_pr_sha25
5723 -static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5724 +static const struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5726 .entropy = (unsigned char *)
5727 "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
5728 @@ -25327,7 +25418,7 @@ static struct drbg_testvec drbg_pr_hmac_
5732 -static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5733 +static const struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5735 .entropy = (unsigned char *)
5736 "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
5737 @@ -25451,7 +25542,7 @@ static struct drbg_testvec drbg_pr_ctr_a
5738 * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5739 * w/o personalization string, w/ and w/o additional input string).
5741 -static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5742 +static const struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5744 .entropy = (unsigned char *)
5745 "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
5746 @@ -25573,7 +25664,7 @@ static struct drbg_testvec drbg_nopr_sha
5750 -static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5751 +static const struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5753 .entropy = (unsigned char *)
5754 "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
5755 @@ -25695,7 +25786,7 @@ static struct drbg_testvec drbg_nopr_hma
5759 -static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5760 +static const struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5762 .entropy = (unsigned char *)
5763 "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
5764 @@ -25719,7 +25810,7 @@ static struct drbg_testvec drbg_nopr_ctr
5768 -static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5769 +static const struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5771 .entropy = (unsigned char *)
5772 "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
5773 @@ -25743,7 +25834,7 @@ static struct drbg_testvec drbg_nopr_ctr
5777 -static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5778 +static const struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5780 .entropy = (unsigned char *)
5781 "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
5782 @@ -25832,14 +25923,7 @@ static struct drbg_testvec drbg_nopr_ctr
5785 /* Cast5 test vectors from RFC 2144 */
5786 -#define CAST5_ENC_TEST_VECTORS 4
5787 -#define CAST5_DEC_TEST_VECTORS 4
5788 -#define CAST5_CBC_ENC_TEST_VECTORS 1
5789 -#define CAST5_CBC_DEC_TEST_VECTORS 1
5790 -#define CAST5_CTR_ENC_TEST_VECTORS 2
5791 -#define CAST5_CTR_DEC_TEST_VECTORS 2
5793 -static struct cipher_testvec cast5_enc_tv_template[] = {
5794 +static const struct cipher_testvec cast5_enc_tv_template[] = {
5796 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5797 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5798 @@ -26000,7 +26084,7 @@ static struct cipher_testvec cast5_enc_t
5802 -static struct cipher_testvec cast5_dec_tv_template[] = {
5803 +static const struct cipher_testvec cast5_dec_tv_template[] = {
5805 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5806 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5807 @@ -26161,7 +26245,7 @@ static struct cipher_testvec cast5_dec_t
5811 -static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5812 +static const struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5813 { /* Generated from TF test vectors */
5814 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5815 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5816 @@ -26299,7 +26383,7 @@ static struct cipher_testvec cast5_cbc_e
5820 -static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5821 +static const struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5822 { /* Generated from TF test vectors */
5823 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5824 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5825 @@ -26437,7 +26521,7 @@ static struct cipher_testvec cast5_cbc_d
5829 -static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5830 +static const struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5831 { /* Generated from TF test vectors */
5832 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5833 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5834 @@ -26588,7 +26672,7 @@ static struct cipher_testvec cast5_ctr_e
5838 -static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5839 +static const struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5840 { /* Generated from TF test vectors */
5841 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5842 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5843 @@ -26742,10 +26826,7 @@ static struct cipher_testvec cast5_ctr_d
5845 * ARC4 test vectors from OpenSSL
5847 -#define ARC4_ENC_TEST_VECTORS 7
5848 -#define ARC4_DEC_TEST_VECTORS 7
5850 -static struct cipher_testvec arc4_enc_tv_template[] = {
5851 +static const struct cipher_testvec arc4_enc_tv_template[] = {
5853 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5855 @@ -26811,7 +26892,7 @@ static struct cipher_testvec arc4_enc_tv
5859 -static struct cipher_testvec arc4_dec_tv_template[] = {
5860 +static const struct cipher_testvec arc4_dec_tv_template[] = {
5862 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5864 @@ -26880,10 +26961,7 @@ static struct cipher_testvec arc4_dec_tv
5868 -#define TEA_ENC_TEST_VECTORS 4
5869 -#define TEA_DEC_TEST_VECTORS 4
5871 -static struct cipher_testvec tea_enc_tv_template[] = {
5872 +static const struct cipher_testvec tea_enc_tv_template[] = {
5874 .key = zeroed_string,
5876 @@ -26926,7 +27004,7 @@ static struct cipher_testvec tea_enc_tv_
5880 -static struct cipher_testvec tea_dec_tv_template[] = {
5881 +static const struct cipher_testvec tea_dec_tv_template[] = {
5883 .key = zeroed_string,
5885 @@ -26972,10 +27050,7 @@ static struct cipher_testvec tea_dec_tv_
5889 -#define XTEA_ENC_TEST_VECTORS 4
5890 -#define XTEA_DEC_TEST_VECTORS 4
5892 -static struct cipher_testvec xtea_enc_tv_template[] = {
5893 +static const struct cipher_testvec xtea_enc_tv_template[] = {
5895 .key = zeroed_string,
5897 @@ -27018,7 +27093,7 @@ static struct cipher_testvec xtea_enc_tv
5901 -static struct cipher_testvec xtea_dec_tv_template[] = {
5902 +static const struct cipher_testvec xtea_dec_tv_template[] = {
5904 .key = zeroed_string,
5906 @@ -27064,10 +27139,7 @@ static struct cipher_testvec xtea_dec_tv
5908 * KHAZAD test vectors.
5910 -#define KHAZAD_ENC_TEST_VECTORS 5
5911 -#define KHAZAD_DEC_TEST_VECTORS 5
5913 -static struct cipher_testvec khazad_enc_tv_template[] = {
5914 +static const struct cipher_testvec khazad_enc_tv_template[] = {
5916 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
5917 "\x00\x00\x00\x00\x00\x00\x00\x00",
5918 @@ -27113,7 +27185,7 @@ static struct cipher_testvec khazad_enc_
5922 -static struct cipher_testvec khazad_dec_tv_template[] = {
5923 +static const struct cipher_testvec khazad_dec_tv_template[] = {
5925 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
5926 "\x00\x00\x00\x00\x00\x00\x00\x00",
5927 @@ -27163,12 +27235,7 @@ static struct cipher_testvec khazad_dec_
5928 * Anubis test vectors.
5931 -#define ANUBIS_ENC_TEST_VECTORS 5
5932 -#define ANUBIS_DEC_TEST_VECTORS 5
5933 -#define ANUBIS_CBC_ENC_TEST_VECTORS 2
5934 -#define ANUBIS_CBC_DEC_TEST_VECTORS 2
5936 -static struct cipher_testvec anubis_enc_tv_template[] = {
5937 +static const struct cipher_testvec anubis_enc_tv_template[] = {
5939 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5940 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5941 @@ -27231,7 +27298,7 @@ static struct cipher_testvec anubis_enc_
5945 -static struct cipher_testvec anubis_dec_tv_template[] = {
5946 +static const struct cipher_testvec anubis_dec_tv_template[] = {
5948 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5949 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5950 @@ -27294,7 +27361,7 @@ static struct cipher_testvec anubis_dec_
5954 -static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5955 +static const struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5957 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5958 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5959 @@ -27329,7 +27396,7 @@ static struct cipher_testvec anubis_cbc_
5963 -static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5964 +static const struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5966 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5967 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5968 @@ -27367,10 +27434,7 @@ static struct cipher_testvec anubis_cbc_
5972 -#define XETA_ENC_TEST_VECTORS 4
5973 -#define XETA_DEC_TEST_VECTORS 4
5975 -static struct cipher_testvec xeta_enc_tv_template[] = {
5976 +static const struct cipher_testvec xeta_enc_tv_template[] = {
5978 .key = zeroed_string,
5980 @@ -27413,7 +27477,7 @@ static struct cipher_testvec xeta_enc_tv
5984 -static struct cipher_testvec xeta_dec_tv_template[] = {
5985 +static const struct cipher_testvec xeta_dec_tv_template[] = {
5987 .key = zeroed_string,
5989 @@ -27459,10 +27523,7 @@ static struct cipher_testvec xeta_dec_tv
5991 * FCrypt test vectors
5993 -#define FCRYPT_ENC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
5994 -#define FCRYPT_DEC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
5996 -static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
5997 +static const struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
5998 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
5999 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6001 @@ -27523,7 +27584,7 @@ static struct cipher_testvec fcrypt_pcbc
6005 -static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6006 +static const struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6007 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6008 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6010 @@ -27587,18 +27648,7 @@ static struct cipher_testvec fcrypt_pcbc
6012 * CAMELLIA test vectors.
6014 -#define CAMELLIA_ENC_TEST_VECTORS 4
6015 -#define CAMELLIA_DEC_TEST_VECTORS 4
6016 -#define CAMELLIA_CBC_ENC_TEST_VECTORS 3
6017 -#define CAMELLIA_CBC_DEC_TEST_VECTORS 3
6018 -#define CAMELLIA_CTR_ENC_TEST_VECTORS 2
6019 -#define CAMELLIA_CTR_DEC_TEST_VECTORS 2
6020 -#define CAMELLIA_LRW_ENC_TEST_VECTORS 8
6021 -#define CAMELLIA_LRW_DEC_TEST_VECTORS 8
6022 -#define CAMELLIA_XTS_ENC_TEST_VECTORS 5
6023 -#define CAMELLIA_XTS_DEC_TEST_VECTORS 5
6025 -static struct cipher_testvec camellia_enc_tv_template[] = {
6026 +static const struct cipher_testvec camellia_enc_tv_template[] = {
6028 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6029 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6030 @@ -27898,7 +27948,7 @@ static struct cipher_testvec camellia_en
6034 -static struct cipher_testvec camellia_dec_tv_template[] = {
6035 +static const struct cipher_testvec camellia_dec_tv_template[] = {
6037 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6038 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6039 @@ -28198,7 +28248,7 @@ static struct cipher_testvec camellia_de
6043 -static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6044 +static const struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6046 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6047 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6048 @@ -28494,7 +28544,7 @@ static struct cipher_testvec camellia_cb
6052 -static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6053 +static const struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6055 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6056 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6057 @@ -28790,7 +28840,7 @@ static struct cipher_testvec camellia_cb
6061 -static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6062 +static const struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6063 { /* Generated with Crypto++ */
6064 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6065 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6066 @@ -29457,7 +29507,7 @@ static struct cipher_testvec camellia_ct
6070 -static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6071 +static const struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6072 { /* Generated with Crypto++ */
6073 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6074 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6075 @@ -30124,7 +30174,7 @@ static struct cipher_testvec camellia_ct
6079 -static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6080 +static const struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6081 /* Generated from AES-LRW test vectors */
6083 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
6084 @@ -30376,7 +30426,7 @@ static struct cipher_testvec camellia_lr
6088 -static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6089 +static const struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6090 /* Generated from AES-LRW test vectors */
6091 /* same as enc vectors with input and result reversed */
6093 @@ -30629,7 +30679,7 @@ static struct cipher_testvec camellia_lr
6097 -static struct cipher_testvec camellia_xts_enc_tv_template[] = {
6098 +static const struct cipher_testvec camellia_xts_enc_tv_template[] = {
6099 /* Generated from AES-XTS test vectors */
6101 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6102 @@ -30971,7 +31021,7 @@ static struct cipher_testvec camellia_xt
6106 -static struct cipher_testvec camellia_xts_dec_tv_template[] = {
6107 +static const struct cipher_testvec camellia_xts_dec_tv_template[] = {
6108 /* Generated from AES-XTS test vectors */
6109 /* same as enc vectors with input and result reversed */
6111 @@ -31317,10 +31367,7 @@ static struct cipher_testvec camellia_xt
6115 -#define SEED_ENC_TEST_VECTORS 4
6116 -#define SEED_DEC_TEST_VECTORS 4
6118 -static struct cipher_testvec seed_enc_tv_template[] = {
6119 +static const struct cipher_testvec seed_enc_tv_template[] = {
6121 .key = zeroed_string,
6123 @@ -31362,7 +31409,7 @@ static struct cipher_testvec seed_enc_tv
6127 -static struct cipher_testvec seed_dec_tv_template[] = {
6128 +static const struct cipher_testvec seed_dec_tv_template[] = {
6130 .key = zeroed_string,
6132 @@ -31404,8 +31451,7 @@ static struct cipher_testvec seed_dec_tv
6136 -#define SALSA20_STREAM_ENC_TEST_VECTORS 5
6137 -static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6138 +static const struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6140 * Testvectors from verified.test-vectors submitted to ECRYPT.
6141 * They are truncated to size 39, 64, 111, 129 to test a variety
6142 @@ -32574,8 +32620,7 @@ static struct cipher_testvec salsa20_str
6146 -#define CHACHA20_ENC_TEST_VECTORS 4
6147 -static struct cipher_testvec chacha20_enc_tv_template[] = {
6148 +static const struct cipher_testvec chacha20_enc_tv_template[] = {
6149 { /* RFC7539 A.2. Test Vector #1 */
6150 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6151 "\x00\x00\x00\x00\x00\x00\x00\x00"
6152 @@ -33086,9 +33131,7 @@ static struct cipher_testvec chacha20_en
6154 * CTS (Cipher Text Stealing) mode tests
6156 -#define CTS_MODE_ENC_TEST_VECTORS 6
6157 -#define CTS_MODE_DEC_TEST_VECTORS 6
6158 -static struct cipher_testvec cts_mode_enc_tv_template[] = {
6159 +static const struct cipher_testvec cts_mode_enc_tv_template[] = {
6160 { /* from rfc3962 */
6162 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6163 @@ -33190,7 +33233,7 @@ static struct cipher_testvec cts_mode_en
6167 -static struct cipher_testvec cts_mode_dec_tv_template[] = {
6168 +static const struct cipher_testvec cts_mode_dec_tv_template[] = {
6169 { /* from rfc3962 */
6171 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6172 @@ -33308,10 +33351,7 @@ struct comp_testvec {
6173 * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
6176 -#define DEFLATE_COMP_TEST_VECTORS 2
6177 -#define DEFLATE_DECOMP_TEST_VECTORS 2
6179 -static struct comp_testvec deflate_comp_tv_template[] = {
6180 +static const struct comp_testvec deflate_comp_tv_template[] = {
6184 @@ -33347,7 +33387,7 @@ static struct comp_testvec deflate_comp_
6188 -static struct comp_testvec deflate_decomp_tv_template[] = {
6189 +static const struct comp_testvec deflate_decomp_tv_template[] = {
6193 @@ -33386,10 +33426,7 @@ static struct comp_testvec deflate_decom
6195 * LZO test vectors (null-terminated strings).
6197 -#define LZO_COMP_TEST_VECTORS 2
6198 -#define LZO_DECOMP_TEST_VECTORS 2
6200 -static struct comp_testvec lzo_comp_tv_template[] = {
6201 +static const struct comp_testvec lzo_comp_tv_template[] = {
6205 @@ -33429,7 +33466,7 @@ static struct comp_testvec lzo_comp_tv_t
6209 -static struct comp_testvec lzo_decomp_tv_template[] = {
6210 +static const struct comp_testvec lzo_decomp_tv_template[] = {
6214 @@ -33472,7 +33509,7 @@ static struct comp_testvec lzo_decomp_tv
6216 #define MICHAEL_MIC_TEST_VECTORS 6
6218 -static struct hash_testvec michael_mic_tv_template[] = {
6219 +static const struct hash_testvec michael_mic_tv_template[] = {
6221 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6223 @@ -33520,9 +33557,7 @@ static struct hash_testvec michael_mic_t
6225 * CRC32 test vectors
6227 -#define CRC32_TEST_VECTORS 14
6229 -static struct hash_testvec crc32_tv_template[] = {
6230 +static const struct hash_testvec crc32_tv_template[] = {
6232 .key = "\x87\xa9\xcb\xed",
6234 @@ -33954,9 +33989,7 @@ static struct hash_testvec crc32_tv_temp
6236 * CRC32C test vectors
6238 -#define CRC32C_TEST_VECTORS 15
6240 -static struct hash_testvec crc32c_tv_template[] = {
6241 +static const struct hash_testvec crc32c_tv_template[] = {
6244 .digest = "\x00\x00\x00\x00",
6245 @@ -34392,9 +34425,7 @@ static struct hash_testvec crc32c_tv_tem
6247 * Blakcifn CRC test vectors
6249 -#define BFIN_CRC_TEST_VECTORS 6
6251 -static struct hash_testvec bfin_crc_tv_template[] = {
6252 +static const struct hash_testvec bfin_crc_tv_template[] = {
6255 .digest = "\x00\x00\x00\x00",
6256 @@ -34479,9 +34510,6 @@ static struct hash_testvec bfin_crc_tv_t
6260 -#define LZ4_COMP_TEST_VECTORS 1
6261 -#define LZ4_DECOMP_TEST_VECTORS 1
6263 static struct comp_testvec lz4_comp_tv_template[] = {
6266 @@ -34512,9 +34540,6 @@ static struct comp_testvec lz4_decomp_tv
6270 -#define LZ4HC_COMP_TEST_VECTORS 1
6271 -#define LZ4HC_DECOMP_TEST_VECTORS 1
6273 static struct comp_testvec lz4hc_comp_tv_template[] = {
6280 + * Copyright 2013 Freescale Semiconductor, Inc.
6281 + * Copyright 2017 NXP Semiconductor, Inc.
6283 + * This program is free software; you can redistribute it and/or modify it
6284 + * under the terms of the GNU General Public License as published by the Free
6285 + * Software Foundation; either version 2 of the License, or (at your option)
6286 + * any later version.
6290 +#include <crypto/internal/aead.h>
6291 +#include <crypto/internal/hash.h>
6292 +#include <crypto/internal/skcipher.h>
6293 +#include <crypto/authenc.h>
6294 +#include <crypto/null.h>
6295 +#include <crypto/scatterwalk.h>
6296 +#include <linux/err.h>
6297 +#include <linux/init.h>
6298 +#include <linux/module.h>
6299 +#include <linux/rtnetlink.h>
6301 +struct tls_instance_ctx {
6302 + struct crypto_ahash_spawn auth;
6303 + struct crypto_skcipher_spawn enc;
6306 +struct crypto_tls_ctx {
6307 + unsigned int reqoff;
6308 + struct crypto_ahash *auth;
6309 + struct crypto_skcipher *enc;
6310 + struct crypto_skcipher *null;
6313 +struct tls_request_ctx {
6315 + * cryptlen holds the payload length in the case of encryption or
6316 + * payload_len + icv_len + padding_len in case of decryption
6318 + unsigned int cryptlen;
6319 + /* working space for partial results */
6320 + struct scatterlist tmp[2];
6321 + struct scatterlist cipher[2];
6322 + struct scatterlist dst[2];
6327 + struct completion completion;
6331 +static void tls_async_op_done(struct crypto_async_request *req, int err)
6333 + struct async_op *areq = req->data;
6335 + if (err == -EINPROGRESS)
6339 + complete(&areq->completion);
6342 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
6343 + unsigned int keylen)
6345 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6346 + struct crypto_ahash *auth = ctx->auth;
6347 + struct crypto_skcipher *enc = ctx->enc;
6348 + struct crypto_authenc_keys keys;
6349 + int err = -EINVAL;
6351 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
6354 + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
6355 + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
6356 + CRYPTO_TFM_REQ_MASK);
6357 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
6358 + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
6359 + CRYPTO_TFM_RES_MASK);
6364 + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
6365 + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
6366 + CRYPTO_TFM_REQ_MASK);
6367 + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
6368 + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
6369 + CRYPTO_TFM_RES_MASK);
6375 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
6380 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
6381 + * @hash: (output) buffer to save the digest into
6382 + * @src: (input) scatterlist with the assoc and payload data
6383 + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
6384 + * @req: (input) aead request
6386 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
6387 + unsigned int srclen, struct aead_request *req)
6389 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6390 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6391 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6392 + struct async_op ahash_op;
6393 + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
6394 + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
6395 + int err = -EBADMSG;
6397 + /* Bail out if the request assoc len is 0 */
6398 + if (!req->assoclen)
6401 + init_completion(&ahash_op.completion);
6403 + /* the hash transform to be executed comes from the original request */
6404 + ahash_request_set_tfm(ahreq, ctx->auth);
6405 + /* prepare the hash request with input data and result pointer */
6406 + ahash_request_set_crypt(ahreq, src, hash, srclen);
6407 + /* set the notifier for when the async hash function returns */
6408 + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
6409 + tls_async_op_done, &ahash_op);
6411 + /* Calculate the digest on the given data. The result is put in hash */
6412 + err = crypto_ahash_digest(ahreq);
6413 + if (err == -EINPROGRESS) {
6414 + err = wait_for_completion_interruptible(&ahash_op.completion);
6416 + err = ahash_op.err;
6423 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
6424 + * @hash: (output) buffer to save the digest and padding into
6425 + * @phashlen: (output) the size of digest + padding
6426 + * @req: (input) aead request
6428 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
6429 + struct aead_request *req)
6431 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6432 + unsigned int hash_size = crypto_aead_authsize(tls);
6433 + unsigned int block_size = crypto_aead_blocksize(tls);
6434 + unsigned int srclen = req->cryptlen + hash_size;
6435 + unsigned int icvlen = req->cryptlen + req->assoclen;
6436 + unsigned int padlen;
6439 + err = crypto_tls_genicv(hash, req->src, icvlen, req);
6443 + /* add padding after digest */
6444 + padlen = block_size - (srclen % block_size);
6445 + memset(hash + hash_size, padlen - 1, padlen);
6447 + *phashlen = hash_size + padlen;
6452 +static int crypto_tls_copy_data(struct aead_request *req,
6453 + struct scatterlist *src,
6454 + struct scatterlist *dst,
6457 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6458 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6459 + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
6461 + skcipher_request_set_tfm(skreq, ctx->null);
6462 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6464 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
6466 + return crypto_skcipher_encrypt(skreq);
6469 +static int crypto_tls_encrypt(struct aead_request *req)
6471 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6472 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6473 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6474 + struct skcipher_request *skreq;
6475 + struct scatterlist *cipher = treq_ctx->cipher;
6476 + struct scatterlist *tmp = treq_ctx->tmp;
6477 + struct scatterlist *sg, *src, *dst;
6478 + unsigned int cryptlen, phashlen;
6479 + u8 *hash = treq_ctx->tail;
6483 + * The hash result is saved at the beginning of the tls request ctx
6484 + * and is aligned as required by the hash transform. Enough space was
6485 + * allocated in crypto_tls_init_tfm to accommodate the difference. The
6486 + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
6487 + * the result is not overwritten by the second (cipher) request.
6489 + hash = (u8 *)ALIGN((unsigned long)hash +
6490 + crypto_ahash_alignmask(ctx->auth),
6491 + crypto_ahash_alignmask(ctx->auth) + 1);
6494 + * STEP 1: create ICV together with necessary padding
6496 + err = crypto_tls_gen_padicv(hash, &phashlen, req);
6501 + * STEP 2: Hash and padding are combined with the payload
6502 + * depending on the form it arrives. Scatter tables must have at least
6503 + * one page of data before chaining with another table and can't have
6504 + * an empty data page. The following code addresses these requirements.
6506 + * If the payload is empty, only the hash is encrypted, otherwise the
6507 + * payload scatterlist is merged with the hash. A special merging case
6508 + * is when the payload has only one page of data. In that case the
6509 + * payload page is moved to another scatterlist and prepared there for
6512 + if (req->cryptlen) {
6513 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6515 + sg_init_table(cipher, 2);
6516 + sg_set_buf(cipher + 1, hash, phashlen);
6518 + if (sg_is_last(src)) {
6519 + sg_set_page(cipher, sg_page(src), req->cryptlen,
6523 + unsigned int rem_len = req->cryptlen;
6525 + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
6526 + rem_len -= min(rem_len, sg->length);
6528 + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
6529 + sg_chain(sg, 1, cipher);
6532 + sg_init_one(cipher, hash, phashlen);
6537 + * If src != dst copy the associated data from source to destination.
6538 + * In both cases fast-forward past the associated data in the dest.
6540 + if (req->src != req->dst) {
6541 + err = crypto_tls_copy_data(req, req->src, req->dst,
6546 + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
6549 + * STEP 3: encrypt the frame and return the result
6551 + cryptlen = req->cryptlen + phashlen;
6554 + * The hash and the cipher are applied at different times and their
6555 + * requests can use the same memory space without interference
6557 + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6558 + skcipher_request_set_tfm(skreq, ctx->enc);
6559 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6560 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6561 + req->base.complete, req->base.data);
6563 + * Apply the cipher transform. The result will be in req->dst when the
6564 + * asynchronous call terminates
6566 + err = crypto_skcipher_encrypt(skreq);
6571 +static int crypto_tls_decrypt(struct aead_request *req)
6573 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6574 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6575 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6576 + unsigned int cryptlen = req->cryptlen;
6577 + unsigned int hash_size = crypto_aead_authsize(tls);
6578 + unsigned int block_size = crypto_aead_blocksize(tls);
6579 + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6580 + struct scatterlist *tmp = treq_ctx->tmp;
6581 + struct scatterlist *src, *dst;
6583 + u8 padding[255]; /* padding can be 0-255 bytes */
6586 + u8 *ihash, *hash = treq_ctx->tail;
6589 + int err = -EINVAL;
6591 + struct async_op ciph_op;
6594 + * Rule out bad packets. The input packet length must be at least one
6595 + * byte more than the hash_size
6597 + if (cryptlen <= hash_size || cryptlen % block_size)
6601 + * Step 1 - Decrypt the source. Fast-forward past the associated data
6602 + * to the encrypted data. The result will be overwritten in place so
6603 + * that the decrypted data will be adjacent to the associated data. The
6604 + * last step (computing the hash) will have its input data already
6605 + * prepared and ready to be accessed at req->src.
6607 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6610 + init_completion(&ciph_op.completion);
6611 + skcipher_request_set_tfm(skreq, ctx->enc);
6612 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6613 + tls_async_op_done, &ciph_op);
6614 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6615 + err = crypto_skcipher_decrypt(skreq);
6616 + if (err == -EINPROGRESS) {
6617 + err = wait_for_completion_interruptible(&ciph_op.completion);
6619 + err = ciph_op.err;
6625 + * Step 2 - Verify padding
6626 + * Retrieve the last byte of the payload; this is the padding size.
6629 + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
6631 + /* RFC recommendation for invalid padding size. */
6632 + if (cryptlen < pad_size + hash_size) {
6634 + paderr = -EBADMSG;
6636 + cryptlen -= pad_size;
6637 + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
6639 + /* Padding content must be equal with pad_size. We verify it all */
6640 + for (i = 0; i < pad_size; i++)
6641 + if (padding[i] != pad_size)
6642 + paderr = -EBADMSG;
6645 + * Step 3 - Verify hash
6646 + * Align the digest result as required by the hash transform. Enough
6647 + * space was allocated in crypto_tls_init_tfm
6649 + hash = (u8 *)ALIGN((unsigned long)hash +
6650 + crypto_ahash_alignmask(ctx->auth),
6651 + crypto_ahash_alignmask(ctx->auth) + 1);
6653 + * Two bytes at the end of the associated data make the length field.
6654 + * It must be updated with the length of the cleartext message before
6655 + * the hash is calculated.
6657 + len_field = sg_virt(req->src) + req->assoclen - 2;
6658 + cryptlen -= hash_size;
6659 + *len_field = htons(cryptlen);
6661 + /* This is the hash from the decrypted packet. Save it for later */
6662 + ihash = hash + hash_size;
6663 + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
6665 + /* Now compute and compare our ICV with the one from the packet */
6666 + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
6668 + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
6670 + if (req->src != req->dst) {
6671 + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
6677 + /* return the first found error */
6682 + aead_request_complete(req, err);
6686 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
6688 + struct aead_instance *inst = aead_alg_instance(tfm);
6689 + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
6690 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6691 + struct crypto_ahash *auth;
6692 + struct crypto_skcipher *enc;
6693 + struct crypto_skcipher *null;
6696 + auth = crypto_spawn_ahash(&ictx->auth);
6698 + return PTR_ERR(auth);
6700 + enc = crypto_spawn_skcipher(&ictx->enc);
6701 + err = PTR_ERR(enc);
6703 + goto err_free_ahash;
6705 + null = crypto_get_default_null_skcipher2();
6706 + err = PTR_ERR(null);
6708 + goto err_free_skcipher;
6715 + * Allow enough space for two digests. The two digests will be compared
6716 + * during the decryption phase. One will come from the decrypted packet
6717 + * and the other will be calculated. For encryption, one digest is
6718 + * padded (up to a cipher blocksize) and chained with the payload
6720 + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
6721 + crypto_ahash_alignmask(auth),
6722 + crypto_ahash_alignmask(auth) + 1) +
6723 + max(crypto_ahash_digestsize(auth),
6724 + crypto_skcipher_blocksize(enc));
6726 + crypto_aead_set_reqsize(tfm,
6727 + sizeof(struct tls_request_ctx) +
6729 + max_t(unsigned int,
6730 + crypto_ahash_reqsize(auth) +
6731 + sizeof(struct ahash_request),
6732 + crypto_skcipher_reqsize(enc) +
6733 + sizeof(struct skcipher_request)));
6738 + crypto_free_skcipher(enc);
6740 + crypto_free_ahash(auth);
6744 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
6746 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6748 + crypto_free_ahash(ctx->auth);
6749 + crypto_free_skcipher(ctx->enc);
6750 + crypto_put_default_null_skcipher2();
6753 +static void crypto_tls_free(struct aead_instance *inst)
6755 + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
6757 + crypto_drop_skcipher(&ctx->enc);
6758 + crypto_drop_ahash(&ctx->auth);
6762 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
6764 + struct crypto_attr_type *algt;
6765 + struct aead_instance *inst;
6766 + struct hash_alg_common *auth;
6767 + struct crypto_alg *auth_base;
6768 + struct skcipher_alg *enc;
6769 + struct tls_instance_ctx *ctx;
6770 + const char *enc_name;
6773 + algt = crypto_get_attr_type(tb);
6775 + return PTR_ERR(algt);
6777 + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
6780 + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
6781 + CRYPTO_ALG_TYPE_AHASH_MASK |
6782 + crypto_requires_sync(algt->type, algt->mask));
6784 + return PTR_ERR(auth);
6786 + auth_base = &auth->base;
6788 + enc_name = crypto_attr_alg_name(tb[2]);
6789 + err = PTR_ERR(enc_name);
6790 + if (IS_ERR(enc_name))
6791 + goto out_put_auth;
6793 + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
6796 + goto out_put_auth;
6798 + ctx = aead_instance_ctx(inst);
6800 + err = crypto_init_ahash_spawn(&ctx->auth, auth,
6801 + aead_crypto_instance(inst));
6803 + goto err_free_inst;
6805 + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
6806 + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
6807 + crypto_requires_sync(algt->type,
6810 + goto err_drop_auth;
6812 + enc = crypto_spawn_skcipher_alg(&ctx->enc);
6814 + err = -ENAMETOOLONG;
6815 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
6816 + "tls10(%s,%s)", auth_base->cra_name,
6817 + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
6818 + goto err_drop_enc;
6820 + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
6821 + "tls10(%s,%s)", auth_base->cra_driver_name,
6822 + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
6823 + goto err_drop_enc;
6825 + inst->alg.base.cra_flags = (auth_base->cra_flags |
6826 + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
6827 + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
6828 + auth_base->cra_priority;
6829 + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
6830 + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
6831 + enc->base.cra_alignmask;
6832 + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
6834 + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
6835 + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
6836 + inst->alg.maxauthsize = auth->digestsize;
6838 + inst->alg.init = crypto_tls_init_tfm;
6839 + inst->alg.exit = crypto_tls_exit_tfm;
6841 + inst->alg.setkey = crypto_tls_setkey;
6842 + inst->alg.encrypt = crypto_tls_encrypt;
6843 + inst->alg.decrypt = crypto_tls_decrypt;
6845 + inst->free = crypto_tls_free;
6847 + err = aead_register_instance(tmpl, inst);
6849 + goto err_drop_enc;
6852 + crypto_mod_put(auth_base);
6856 + crypto_drop_skcipher(&ctx->enc);
6858 + crypto_drop_ahash(&ctx->auth);
6865 +static struct crypto_template crypto_tls_tmpl = {
6867 + .create = crypto_tls_create,
6868 + .module = THIS_MODULE,
6871 +static int __init crypto_tls_module_init(void)
6873 + return crypto_register_template(&crypto_tls_tmpl);
6876 +static void __exit crypto_tls_module_exit(void)
6878 + crypto_unregister_template(&crypto_tls_tmpl);
6881 +module_init(crypto_tls_module_init);
6882 +module_exit(crypto_tls_module_exit);
6884 +MODULE_LICENSE("GPL");
6885 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
6886 --- a/drivers/crypto/caam/Kconfig
6887 +++ b/drivers/crypto/caam/Kconfig
6889 +config CRYPTO_DEV_FSL_CAAM_COMMON
6892 config CRYPTO_DEV_FSL_CAAM
6893 - tristate "Freescale CAAM-Multicore driver backend"
6894 + tristate "Freescale CAAM-Multicore platform driver backend"
6895 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
6896 + select CRYPTO_DEV_FSL_CAAM_COMMON
6899 Enables the driver module for Freescale's Cryptographic Accelerator
6900 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
6901 @@ -11,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
6902 To compile this driver as a module, choose M here: the module
6903 will be called caam.
6905 +if CRYPTO_DEV_FSL_CAAM
6907 +config CRYPTO_DEV_FSL_CAAM_DEBUG
6908 + bool "Enable debug output in CAAM driver"
6910 + Selecting this will enable printing of various debug
6911 + information in the CAAM driver.
6913 config CRYPTO_DEV_FSL_CAAM_JR
6914 tristate "Freescale CAAM Job Ring driver backend"
6915 - depends on CRYPTO_DEV_FSL_CAAM
6918 Enables the driver module for Job Rings which are part of
6919 @@ -24,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
6920 To compile this driver as a module, choose M here: the module
6921 will be called caam_jr.
6923 +if CRYPTO_DEV_FSL_CAAM_JR
6925 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6927 - depends on CRYPTO_DEV_FSL_CAAM_JR
6931 @@ -44,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6933 config CRYPTO_DEV_FSL_CAAM_INTC
6934 bool "Job Ring interrupt coalescing"
6935 - depends on CRYPTO_DEV_FSL_CAAM_JR
6937 Enable the Job Ring's interrupt coalescing feature.
6939 @@ -74,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
6941 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6942 tristate "Register algorithm implementations with the Crypto API"
6943 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6946 select CRYPTO_AUTHENC
6947 @@ -87,9 +98,25 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6948 To compile this as a module, choose M here: the module
6949 will be called caamalg.
6951 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
6952 + tristate "Queue Interface as Crypto API backend"
6953 + depends on FSL_SDK_DPA && NET
6955 + select CRYPTO_AUTHENC
6956 + select CRYPTO_BLKCIPHER
6958 + Selecting this will use CAAM Queue Interface (QI) for sending
6959 + & receiving crypto jobs to/from CAAM. This gives better performance
6960 + than job ring interface when the number of cores are more than the
6961 + number of job rings assigned to the kernel. The number of portals
6962 + assigned to the kernel should also be more than the number of
6965 + To compile this as a module, choose M here: the module
6966 + will be called caamalg_qi.
6968 config CRYPTO_DEV_FSL_CAAM_AHASH_API
6969 tristate "Register hash algorithm implementations with Crypto API"
6970 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6974 @@ -101,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
6976 config CRYPTO_DEV_FSL_CAAM_PKC_API
6977 tristate "Register public key cryptography implementations with Crypto API"
6978 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6982 @@ -113,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
6984 config CRYPTO_DEV_FSL_CAAM_RNG_API
6985 tristate "Register caam device for hwrng API"
6986 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6990 @@ -124,13 +149,26 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
6991 To compile this as a module, choose M here: the module
6992 will be called caamrng.
6994 -config CRYPTO_DEV_FSL_CAAM_IMX
6995 - def_bool SOC_IMX6 || SOC_IMX7D
6996 - depends on CRYPTO_DEV_FSL_CAAM
6997 +endif # CRYPTO_DEV_FSL_CAAM_JR
6999 -config CRYPTO_DEV_FSL_CAAM_DEBUG
7000 - bool "Enable debug output in CAAM driver"
7001 - depends on CRYPTO_DEV_FSL_CAAM
7003 - Selecting this will enable printing of various debug
7004 - information in the CAAM driver.
7005 +endif # CRYPTO_DEV_FSL_CAAM
7007 +config CRYPTO_DEV_FSL_DPAA2_CAAM
7008 + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
7009 + depends on FSL_MC_DPIO
7010 + select CRYPTO_DEV_FSL_CAAM_COMMON
7011 + select CRYPTO_BLKCIPHER
7012 + select CRYPTO_AUTHENC
7013 + select CRYPTO_AEAD
7015 + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
7016 + It handles DPSECI DPAA2 objects that sit on the Management Complex
7019 + To compile this as a module, choose M here: the module
7020 + will be called dpaa2_caam.
7022 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
7023 + def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
7024 + CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
7025 + CRYPTO_DEV_FSL_DPAA2_CAAM)
7026 --- a/drivers/crypto/caam/Makefile
7027 +++ b/drivers/crypto/caam/Makefile
7028 @@ -5,13 +5,26 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
7029 ccflags-y := -DDEBUG
7032 +ccflags-y += -DVERSION=\"\"
7034 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
7035 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
7036 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
7037 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
7038 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
7039 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
7040 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
7041 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
7042 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
7045 -caam_jr-objs := jr.o key_gen.o error.o
7046 +caam_jr-objs := jr.o key_gen.o
7047 caam_pkc-y := caampkc.o pkc_desc.o
7048 +ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
7049 + ccflags-y += -DCONFIG_CAAM_QI
7053 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
7055 +dpaa2_caam-y := caamalg_qi2.o dpseci.o
7056 --- a/drivers/crypto/caam/caamalg.c
7057 +++ b/drivers/crypto/caam/caamalg.c
7059 * caam - Freescale FSL CAAM support for crypto API
7061 * Copyright 2008-2011 Freescale Semiconductor, Inc.
7062 + * Copyright 2016 NXP
7064 * Based on talitos crypto API driver.
7068 #include "sg_sw_sec4.h"
7069 #include "key_gen.h"
7070 +#include "caamalg_desc.h"
7075 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
7076 CTR_RFC3686_NONCE_SIZE + \
7077 SHA512_DIGEST_SIZE * 2)
7078 -/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
7079 -#define CAAM_MAX_IV_LENGTH 16
7081 #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
7082 #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
7084 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
7087 -/* length of descriptors text */
7088 -#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
7089 -#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
7090 -#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
7091 -#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
7093 -/* Note: Nonce is counted in enckeylen */
7094 -#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
7096 -#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
7097 -#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
7098 -#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
7100 -#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
7101 -#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
7102 -#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
7104 -#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
7105 -#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7106 -#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7108 -#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
7109 -#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
7110 -#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
7112 -#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
7113 -#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
7115 -#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
7118 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
7119 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
7121 @@ -112,47 +81,11 @@
7122 #define debug(format, arg...)
7126 -#include <linux/highmem.h>
7128 -static void dbg_dump_sg(const char *level, const char *prefix_str,
7129 - int prefix_type, int rowsize, int groupsize,
7130 - struct scatterlist *sg, size_t tlen, bool ascii,
7133 - struct scatterlist *it;
7138 - for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
7140 - * make sure the scatterlist's page
7141 - * has a valid virtual memory mapping
7143 - it_page = kmap_atomic(sg_page(it));
7144 - if (unlikely(!it_page)) {
7145 - printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
7149 - buf = it_page + it->offset;
7150 - len = min_t(size_t, tlen, it->length);
7151 - print_hex_dump(level, prefix_str, prefix_type, rowsize,
7152 - groupsize, buf, len, ascii);
7155 - kunmap_atomic(it_page);
7160 static struct list_head alg_list;
7162 struct caam_alg_entry {
7163 int class1_alg_type;
7164 int class2_alg_type;
7169 @@ -163,302 +96,67 @@ struct caam_aead_alg {
7173 -/* Set DK bit in class 1 operation if shared */
7174 -static inline void append_dec_op1(u32 *desc, u32 type)
7176 - u32 *jump_cmd, *uncond_jump_cmd;
7178 - /* DK bit is valid only for AES */
7179 - if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
7180 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7185 - jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
7186 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7188 - uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7189 - set_jump_tgt_here(desc, jump_cmd);
7190 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7191 - OP_ALG_DECRYPT | OP_ALG_AAI_DK);
7192 - set_jump_tgt_here(desc, uncond_jump_cmd);
7196 - * For aead functions, read payload and write payload,
7197 - * both of which are specified in req->src and req->dst
7199 -static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
7201 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7202 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
7203 - KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
7207 - * For ablkcipher encrypt and decrypt, read from req->src and
7208 - * write to req->dst
7210 -static inline void ablkcipher_append_src_dst(u32 *desc)
7212 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7213 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7214 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
7215 - KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7216 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7220 * per-session context
7223 - struct device *jrdev;
7224 u32 sh_desc_enc[DESC_MAX_USED_LEN];
7225 u32 sh_desc_dec[DESC_MAX_USED_LEN];
7226 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
7227 + u8 key[CAAM_MAX_KEY_SIZE];
7228 dma_addr_t sh_desc_enc_dma;
7229 dma_addr_t sh_desc_dec_dma;
7230 dma_addr_t sh_desc_givenc_dma;
7231 - u32 class1_alg_type;
7232 - u32 class2_alg_type;
7234 - u8 key[CAAM_MAX_KEY_SIZE];
7236 - unsigned int enckeylen;
7237 - unsigned int split_key_len;
7238 - unsigned int split_key_pad_len;
7239 + struct device *jrdev;
7240 + struct alginfo adata;
7241 + struct alginfo cdata;
7242 unsigned int authsize;
7245 -static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
7246 - int keys_fit_inline, bool is_rfc3686)
7249 - unsigned int enckeylen = ctx->enckeylen;
7252 - * RFC3686 specific:
7253 - * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
7254 - * | enckeylen = encryption key size + nonce size
7257 - enckeylen -= CTR_RFC3686_NONCE_SIZE;
7259 - if (keys_fit_inline) {
7260 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7261 - ctx->split_key_len, CLASS_2 |
7262 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7263 - append_key_as_imm(desc, (void *)ctx->key +
7264 - ctx->split_key_pad_len, enckeylen,
7265 - enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7267 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7268 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7269 - append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
7270 - enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7273 - /* Load Counter into CONTEXT1 reg */
7275 - nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
7277 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
7278 - LDST_CLASS_IND_CCB |
7279 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
7281 - MOVE_SRC_OUTFIFO |
7282 - MOVE_DEST_CLASS1CTX |
7283 - (16 << MOVE_OFFSET_SHIFT) |
7284 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
7288 -static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
7289 - int keys_fit_inline, bool is_rfc3686)
7291 - u32 *key_jump_cmd;
7293 - /* Note: Context registers are saved. */
7294 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
7296 - /* Skip if already shared */
7297 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7300 - append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7302 - set_jump_tgt_here(desc, key_jump_cmd);
7305 static int aead_null_set_sh_desc(struct crypto_aead *aead)
7307 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7308 struct device *jrdev = ctx->jrdev;
7309 - bool keys_fit_inline = false;
7310 - u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
7312 + int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
7313 + ctx->adata.keylen_pad;
7316 * Job Descriptor and Shared Descriptors
7317 * must all fit into the 64-word Descriptor h/w Buffer
7319 - if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
7320 - ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7321 - keys_fit_inline = true;
7322 + if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
7323 + ctx->adata.key_inline = true;
7324 + ctx->adata.key_virt = ctx->key;
7326 + ctx->adata.key_inline = false;
7327 + ctx->adata.key_dma = ctx->key_dma;
7330 /* aead_encrypt shared descriptor */
7331 desc = ctx->sh_desc_enc;
7333 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7335 - /* Skip if already shared */
7336 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7338 - if (keys_fit_inline)
7339 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7340 - ctx->split_key_len, CLASS_2 |
7341 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7343 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7344 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7345 - set_jump_tgt_here(desc, key_jump_cmd);
7347 - /* assoclen + cryptlen = seqinlen */
7348 - append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
7350 - /* Prepare to read and write cryptlen + assoclen bytes */
7351 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7352 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7355 - * MOVE_LEN opcode is not available in all SEC HW revisions,
7356 - * thus need to do some magic, i.e. self-patch the descriptor
7359 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7361 - (0x6 << MOVE_LEN_SHIFT));
7362 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
7363 - MOVE_DEST_DESCBUF |
7365 - (0x8 << MOVE_LEN_SHIFT));
7367 - /* Class 2 operation */
7368 - append_operation(desc, ctx->class2_alg_type |
7369 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7371 - /* Read and write cryptlen bytes */
7372 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7374 - set_move_tgt_here(desc, read_move_cmd);
7375 - set_move_tgt_here(desc, write_move_cmd);
7376 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7377 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7381 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7382 - LDST_SRCDST_BYTE_CONTEXT);
7384 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7387 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7388 - dev_err(jrdev, "unable to map shared descriptor\n");
7392 - print_hex_dump(KERN_ERR,
7393 - "aead null enc shdesc@"__stringify(__LINE__)": ",
7394 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7395 - desc_bytes(desc), 1);
7397 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
7398 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7399 + desc_bytes(desc), DMA_TO_DEVICE);
7402 * Job Descriptor and Shared Descriptors
7403 * must all fit into the 64-word Descriptor h/w Buffer
7405 - keys_fit_inline = false;
7406 - if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
7407 - ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7408 - keys_fit_inline = true;
7410 - desc = ctx->sh_desc_dec;
7411 + if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
7412 + ctx->adata.key_inline = true;
7413 + ctx->adata.key_virt = ctx->key;
7415 + ctx->adata.key_inline = false;
7416 + ctx->adata.key_dma = ctx->key_dma;
7419 /* aead_decrypt shared descriptor */
7420 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7422 - /* Skip if already shared */
7423 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7425 - if (keys_fit_inline)
7426 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7427 - ctx->split_key_len, CLASS_2 |
7428 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7430 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7431 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7432 - set_jump_tgt_here(desc, key_jump_cmd);
7434 - /* Class 2 operation */
7435 - append_operation(desc, ctx->class2_alg_type |
7436 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7438 - /* assoclen + cryptlen = seqoutlen */
7439 - append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7441 - /* Prepare to read and write cryptlen + assoclen bytes */
7442 - append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
7443 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
7446 - * MOVE_LEN opcode is not available in all SEC HW revisions,
7447 - * thus need to do some magic, i.e. self-patch the descriptor
7450 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7452 - (0x6 << MOVE_LEN_SHIFT));
7453 - write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
7454 - MOVE_DEST_DESCBUF |
7456 - (0x8 << MOVE_LEN_SHIFT));
7458 - /* Read and write cryptlen bytes */
7459 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7462 - * Insert a NOP here, since we need at least 4 instructions between
7463 - * code patching the descriptor buffer and the location being patched.
7465 - jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7466 - set_jump_tgt_here(desc, jump_cmd);
7468 - set_move_tgt_here(desc, read_move_cmd);
7469 - set_move_tgt_here(desc, write_move_cmd);
7470 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7471 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7473 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7476 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7477 - FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7479 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7482 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7483 - dev_err(jrdev, "unable to map shared descriptor\n");
7487 - print_hex_dump(KERN_ERR,
7488 - "aead null dec shdesc@"__stringify(__LINE__)": ",
7489 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7490 - desc_bytes(desc), 1);
7492 + desc = ctx->sh_desc_dec;
7493 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
7494 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7495 + desc_bytes(desc), DMA_TO_DEVICE);
7499 @@ -470,11 +168,11 @@ static int aead_set_sh_desc(struct crypt
7500 unsigned int ivsize = crypto_aead_ivsize(aead);
7501 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7502 struct device *jrdev = ctx->jrdev;
7503 - bool keys_fit_inline;
7504 - u32 geniv, moveiv;
7505 u32 ctx1_iv_off = 0;
7507 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
7508 + u32 *desc, *nonce = NULL;
7510 + unsigned int data_len[2];
7511 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
7512 OP_ALG_AAI_CTR_MOD128);
7513 const bool is_rfc3686 = alg->caam.rfc3686;
7515 @@ -482,7 +180,7 @@ static int aead_set_sh_desc(struct crypt
7518 /* NULL encryption / decryption */
7519 - if (!ctx->enckeylen)
7520 + if (!ctx->cdata.keylen)
7521 return aead_null_set_sh_desc(aead);
7524 @@ -497,8 +195,14 @@ static int aead_set_sh_desc(struct crypt
7526 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
7530 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
7531 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
7532 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
7535 + data_len[0] = ctx->adata.keylen_pad;
7536 + data_len[1] = ctx->cdata.keylen;
7538 if (alg->caam.geniv)
7540 @@ -507,146 +211,64 @@ static int aead_set_sh_desc(struct crypt
7541 * Job Descriptor and Shared Descriptors
7542 * must all fit into the 64-word Descriptor h/w Buffer
7544 - keys_fit_inline = false;
7545 - if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7546 - ctx->split_key_pad_len + ctx->enckeylen +
7547 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7548 - CAAM_DESC_BYTES_MAX)
7549 - keys_fit_inline = true;
7551 - /* aead_encrypt shared descriptor */
7552 - desc = ctx->sh_desc_enc;
7554 - /* Note: Context registers are saved. */
7555 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7557 - /* Class 2 operation */
7558 - append_operation(desc, ctx->class2_alg_type |
7559 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7560 + if (desc_inline_query(DESC_AEAD_ENC_LEN +
7561 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7562 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7563 + ARRAY_SIZE(data_len)) < 0)
7566 - /* Read and write assoclen bytes */
7567 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7568 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7570 + ctx->adata.key_virt = ctx->key;
7572 + ctx->adata.key_dma = ctx->key_dma;
7574 - /* Skip assoc data */
7575 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7577 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7579 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7581 - /* read assoc before reading payload */
7582 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7584 + ctx->adata.key_inline = !!(inl_mask & 1);
7585 + ctx->cdata.key_inline = !!(inl_mask & 2);
7587 - /* Load Counter into CONTEXT1 reg */
7589 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7590 - LDST_SRCDST_BYTE_CONTEXT |
7591 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7592 - LDST_OFFSET_SHIFT));
7594 - /* Class 1 operation */
7595 - append_operation(desc, ctx->class1_alg_type |
7596 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7598 - /* Read and write cryptlen bytes */
7599 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7600 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7601 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
7604 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7605 - LDST_SRCDST_BYTE_CONTEXT);
7607 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7610 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7611 - dev_err(jrdev, "unable to map shared descriptor\n");
7615 - print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
7616 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7617 - desc_bytes(desc), 1);
7619 + /* aead_encrypt shared descriptor */
7620 + desc = ctx->sh_desc_enc;
7621 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
7622 + ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
7624 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7625 + desc_bytes(desc), DMA_TO_DEVICE);
7629 * Job Descriptor and Shared Descriptors
7630 * must all fit into the 64-word Descriptor h/w Buffer
7632 - keys_fit_inline = false;
7633 - if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7634 - ctx->split_key_pad_len + ctx->enckeylen +
7635 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7636 - CAAM_DESC_BYTES_MAX)
7637 - keys_fit_inline = true;
7639 - /* aead_decrypt shared descriptor */
7640 - desc = ctx->sh_desc_dec;
7642 - /* Note: Context registers are saved. */
7643 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7645 - /* Class 2 operation */
7646 - append_operation(desc, ctx->class2_alg_type |
7647 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7648 + if (desc_inline_query(DESC_AEAD_DEC_LEN +
7649 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7650 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7651 + ARRAY_SIZE(data_len)) < 0)
7654 - /* Read and write assoclen bytes */
7655 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7656 - if (alg->caam.geniv)
7657 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
7659 + ctx->adata.key_virt = ctx->key;
7661 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7663 - /* Skip assoc data */
7664 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7666 - /* read assoc before reading payload */
7667 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7670 - if (alg->caam.geniv) {
7671 - append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
7672 - LDST_SRCDST_BYTE_CONTEXT |
7673 - (ctx1_iv_off << LDST_OFFSET_SHIFT));
7674 - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
7675 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
7678 - /* Load Counter into CONTEXT1 reg */
7680 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7681 - LDST_SRCDST_BYTE_CONTEXT |
7682 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7683 - LDST_OFFSET_SHIFT));
7684 + ctx->adata.key_dma = ctx->key_dma;
7686 - /* Choose operation */
7688 - append_operation(desc, ctx->class1_alg_type |
7689 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
7691 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7693 - append_dec_op1(desc, ctx->class1_alg_type);
7694 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7696 - /* Read and write cryptlen bytes */
7697 - append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7698 - append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7699 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
7702 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7703 - FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7704 + ctx->adata.key_inline = !!(inl_mask & 1);
7705 + ctx->cdata.key_inline = !!(inl_mask & 2);
7707 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7710 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7711 - dev_err(jrdev, "unable to map shared descriptor\n");
7715 - print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
7716 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7717 - desc_bytes(desc), 1);
7719 + /* aead_decrypt shared descriptor */
7720 + desc = ctx->sh_desc_dec;
7721 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
7722 + ctx->authsize, alg->caam.geniv, is_rfc3686,
7723 + nonce, ctx1_iv_off, false);
7724 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7725 + desc_bytes(desc), DMA_TO_DEVICE);
7727 if (!alg->caam.geniv)
7729 @@ -655,107 +277,32 @@ skip_enc:
7730 * Job Descriptor and Shared Descriptors
7731 * must all fit into the 64-word Descriptor h/w Buffer
7733 - keys_fit_inline = false;
7734 - if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7735 - ctx->split_key_pad_len + ctx->enckeylen +
7736 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7737 - CAAM_DESC_BYTES_MAX)
7738 - keys_fit_inline = true;
7740 - /* aead_givencrypt shared descriptor */
7741 - desc = ctx->sh_desc_enc;
7743 - /* Note: Context registers are saved. */
7744 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7745 + if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
7746 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7747 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7748 + ARRAY_SIZE(data_len)) < 0)
7754 + ctx->adata.key_virt = ctx->key;
7756 + ctx->adata.key_dma = ctx->key_dma;
7759 - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
7760 - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
7761 - NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7762 - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
7763 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7764 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7765 - append_move(desc, MOVE_WAITCOMP |
7766 - MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
7767 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7768 - (ivsize << MOVE_LEN_SHIFT));
7769 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7772 - /* Copy IV to class 1 context */
7773 - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
7774 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7775 - (ivsize << MOVE_LEN_SHIFT));
7777 - /* Return to encryption */
7778 - append_operation(desc, ctx->class2_alg_type |
7779 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7781 - /* Read and write assoclen bytes */
7782 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7783 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7785 - /* ivsize + cryptlen = seqoutlen - authsize */
7786 - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
7788 - /* Skip assoc data */
7789 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7791 - /* read assoc before reading payload */
7792 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7795 - /* Copy iv from outfifo to class 2 fifo */
7796 - moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
7797 - NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7798 - append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
7799 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7800 - append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
7801 - LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
7803 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7805 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7807 - /* Load Counter into CONTEXT1 reg */
7809 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7810 - LDST_SRCDST_BYTE_CONTEXT |
7811 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7812 - LDST_OFFSET_SHIFT));
7814 - /* Class 1 operation */
7815 - append_operation(desc, ctx->class1_alg_type |
7816 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7818 - /* Will write ivsize + cryptlen */
7819 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7821 - /* Not need to reload iv */
7822 - append_seq_fifo_load(desc, ivsize,
7823 - FIFOLD_CLASS_SKIP);
7825 - /* Will read cryptlen */
7826 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7827 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
7828 - FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
7829 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7832 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7833 - LDST_SRCDST_BYTE_CONTEXT);
7834 + ctx->adata.key_inline = !!(inl_mask & 1);
7835 + ctx->cdata.key_inline = !!(inl_mask & 2);
7837 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7840 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7841 - dev_err(jrdev, "unable to map shared descriptor\n");
7845 - print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
7846 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7847 - desc_bytes(desc), 1);
7849 + /* aead_givencrypt shared descriptor */
7850 + desc = ctx->sh_desc_enc;
7851 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
7852 + ctx->authsize, is_rfc3686, nonce,
7853 + ctx1_iv_off, false);
7854 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7855 + desc_bytes(desc), DMA_TO_DEVICE);
7859 @@ -776,12 +323,12 @@ static int gcm_set_sh_desc(struct crypto
7861 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7862 struct device *jrdev = ctx->jrdev;
7863 - bool keys_fit_inline = false;
7864 - u32 *key_jump_cmd, *zero_payload_jump_cmd,
7865 - *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
7866 + unsigned int ivsize = crypto_aead_ivsize(aead);
7868 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
7869 + ctx->cdata.keylen;
7871 - if (!ctx->enckeylen || !ctx->authsize)
7872 + if (!ctx->cdata.keylen || !ctx->authsize)
7876 @@ -789,175 +336,35 @@ static int gcm_set_sh_desc(struct crypto
7877 * Job Descriptor and Shared Descriptor
7878 * must fit into the 64-word Descriptor h/w Buffer
7880 - if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
7881 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
7882 - keys_fit_inline = true;
7883 + if (rem_bytes >= DESC_GCM_ENC_LEN) {
7884 + ctx->cdata.key_inline = true;
7885 + ctx->cdata.key_virt = ctx->key;
7887 + ctx->cdata.key_inline = false;
7888 + ctx->cdata.key_dma = ctx->key_dma;
7891 desc = ctx->sh_desc_enc;
7893 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7895 - /* skip key loading if they are loaded due to sharing */
7896 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7897 - JUMP_COND_SHRD | JUMP_COND_SELF);
7898 - if (keys_fit_inline)
7899 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
7900 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7902 - append_key(desc, ctx->key_dma, ctx->enckeylen,
7903 - CLASS_1 | KEY_DEST_CLASS_REG);
7904 - set_jump_tgt_here(desc, key_jump_cmd);
7906 - /* class 1 operation */
7907 - append_operation(desc, ctx->class1_alg_type |
7908 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7910 - /* if assoclen + cryptlen is ZERO, skip to ICV write */
7911 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7912 - zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
7913 - JUMP_COND_MATH_Z);
7915 - /* if assoclen is ZERO, skip reading the assoc data */
7916 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7917 - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
7918 - JUMP_COND_MATH_Z);
7920 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7922 - /* skip assoc data */
7923 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7925 - /* cryptlen = seqinlen - assoclen */
7926 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
7928 - /* if cryptlen is ZERO jump to zero-payload commands */
7929 - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
7930 - JUMP_COND_MATH_Z);
7932 - /* read assoc data */
7933 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7934 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
7935 - set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
7937 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7939 - /* write encrypted data */
7940 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
7942 - /* read payload data */
7943 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7944 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7946 - /* jump the zero-payload commands */
7947 - append_jump(desc, JUMP_TEST_ALL | 2);
7949 - /* zero-payload commands */
7950 - set_jump_tgt_here(desc, zero_payload_jump_cmd);
7952 - /* read assoc data */
7953 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7954 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
7956 - /* There is no input data */
7957 - set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
7960 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
7961 - LDST_SRCDST_BYTE_CONTEXT);
7963 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7966 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7967 - dev_err(jrdev, "unable to map shared descriptor\n");
7971 - print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
7972 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7973 - desc_bytes(desc), 1);
7975 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
7976 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7977 + desc_bytes(desc), DMA_TO_DEVICE);
7980 * Job Descriptor and Shared Descriptors
7981 * must all fit into the 64-word Descriptor h/w Buffer
7983 - keys_fit_inline = false;
7984 - if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
7985 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
7986 - keys_fit_inline = true;
7987 + if (rem_bytes >= DESC_GCM_DEC_LEN) {
7988 + ctx->cdata.key_inline = true;
7989 + ctx->cdata.key_virt = ctx->key;
7991 + ctx->cdata.key_inline = false;
7992 + ctx->cdata.key_dma = ctx->key_dma;
7995 desc = ctx->sh_desc_dec;
7997 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7999 - /* skip key loading if they are loaded due to sharing */
8000 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8001 - JUMP_TEST_ALL | JUMP_COND_SHRD |
8003 - if (keys_fit_inline)
8004 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8005 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8007 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8008 - CLASS_1 | KEY_DEST_CLASS_REG);
8009 - set_jump_tgt_here(desc, key_jump_cmd);
8011 - /* class 1 operation */
8012 - append_operation(desc, ctx->class1_alg_type |
8013 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8015 - /* if assoclen is ZERO, skip reading the assoc data */
8016 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
8017 - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
8018 - JUMP_COND_MATH_Z);
8020 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8022 - /* skip assoc data */
8023 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8025 - /* read assoc data */
8026 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8027 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8029 - set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
8031 - /* cryptlen = seqoutlen - assoclen */
8032 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8034 - /* jump to zero-payload command if cryptlen is zero */
8035 - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
8036 - JUMP_COND_MATH_Z);
8038 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8040 - /* store encrypted data */
8041 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8043 - /* read payload data */
8044 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8045 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8047 - /* zero-payload command */
8048 - set_jump_tgt_here(desc, zero_payload_jump_cmd);
8051 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8052 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8054 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8057 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8058 - dev_err(jrdev, "unable to map shared descriptor\n");
8062 - print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
8063 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8064 - desc_bytes(desc), 1);
8066 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8067 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8068 + desc_bytes(desc), DMA_TO_DEVICE);
8072 @@ -976,11 +383,12 @@ static int rfc4106_set_sh_desc(struct cr
8074 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8075 struct device *jrdev = ctx->jrdev;
8076 - bool keys_fit_inline = false;
8077 - u32 *key_jump_cmd;
8078 + unsigned int ivsize = crypto_aead_ivsize(aead);
8080 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8081 + ctx->cdata.keylen;
8083 - if (!ctx->enckeylen || !ctx->authsize)
8084 + if (!ctx->cdata.keylen || !ctx->authsize)
8088 @@ -988,148 +396,37 @@ static int rfc4106_set_sh_desc(struct cr
8089 * Job Descriptor and Shared Descriptor
8090 * must fit into the 64-word Descriptor h/w Buffer
8092 - if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8093 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8094 - keys_fit_inline = true;
8095 + if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
8096 + ctx->cdata.key_inline = true;
8097 + ctx->cdata.key_virt = ctx->key;
8099 + ctx->cdata.key_inline = false;
8100 + ctx->cdata.key_dma = ctx->key_dma;
8103 desc = ctx->sh_desc_enc;
8105 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8107 - /* Skip key loading if it is loaded due to sharing */
8108 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8110 - if (keys_fit_inline)
8111 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8112 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8114 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8115 - CLASS_1 | KEY_DEST_CLASS_REG);
8116 - set_jump_tgt_here(desc, key_jump_cmd);
8118 - /* Class 1 operation */
8119 - append_operation(desc, ctx->class1_alg_type |
8120 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8122 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8123 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8125 - /* Read assoc data */
8126 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8127 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8130 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8132 - /* Will read cryptlen bytes */
8133 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8135 - /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8136 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8138 - /* Skip assoc data */
8139 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8141 - /* cryptlen = seqoutlen - assoclen */
8142 - append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
8144 - /* Write encrypted data */
8145 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8147 - /* Read payload data */
8148 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8149 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
8152 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8153 - LDST_SRCDST_BYTE_CONTEXT);
8155 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8158 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8159 - dev_err(jrdev, "unable to map shared descriptor\n");
8163 - print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
8164 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8165 - desc_bytes(desc), 1);
8167 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8169 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8170 + desc_bytes(desc), DMA_TO_DEVICE);
8173 * Job Descriptor and Shared Descriptors
8174 * must all fit into the 64-word Descriptor h/w Buffer
8176 - keys_fit_inline = false;
8177 - if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
8178 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8179 - keys_fit_inline = true;
8180 + if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
8181 + ctx->cdata.key_inline = true;
8182 + ctx->cdata.key_virt = ctx->key;
8184 + ctx->cdata.key_inline = false;
8185 + ctx->cdata.key_dma = ctx->key_dma;
8188 desc = ctx->sh_desc_dec;
8190 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8192 - /* Skip key loading if it is loaded due to sharing */
8193 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8194 - JUMP_TEST_ALL | JUMP_COND_SHRD);
8195 - if (keys_fit_inline)
8196 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8197 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8199 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8200 - CLASS_1 | KEY_DEST_CLASS_REG);
8201 - set_jump_tgt_here(desc, key_jump_cmd);
8203 - /* Class 1 operation */
8204 - append_operation(desc, ctx->class1_alg_type |
8205 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8207 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8208 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8210 - /* Read assoc data */
8211 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8212 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8215 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8217 - /* Will read cryptlen bytes */
8218 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
8220 - /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8221 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8223 - /* Skip assoc data */
8224 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8226 - /* Will write cryptlen bytes */
8227 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8229 - /* Store payload data */
8230 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8232 - /* Read encrypted data */
8233 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8234 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8237 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8238 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8240 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8243 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8244 - dev_err(jrdev, "unable to map shared descriptor\n");
8248 - print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
8249 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8250 - desc_bytes(desc), 1);
8252 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8254 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8255 + desc_bytes(desc), DMA_TO_DEVICE);
8259 @@ -1149,12 +446,12 @@ static int rfc4543_set_sh_desc(struct cr
8261 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8262 struct device *jrdev = ctx->jrdev;
8263 - bool keys_fit_inline = false;
8264 - u32 *key_jump_cmd;
8265 - u32 *read_move_cmd, *write_move_cmd;
8266 + unsigned int ivsize = crypto_aead_ivsize(aead);
8268 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8269 + ctx->cdata.keylen;
8271 - if (!ctx->enckeylen || !ctx->authsize)
8272 + if (!ctx->cdata.keylen || !ctx->authsize)
8276 @@ -1162,151 +459,37 @@ static int rfc4543_set_sh_desc(struct cr
8277 * Job Descriptor and Shared Descriptor
8278 * must fit into the 64-word Descriptor h/w Buffer
8280 - if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8281 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8282 - keys_fit_inline = true;
8283 + if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
8284 + ctx->cdata.key_inline = true;
8285 + ctx->cdata.key_virt = ctx->key;
8287 + ctx->cdata.key_inline = false;
8288 + ctx->cdata.key_dma = ctx->key_dma;
8291 desc = ctx->sh_desc_enc;
8293 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8295 - /* Skip key loading if it is loaded due to sharing */
8296 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8298 - if (keys_fit_inline)
8299 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8300 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8302 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8303 - CLASS_1 | KEY_DEST_CLASS_REG);
8304 - set_jump_tgt_here(desc, key_jump_cmd);
8306 - /* Class 1 operation */
8307 - append_operation(desc, ctx->class1_alg_type |
8308 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8310 - /* assoclen + cryptlen = seqinlen */
8311 - append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
8314 - * MOVE_LEN opcode is not available in all SEC HW revisions,
8315 - * thus need to do some magic, i.e. self-patch the descriptor
8318 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8319 - (0x6 << MOVE_LEN_SHIFT));
8320 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8321 - (0x8 << MOVE_LEN_SHIFT));
8323 - /* Will read assoclen + cryptlen bytes */
8324 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8326 - /* Will write assoclen + cryptlen bytes */
8327 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8329 - /* Read and write assoclen + cryptlen bytes */
8330 - aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
8332 - set_move_tgt_here(desc, read_move_cmd);
8333 - set_move_tgt_here(desc, write_move_cmd);
8334 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8335 - /* Move payload data to OFIFO */
8336 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8339 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8340 - LDST_SRCDST_BYTE_CONTEXT);
8342 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8345 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8346 - dev_err(jrdev, "unable to map shared descriptor\n");
8350 - print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
8351 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8352 - desc_bytes(desc), 1);
8354 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8356 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8357 + desc_bytes(desc), DMA_TO_DEVICE);
8360 * Job Descriptor and Shared Descriptors
8361 * must all fit into the 64-word Descriptor h/w Buffer
8363 - keys_fit_inline = false;
8364 - if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8365 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8366 - keys_fit_inline = true;
8367 + if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
8368 + ctx->cdata.key_inline = true;
8369 + ctx->cdata.key_virt = ctx->key;
8371 + ctx->cdata.key_inline = false;
8372 + ctx->cdata.key_dma = ctx->key_dma;
8375 desc = ctx->sh_desc_dec;
8377 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8379 - /* Skip key loading if it is loaded due to sharing */
8380 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8381 - JUMP_TEST_ALL | JUMP_COND_SHRD);
8382 - if (keys_fit_inline)
8383 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8384 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8386 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8387 - CLASS_1 | KEY_DEST_CLASS_REG);
8388 - set_jump_tgt_here(desc, key_jump_cmd);
8390 - /* Class 1 operation */
8391 - append_operation(desc, ctx->class1_alg_type |
8392 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8394 - /* assoclen + cryptlen = seqoutlen */
8395 - append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8398 - * MOVE_LEN opcode is not available in all SEC HW revisions,
8399 - * thus need to do some magic, i.e. self-patch the descriptor
8402 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8403 - (0x6 << MOVE_LEN_SHIFT));
8404 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8405 - (0x8 << MOVE_LEN_SHIFT));
8407 - /* Will read assoclen + cryptlen bytes */
8408 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8410 - /* Will write assoclen + cryptlen bytes */
8411 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8413 - /* Store payload data */
8414 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8416 - /* In-snoop assoclen + cryptlen data */
8417 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
8418 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
8420 - set_move_tgt_here(desc, read_move_cmd);
8421 - set_move_tgt_here(desc, write_move_cmd);
8422 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8423 - /* Move payload data to OFIFO */
8424 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8425 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8428 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8429 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8431 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8434 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8435 - dev_err(jrdev, "unable to map shared descriptor\n");
8439 - print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
8440 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8441 - desc_bytes(desc), 1);
8443 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8445 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8446 + desc_bytes(desc), DMA_TO_DEVICE);
8450 @@ -1322,19 +505,9 @@ static int rfc4543_setauthsize(struct cr
8454 -static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
8457 - return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
8458 - ctx->split_key_pad_len, key_in, authkeylen,
8462 static int aead_setkey(struct crypto_aead *aead,
8463 const u8 *key, unsigned int keylen)
8465 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
8466 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
8467 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8468 struct device *jrdev = ctx->jrdev;
8469 struct crypto_authenc_keys keys;
8470 @@ -1343,53 +516,32 @@ static int aead_setkey(struct crypto_aea
8471 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
8474 - /* Pick class 2 key length from algorithm submask */
8475 - ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
8476 - OP_ALG_ALGSEL_SHIFT] * 2;
8477 - ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
8479 - if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
8483 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
8484 keys.authkeylen + keys.enckeylen, keys.enckeylen,
8486 - printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
8487 - ctx->split_key_len, ctx->split_key_pad_len);
8488 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8489 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8492 - ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
8493 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
8494 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
8500 /* postpend encryption key to auth split key */
8501 - memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
8503 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
8504 - keys.enckeylen, DMA_TO_DEVICE);
8505 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8506 - dev_err(jrdev, "unable to map key i/o memory\n");
8509 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
8510 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
8511 + keys.enckeylen, DMA_TO_DEVICE);
8513 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
8514 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
8515 - ctx->split_key_pad_len + keys.enckeylen, 1);
8516 + ctx->adata.keylen_pad + keys.enckeylen, 1);
8519 - ctx->enckeylen = keys.enckeylen;
8521 - ret = aead_set_sh_desc(aead);
8523 - dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
8524 - keys.enckeylen, DMA_TO_DEVICE);
8528 + ctx->cdata.keylen = keys.enckeylen;
8529 + return aead_set_sh_desc(aead);
8531 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
8533 @@ -1400,7 +552,6 @@ static int gcm_setkey(struct crypto_aead
8535 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8536 struct device *jrdev = ctx->jrdev;
8540 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8541 @@ -1408,21 +559,10 @@ static int gcm_setkey(struct crypto_aead
8544 memcpy(ctx->key, key, keylen);
8545 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8547 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8548 - dev_err(jrdev, "unable to map key i/o memory\n");
8551 - ctx->enckeylen = keylen;
8553 - ret = gcm_set_sh_desc(aead);
8555 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8558 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8559 + ctx->cdata.keylen = keylen;
8562 + return gcm_set_sh_desc(aead);
8565 static int rfc4106_setkey(struct crypto_aead *aead,
8566 @@ -1430,7 +570,6 @@ static int rfc4106_setkey(struct crypto_
8568 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8569 struct device *jrdev = ctx->jrdev;
8574 @@ -1446,22 +585,10 @@ static int rfc4106_setkey(struct crypto_
8575 * The last four bytes of the key material are used as the salt value
8576 * in the nonce. Update the AES key length.
8578 - ctx->enckeylen = keylen - 4;
8580 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8582 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8583 - dev_err(jrdev, "unable to map key i/o memory\n");
8587 - ret = rfc4106_set_sh_desc(aead);
8589 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8594 + ctx->cdata.keylen = keylen - 4;
8595 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8597 + return rfc4106_set_sh_desc(aead);
8600 static int rfc4543_setkey(struct crypto_aead *aead,
8601 @@ -1469,7 +596,6 @@ static int rfc4543_setkey(struct crypto_
8603 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8604 struct device *jrdev = ctx->jrdev;
8609 @@ -1485,43 +611,28 @@ static int rfc4543_setkey(struct crypto_
8610 * The last four bytes of the key material are used as the salt value
8611 * in the nonce. Update the AES key length.
8613 - ctx->enckeylen = keylen - 4;
8615 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8617 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8618 - dev_err(jrdev, "unable to map key i/o memory\n");
8622 - ret = rfc4543_set_sh_desc(aead);
8624 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8629 + ctx->cdata.keylen = keylen - 4;
8630 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8632 + return rfc4543_set_sh_desc(aead);
8635 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8636 const u8 *key, unsigned int keylen)
8638 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8639 - struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
8640 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
8641 const char *alg_name = crypto_tfm_alg_name(tfm);
8642 struct device *jrdev = ctx->jrdev;
8644 - u32 *key_jump_cmd;
8645 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
8649 u32 ctx1_iv_off = 0;
8650 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
8651 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
8652 OP_ALG_AAI_CTR_MOD128);
8653 const bool is_rfc3686 = (ctr_mode &&
8654 (strstr(alg_name, "rfc3686") != NULL));
8656 + memcpy(ctx->key, key, keylen);
8658 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8659 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8660 @@ -1544,215 +655,33 @@ static int ablkcipher_setkey(struct cryp
8661 keylen -= CTR_RFC3686_NONCE_SIZE;
8664 - memcpy(ctx->key, key, keylen);
8665 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8667 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8668 - dev_err(jrdev, "unable to map key i/o memory\n");
8671 - ctx->enckeylen = keylen;
8672 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8673 + ctx->cdata.keylen = keylen;
8674 + ctx->cdata.key_virt = ctx->key;
8675 + ctx->cdata.key_inline = true;
8677 /* ablkcipher_encrypt shared descriptor */
8678 desc = ctx->sh_desc_enc;
8679 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8680 - /* Skip if already shared */
8681 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8684 - /* Load class1 key only */
8685 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8686 - ctx->enckeylen, CLASS_1 |
8687 - KEY_DEST_CLASS_REG);
8689 - /* Load nonce into CONTEXT1 reg */
8691 - nonce = (u8 *)key + keylen;
8692 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8693 - LDST_CLASS_IND_CCB |
8694 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8695 - append_move(desc, MOVE_WAITCOMP |
8696 - MOVE_SRC_OUTFIFO |
8697 - MOVE_DEST_CLASS1CTX |
8698 - (16 << MOVE_OFFSET_SHIFT) |
8699 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8702 - set_jump_tgt_here(desc, key_jump_cmd);
8705 - append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8706 - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8707 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
8709 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8710 + desc_bytes(desc), DMA_TO_DEVICE);
8712 - /* Load counter into CONTEXT1 reg */
8714 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8715 - LDST_SRCDST_BYTE_CONTEXT |
8716 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8717 - LDST_OFFSET_SHIFT));
8719 - /* Load operation */
8720 - append_operation(desc, ctx->class1_alg_type |
8721 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8723 - /* Perform operation */
8724 - ablkcipher_append_src_dst(desc);
8726 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8729 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8730 - dev_err(jrdev, "unable to map shared descriptor\n");
8734 - print_hex_dump(KERN_ERR,
8735 - "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
8736 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8737 - desc_bytes(desc), 1);
8739 /* ablkcipher_decrypt shared descriptor */
8740 desc = ctx->sh_desc_dec;
8741 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
8743 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8744 + desc_bytes(desc), DMA_TO_DEVICE);
8746 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8747 - /* Skip if already shared */
8748 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8751 - /* Load class1 key only */
8752 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8753 - ctx->enckeylen, CLASS_1 |
8754 - KEY_DEST_CLASS_REG);
8756 - /* Load nonce into CONTEXT1 reg */
8758 - nonce = (u8 *)key + keylen;
8759 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8760 - LDST_CLASS_IND_CCB |
8761 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8762 - append_move(desc, MOVE_WAITCOMP |
8763 - MOVE_SRC_OUTFIFO |
8764 - MOVE_DEST_CLASS1CTX |
8765 - (16 << MOVE_OFFSET_SHIFT) |
8766 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8769 - set_jump_tgt_here(desc, key_jump_cmd);
8772 - append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8773 - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8775 - /* Load counter into CONTEXT1 reg */
8777 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8778 - LDST_SRCDST_BYTE_CONTEXT |
8779 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8780 - LDST_OFFSET_SHIFT));
8782 - /* Choose operation */
8784 - append_operation(desc, ctx->class1_alg_type |
8785 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
8787 - append_dec_op1(desc, ctx->class1_alg_type);
8789 - /* Perform operation */
8790 - ablkcipher_append_src_dst(desc);
8792 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8795 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8796 - dev_err(jrdev, "unable to map shared descriptor\n");
8801 - print_hex_dump(KERN_ERR,
8802 - "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
8803 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8804 - desc_bytes(desc), 1);
8806 /* ablkcipher_givencrypt shared descriptor */
8807 desc = ctx->sh_desc_givenc;
8808 + cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
8810 + dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
8811 + desc_bytes(desc), DMA_TO_DEVICE);
8813 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8814 - /* Skip if already shared */
8815 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8818 - /* Load class1 key only */
8819 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8820 - ctx->enckeylen, CLASS_1 |
8821 - KEY_DEST_CLASS_REG);
8823 - /* Load Nonce into CONTEXT1 reg */
8825 - nonce = (u8 *)key + keylen;
8826 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8827 - LDST_CLASS_IND_CCB |
8828 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8829 - append_move(desc, MOVE_WAITCOMP |
8830 - MOVE_SRC_OUTFIFO |
8831 - MOVE_DEST_CLASS1CTX |
8832 - (16 << MOVE_OFFSET_SHIFT) |
8833 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8835 - set_jump_tgt_here(desc, key_jump_cmd);
8838 - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
8839 - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
8840 - NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
8841 - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
8842 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
8843 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8844 - append_move(desc, MOVE_WAITCOMP |
8846 - MOVE_DEST_CLASS1CTX |
8847 - (crt->ivsize << MOVE_LEN_SHIFT) |
8848 - (ctx1_iv_off << MOVE_OFFSET_SHIFT));
8849 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8851 - /* Copy generated IV to memory */
8852 - append_seq_store(desc, crt->ivsize,
8853 - LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
8854 - (ctx1_iv_off << LDST_OFFSET_SHIFT));
8856 - /* Load Counter into CONTEXT1 reg */
8858 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8859 - LDST_SRCDST_BYTE_CONTEXT |
8860 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8861 - LDST_OFFSET_SHIFT));
8864 - append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
8865 - (1 << JUMP_OFFSET_SHIFT));
8867 - /* Load operation */
8868 - append_operation(desc, ctx->class1_alg_type |
8869 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8871 - /* Perform operation */
8872 - ablkcipher_append_src_dst(desc);
8874 - ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
8877 - if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
8878 - dev_err(jrdev, "unable to map shared descriptor\n");
8882 - print_hex_dump(KERN_ERR,
8883 - "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
8884 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8885 - desc_bytes(desc), 1);
8892 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8893 @@ -1760,8 +689,7 @@ static int xts_ablkcipher_setkey(struct
8895 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8896 struct device *jrdev = ctx->jrdev;
8897 - u32 *key_jump_cmd, *desc;
8898 - __be64 sector_size = cpu_to_be64(512);
8901 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
8902 crypto_ablkcipher_set_flags(ablkcipher,
8903 @@ -1771,126 +699,38 @@ static int xts_ablkcipher_setkey(struct
8906 memcpy(ctx->key, key, keylen);
8907 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
8908 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8909 - dev_err(jrdev, "unable to map key i/o memory\n");
8912 - ctx->enckeylen = keylen;
8913 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8914 + ctx->cdata.keylen = keylen;
8915 + ctx->cdata.key_virt = ctx->key;
8916 + ctx->cdata.key_inline = true;
8918 /* xts_ablkcipher_encrypt shared descriptor */
8919 desc = ctx->sh_desc_enc;
8920 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8921 - /* Skip if already shared */
8922 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8925 - /* Load class1 keys only */
8926 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8927 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8929 - /* Load sector size with index 40 bytes (0x28) */
8930 - append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
8931 - LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
8932 - append_data(desc, (void *)§or_size, 8);
8934 - set_jump_tgt_here(desc, key_jump_cmd);
8937 - * create sequence for loading the sector index
8938 - * Upper 8B of IV - will be used as sector index
8939 - * Lower 8B of IV - will be discarded
8941 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
8942 - LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
8943 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8945 - /* Load operation */
8946 - append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
8949 - /* Perform operation */
8950 - ablkcipher_append_src_dst(desc);
8952 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
8954 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8955 - dev_err(jrdev, "unable to map shared descriptor\n");
8959 - print_hex_dump(KERN_ERR,
8960 - "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
8961 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
8963 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
8964 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8965 + desc_bytes(desc), DMA_TO_DEVICE);
8967 /* xts_ablkcipher_decrypt shared descriptor */
8968 desc = ctx->sh_desc_dec;
8970 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8971 - /* Skip if already shared */
8972 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8975 - /* Load class1 key only */
8976 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8977 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8979 - /* Load sector size with index 40 bytes (0x28) */
8980 - append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
8981 - LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
8982 - append_data(desc, (void *)§or_size, 8);
8984 - set_jump_tgt_here(desc, key_jump_cmd);
8987 - * create sequence for loading the sector index
8988 - * Upper 8B of IV - will be used as sector index
8989 - * Lower 8B of IV - will be discarded
8991 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
8992 - LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
8993 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8995 - /* Load operation */
8996 - append_dec_op1(desc, ctx->class1_alg_type);
8998 - /* Perform operation */
8999 - ablkcipher_append_src_dst(desc);
9001 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
9003 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
9004 - dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
9005 - desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
9006 - dev_err(jrdev, "unable to map shared descriptor\n");
9010 - print_hex_dump(KERN_ERR,
9011 - "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
9012 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
9014 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
9015 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
9016 + desc_bytes(desc), DMA_TO_DEVICE);
9022 * aead_edesc - s/w-extended aead descriptor
9023 - * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
9024 - * @src_nents: number of segments in input scatterlist
9025 - * @dst_nents: number of segments in output scatterlist
9026 - * @iv_dma: dma address of iv for checking continuity and link table
9027 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9028 + * @src_nents: number of segments in input s/w scatterlist
9029 + * @dst_nents: number of segments in output s/w scatterlist
9030 * @sec4_sg_bytes: length of dma mapped sec4_sg space
9031 * @sec4_sg_dma: bus physical mapped address of h/w link table
9032 + * @sec4_sg: pointer to h/w link table
9033 * @hw_desc: the h/w job descriptor followed by any referenced link tables
9039 - dma_addr_t iv_dma;
9041 dma_addr_t sec4_sg_dma;
9042 struct sec4_sg_entry *sec4_sg;
9043 @@ -1899,12 +739,12 @@ struct aead_edesc {
9046 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
9047 - * @src_nents: number of segments in input scatterlist
9048 - * @dst_nents: number of segments in output scatterlist
9049 + * @src_nents: number of segments in input s/w scatterlist
9050 + * @dst_nents: number of segments in output s/w scatterlist
9051 * @iv_dma: dma address of iv for checking continuity and link table
9052 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9053 * @sec4_sg_bytes: length of dma mapped sec4_sg space
9054 * @sec4_sg_dma: bus physical mapped address of h/w link table
9055 + * @sec4_sg: pointer to h/w link table
9056 * @hw_desc: the h/w job descriptor followed by any referenced link tables
9058 struct ablkcipher_edesc {
9059 @@ -1924,10 +764,11 @@ static void caam_unmap(struct device *de
9063 - dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
9064 - dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
9066 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
9067 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
9069 - dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
9070 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
9074 @@ -2021,8 +862,7 @@ static void ablkcipher_encrypt_done(stru
9075 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9078 - edesc = (struct ablkcipher_edesc *)((char *)desc -
9079 - offsetof(struct ablkcipher_edesc, hw_desc));
9080 + edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9083 caam_jr_strstatus(jrdev, err);
9084 @@ -2031,10 +871,10 @@ static void ablkcipher_encrypt_done(stru
9085 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
9086 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9087 edesc->src_nents > 1 ? 100 : ivsize, 1);
9088 - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
9089 - DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9090 - edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9092 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
9093 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9094 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9096 ablkcipher_unmap(jrdev, edesc, req);
9098 @@ -2062,8 +902,7 @@ static void ablkcipher_decrypt_done(stru
9099 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9102 - edesc = (struct ablkcipher_edesc *)((char *)desc -
9103 - offsetof(struct ablkcipher_edesc, hw_desc));
9104 + edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9106 caam_jr_strstatus(jrdev, err);
9108 @@ -2071,10 +910,10 @@ static void ablkcipher_decrypt_done(stru
9109 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
9110 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9112 - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
9113 - DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9114 - edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9116 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
9117 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9118 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9120 ablkcipher_unmap(jrdev, edesc, req);
9122 @@ -2114,7 +953,7 @@ static void init_aead_job(struct aead_re
9123 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9126 - src_dma = sg_dma_address(req->src);
9127 + src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
9130 src_dma = edesc->sec4_sg_dma;
9131 @@ -2129,7 +968,7 @@ static void init_aead_job(struct aead_re
9132 out_options = in_options;
9134 if (unlikely(req->src != req->dst)) {
9135 - if (!edesc->dst_nents) {
9136 + if (edesc->dst_nents == 1) {
9137 dst_dma = sg_dma_address(req->dst);
9139 dst_dma = edesc->sec4_sg_dma +
9140 @@ -2175,7 +1014,7 @@ static void init_gcm_job(struct aead_req
9141 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
9144 - append_data(desc, ctx->key + ctx->enckeylen, 4);
9145 + append_data(desc, ctx->key + ctx->cdata.keylen, 4);
9147 append_data(desc, req->iv, ivsize);
9148 /* End of blank commands */
9149 @@ -2190,7 +1029,7 @@ static void init_authenc_job(struct aead
9150 struct caam_aead_alg, aead);
9151 unsigned int ivsize = crypto_aead_ivsize(aead);
9152 struct caam_ctx *ctx = crypto_aead_ctx(aead);
9153 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
9154 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
9155 OP_ALG_AAI_CTR_MOD128);
9156 const bool is_rfc3686 = alg->caam.rfc3686;
9157 u32 *desc = edesc->hw_desc;
9158 @@ -2236,16 +1075,15 @@ static void init_ablkcipher_job(u32 *sh_
9159 int len, sec4_sg_index = 0;
9162 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9163 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9164 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
9165 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9167 - printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
9168 - dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
9169 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9170 - edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9171 + pr_err("asked=%d, nbytes%d\n",
9172 + (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
9174 + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
9175 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9176 + edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9178 len = desc_len(sh_desc);
9179 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9180 @@ -2261,7 +1099,7 @@ static void init_ablkcipher_job(u32 *sh_
9181 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
9183 if (likely(req->src == req->dst)) {
9184 - if (!edesc->src_nents && iv_contig) {
9185 + if (edesc->src_nents == 1 && iv_contig) {
9186 dst_dma = sg_dma_address(req->src);
9188 dst_dma = edesc->sec4_sg_dma +
9189 @@ -2269,7 +1107,7 @@ static void init_ablkcipher_job(u32 *sh_
9190 out_options = LDST_SGF;
9193 - if (!edesc->dst_nents) {
9194 + if (edesc->dst_nents == 1) {
9195 dst_dma = sg_dma_address(req->dst);
9197 dst_dma = edesc->sec4_sg_dma +
9198 @@ -2296,20 +1134,18 @@ static void init_ablkcipher_giv_job(u32
9199 int len, sec4_sg_index = 0;
9202 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9203 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9204 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
9205 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9207 - dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
9208 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9209 - edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9211 + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
9212 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9213 + edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9215 len = desc_len(sh_desc);
9216 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9218 - if (!edesc->src_nents) {
9219 + if (edesc->src_nents == 1) {
9220 src_dma = sg_dma_address(req->src);
9223 @@ -2340,87 +1176,100 @@ static struct aead_edesc *aead_edesc_all
9224 struct crypto_aead *aead = crypto_aead_reqtfm(req);
9225 struct caam_ctx *ctx = crypto_aead_ctx(aead);
9226 struct device *jrdev = ctx->jrdev;
9227 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9228 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
9229 - int src_nents, dst_nents = 0;
9230 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9231 + GFP_KERNEL : GFP_ATOMIC;
9232 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9233 struct aead_edesc *edesc;
9235 - bool all_contig = true;
9236 - int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
9237 + int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
9238 unsigned int authsize = ctx->authsize;
9240 if (unlikely(req->dst != req->src)) {
9241 - src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
9242 - dst_nents = sg_count(req->dst,
9243 - req->assoclen + req->cryptlen +
9244 - (encrypt ? authsize : (-authsize)));
9246 - src_nents = sg_count(req->src,
9247 - req->assoclen + req->cryptlen +
9248 - (encrypt ? authsize : 0));
9251 - /* Check if data are contiguous. */
9252 - all_contig = !src_nents;
9253 - if (!all_contig) {
9254 - src_nents = src_nents ? : 1;
9255 - sec4_sg_len = src_nents;
9258 - sec4_sg_len += dst_nents;
9260 - sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9261 + src_nents = sg_nents_for_len(req->src, req->assoclen +
9263 + if (unlikely(src_nents < 0)) {
9264 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9265 + req->assoclen + req->cryptlen);
9266 + return ERR_PTR(src_nents);
9269 - /* allocate space for base edesc and hw desc commands, link tables */
9270 - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9273 - dev_err(jrdev, "could not allocate extended descriptor\n");
9274 - return ERR_PTR(-ENOMEM);
9275 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
9277 + (encrypt ? authsize :
9279 + if (unlikely(dst_nents < 0)) {
9280 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9281 + req->assoclen + req->cryptlen +
9282 + (encrypt ? authsize : (-authsize)));
9283 + return ERR_PTR(dst_nents);
9286 + src_nents = sg_nents_for_len(req->src, req->assoclen +
9288 + (encrypt ? authsize : 0));
9289 + if (unlikely(src_nents < 0)) {
9290 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9291 + req->assoclen + req->cryptlen +
9292 + (encrypt ? authsize : 0));
9293 + return ERR_PTR(src_nents);
9297 if (likely(req->src == req->dst)) {
9298 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9299 - DMA_BIDIRECTIONAL);
9300 - if (unlikely(!sgc)) {
9301 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9302 + DMA_BIDIRECTIONAL);
9303 + if (unlikely(!mapped_src_nents)) {
9304 dev_err(jrdev, "unable to map source\n");
9306 return ERR_PTR(-ENOMEM);
9309 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9311 - if (unlikely(!sgc)) {
9312 - dev_err(jrdev, "unable to map source\n");
9314 - return ERR_PTR(-ENOMEM);
9315 + /* Cover also the case of null (zero length) input data */
9317 + mapped_src_nents = dma_map_sg(jrdev, req->src,
9318 + src_nents, DMA_TO_DEVICE);
9319 + if (unlikely(!mapped_src_nents)) {
9320 + dev_err(jrdev, "unable to map source\n");
9321 + return ERR_PTR(-ENOMEM);
9324 + mapped_src_nents = 0;
9327 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9329 - if (unlikely(!sgc)) {
9330 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9332 + if (unlikely(!mapped_dst_nents)) {
9333 dev_err(jrdev, "unable to map destination\n");
9334 - dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
9337 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9338 return ERR_PTR(-ENOMEM);
9342 + sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
9343 + sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9344 + sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9346 + /* allocate space for base edesc and hw desc commands, link tables */
9347 + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9350 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9352 + return ERR_PTR(-ENOMEM);
9355 edesc->src_nents = src_nents;
9356 edesc->dst_nents = dst_nents;
9357 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
9359 - *all_contig_ptr = all_contig;
9360 + *all_contig_ptr = !(mapped_src_nents > 1);
9363 - if (!all_contig) {
9364 - sg_to_sec4_sg_last(req->src, src_nents,
9365 - edesc->sec4_sg + sec4_sg_index, 0);
9366 - sec4_sg_index += src_nents;
9367 + if (mapped_src_nents > 1) {
9368 + sg_to_sec4_sg_last(req->src, mapped_src_nents,
9369 + edesc->sec4_sg + sec4_sg_index, 0);
9370 + sec4_sg_index += mapped_src_nents;
9373 - sg_to_sec4_sg_last(req->dst, dst_nents,
9374 + if (mapped_dst_nents > 1) {
9375 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9376 edesc->sec4_sg + sec4_sg_index, 0);
9379 @@ -2573,13 +1422,9 @@ static int aead_decrypt(struct aead_requ
9384 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9385 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9386 - dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
9387 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9388 - req->assoclen + req->cryptlen, 1, may_sleep);
9390 + caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
9391 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9392 + req->assoclen + req->cryptlen, 1);
9394 /* allocate extended descriptor */
9395 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
9396 @@ -2619,51 +1464,80 @@ static struct ablkcipher_edesc *ablkciph
9397 struct device *jrdev = ctx->jrdev;
9398 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9399 GFP_KERNEL : GFP_ATOMIC;
9400 - int src_nents, dst_nents = 0, sec4_sg_bytes;
9401 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9402 struct ablkcipher_edesc *edesc;
9403 dma_addr_t iv_dma = 0;
9404 - bool iv_contig = false;
9407 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9408 - int sec4_sg_index;
9409 + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9411 - src_nents = sg_count(req->src, req->nbytes);
9412 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9413 + if (unlikely(src_nents < 0)) {
9414 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9416 + return ERR_PTR(src_nents);
9419 - if (req->dst != req->src)
9420 - dst_nents = sg_count(req->dst, req->nbytes);
9421 + if (req->dst != req->src) {
9422 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9423 + if (unlikely(dst_nents < 0)) {
9424 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9426 + return ERR_PTR(dst_nents);
9430 if (likely(req->src == req->dst)) {
9431 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9432 - DMA_BIDIRECTIONAL);
9433 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9434 + DMA_BIDIRECTIONAL);
9435 + if (unlikely(!mapped_src_nents)) {
9436 + dev_err(jrdev, "unable to map source\n");
9437 + return ERR_PTR(-ENOMEM);
9440 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9442 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9444 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9446 + if (unlikely(!mapped_src_nents)) {
9447 + dev_err(jrdev, "unable to map source\n");
9448 + return ERR_PTR(-ENOMEM);
9451 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9453 + if (unlikely(!mapped_dst_nents)) {
9454 + dev_err(jrdev, "unable to map destination\n");
9455 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9456 + return ERR_PTR(-ENOMEM);
9460 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
9461 if (dma_mapping_error(jrdev, iv_dma)) {
9462 dev_err(jrdev, "unable to map IV\n");
9463 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9465 return ERR_PTR(-ENOMEM);
9469 - * Check if iv can be contiguous with source and destination.
9470 - * If so, include it. If not, create scatterlist.
9472 - if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
9475 - src_nents = src_nents ? : 1;
9476 - sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9477 - sizeof(struct sec4_sg_entry);
9478 + if (mapped_src_nents == 1 &&
9479 + iv_dma + ivsize == sg_dma_address(req->src)) {
9483 + in_contig = false;
9484 + sec4_sg_ents = 1 + mapped_src_nents;
9486 + dst_sg_idx = sec4_sg_ents;
9487 + sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9488 + sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9490 /* allocate space for base edesc and hw desc commands, link tables */
9491 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9494 dev_err(jrdev, "could not allocate extended descriptor\n");
9495 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9496 + iv_dma, ivsize, 0, 0);
9497 return ERR_PTR(-ENOMEM);
9500 @@ -2673,23 +1547,24 @@ static struct ablkcipher_edesc *ablkciph
9501 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9504 - sec4_sg_index = 0;
9507 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
9508 - sg_to_sec4_sg_last(req->src, src_nents,
9509 + sg_to_sec4_sg_last(req->src, mapped_src_nents,
9510 edesc->sec4_sg + 1, 0);
9511 - sec4_sg_index += 1 + src_nents;
9515 - sg_to_sec4_sg_last(req->dst, dst_nents,
9516 - edesc->sec4_sg + sec4_sg_index, 0);
9517 + if (mapped_dst_nents > 1) {
9518 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9519 + edesc->sec4_sg + dst_sg_idx, 0);
9522 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9523 sec4_sg_bytes, DMA_TO_DEVICE);
9524 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9525 dev_err(jrdev, "unable to map S/G table\n");
9526 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9527 + iv_dma, ivsize, 0, 0);
9529 return ERR_PTR(-ENOMEM);
9532 @@ -2701,7 +1576,7 @@ static struct ablkcipher_edesc *ablkciph
9536 - *iv_contig_out = iv_contig;
9537 + *iv_contig_out = in_contig;
9541 @@ -2792,30 +1667,54 @@ static struct ablkcipher_edesc *ablkciph
9542 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9543 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9544 struct device *jrdev = ctx->jrdev;
9545 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9546 - CRYPTO_TFM_REQ_MAY_SLEEP)) ?
9547 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9548 GFP_KERNEL : GFP_ATOMIC;
9549 - int src_nents, dst_nents = 0, sec4_sg_bytes;
9550 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
9551 struct ablkcipher_edesc *edesc;
9552 dma_addr_t iv_dma = 0;
9553 - bool iv_contig = false;
9556 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9557 - int sec4_sg_index;
9559 - src_nents = sg_count(req->src, req->nbytes);
9560 + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9562 - if (unlikely(req->dst != req->src))
9563 - dst_nents = sg_count(req->dst, req->nbytes);
9564 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9565 + if (unlikely(src_nents < 0)) {
9566 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9568 + return ERR_PTR(src_nents);
9571 if (likely(req->src == req->dst)) {
9572 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9573 - DMA_BIDIRECTIONAL);
9574 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9575 + DMA_BIDIRECTIONAL);
9576 + if (unlikely(!mapped_src_nents)) {
9577 + dev_err(jrdev, "unable to map source\n");
9578 + return ERR_PTR(-ENOMEM);
9581 + dst_nents = src_nents;
9582 + mapped_dst_nents = src_nents;
9584 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9586 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9588 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9590 + if (unlikely(!mapped_src_nents)) {
9591 + dev_err(jrdev, "unable to map source\n");
9592 + return ERR_PTR(-ENOMEM);
9595 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9596 + if (unlikely(dst_nents < 0)) {
9597 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9599 + return ERR_PTR(dst_nents);
9602 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9604 + if (unlikely(!mapped_dst_nents)) {
9605 + dev_err(jrdev, "unable to map destination\n");
9606 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9607 + return ERR_PTR(-ENOMEM);
9612 @@ -2825,21 +1724,29 @@ static struct ablkcipher_edesc *ablkciph
9613 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
9614 if (dma_mapping_error(jrdev, iv_dma)) {
9615 dev_err(jrdev, "unable to map IV\n");
9616 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9618 return ERR_PTR(-ENOMEM);
9621 - if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
9624 - dst_nents = dst_nents ? : 1;
9625 - sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9626 - sizeof(struct sec4_sg_entry);
9627 + sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
9628 + dst_sg_idx = sec4_sg_ents;
9629 + if (mapped_dst_nents == 1 &&
9630 + iv_dma + ivsize == sg_dma_address(req->dst)) {
9631 + out_contig = true;
9633 + out_contig = false;
9634 + sec4_sg_ents += 1 + mapped_dst_nents;
9637 /* allocate space for base edesc and hw desc commands, link tables */
9638 + sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9639 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9642 dev_err(jrdev, "could not allocate extended descriptor\n");
9643 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9644 + iv_dma, ivsize, 0, 0);
9645 return ERR_PTR(-ENOMEM);
9648 @@ -2849,24 +1756,24 @@ static struct ablkcipher_edesc *ablkciph
9649 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9652 - sec4_sg_index = 0;
9654 - sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
9655 - sec4_sg_index += src_nents;
9657 + if (mapped_src_nents > 1)
9658 + sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
9662 - dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
9663 + if (!out_contig) {
9664 + dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
9666 - sec4_sg_index += 1;
9667 - sg_to_sec4_sg_last(req->dst, dst_nents,
9668 - edesc->sec4_sg + sec4_sg_index, 0);
9669 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9670 + edesc->sec4_sg + dst_sg_idx + 1, 0);
9673 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9674 sec4_sg_bytes, DMA_TO_DEVICE);
9675 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9676 dev_err(jrdev, "unable to map S/G table\n");
9677 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9678 + iv_dma, ivsize, 0, 0);
9680 return ERR_PTR(-ENOMEM);
9682 edesc->iv_dma = iv_dma;
9683 @@ -2878,7 +1785,7 @@ static struct ablkcipher_edesc *ablkciph
9687 - *iv_contig_out = iv_contig;
9688 + *iv_contig_out = out_contig;
9692 @@ -2889,7 +1796,7 @@ static int ablkcipher_givencrypt(struct
9693 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9694 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9695 struct device *jrdev = ctx->jrdev;
9697 + bool iv_contig = false;
9701 @@ -2933,7 +1840,6 @@ struct caam_alg_template {
9703 u32 class1_alg_type;
9704 u32 class2_alg_type;
9708 static struct caam_alg_template driver_algs[] = {
9709 @@ -3118,7 +2024,6 @@ static struct caam_aead_alg driver_aeads
9711 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9712 OP_ALG_AAI_HMAC_PRECOMP,
9713 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9717 @@ -3140,7 +2045,6 @@ static struct caam_aead_alg driver_aeads
9719 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9720 OP_ALG_AAI_HMAC_PRECOMP,
9721 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9725 @@ -3162,7 +2066,6 @@ static struct caam_aead_alg driver_aeads
9727 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9728 OP_ALG_AAI_HMAC_PRECOMP,
9729 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9733 @@ -3184,7 +2087,6 @@ static struct caam_aead_alg driver_aeads
9735 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9736 OP_ALG_AAI_HMAC_PRECOMP,
9737 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9741 @@ -3206,7 +2108,6 @@ static struct caam_aead_alg driver_aeads
9743 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9744 OP_ALG_AAI_HMAC_PRECOMP,
9745 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9749 @@ -3228,7 +2129,6 @@ static struct caam_aead_alg driver_aeads
9751 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9752 OP_ALG_AAI_HMAC_PRECOMP,
9753 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9757 @@ -3250,7 +2150,6 @@ static struct caam_aead_alg driver_aeads
9758 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9759 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9760 OP_ALG_AAI_HMAC_PRECOMP,
9761 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9765 @@ -3273,7 +2172,6 @@ static struct caam_aead_alg driver_aeads
9766 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9767 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9768 OP_ALG_AAI_HMAC_PRECOMP,
9769 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9773 @@ -3296,7 +2194,6 @@ static struct caam_aead_alg driver_aeads
9774 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9775 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9776 OP_ALG_AAI_HMAC_PRECOMP,
9777 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9781 @@ -3319,7 +2216,6 @@ static struct caam_aead_alg driver_aeads
9782 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9783 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9784 OP_ALG_AAI_HMAC_PRECOMP,
9785 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9789 @@ -3342,7 +2238,6 @@ static struct caam_aead_alg driver_aeads
9790 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9791 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9792 OP_ALG_AAI_HMAC_PRECOMP,
9793 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9797 @@ -3365,7 +2260,6 @@ static struct caam_aead_alg driver_aeads
9798 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9799 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9800 OP_ALG_AAI_HMAC_PRECOMP,
9801 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9805 @@ -3388,7 +2282,6 @@ static struct caam_aead_alg driver_aeads
9806 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9807 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9808 OP_ALG_AAI_HMAC_PRECOMP,
9809 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9813 @@ -3411,7 +2304,6 @@ static struct caam_aead_alg driver_aeads
9814 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9815 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9816 OP_ALG_AAI_HMAC_PRECOMP,
9817 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9821 @@ -3434,7 +2326,6 @@ static struct caam_aead_alg driver_aeads
9822 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9823 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9824 OP_ALG_AAI_HMAC_PRECOMP,
9825 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9829 @@ -3457,7 +2348,6 @@ static struct caam_aead_alg driver_aeads
9830 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9831 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9832 OP_ALG_AAI_HMAC_PRECOMP,
9833 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9837 @@ -3480,7 +2370,6 @@ static struct caam_aead_alg driver_aeads
9838 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9839 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9840 OP_ALG_AAI_HMAC_PRECOMP,
9841 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9845 @@ -3503,7 +2392,6 @@ static struct caam_aead_alg driver_aeads
9846 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9847 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9848 OP_ALG_AAI_HMAC_PRECOMP,
9849 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9853 @@ -3526,7 +2414,6 @@ static struct caam_aead_alg driver_aeads
9854 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9855 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9856 OP_ALG_AAI_HMAC_PRECOMP,
9857 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9861 @@ -3549,7 +2436,6 @@ static struct caam_aead_alg driver_aeads
9862 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9863 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9864 OP_ALG_AAI_HMAC_PRECOMP,
9865 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9869 @@ -3573,7 +2459,6 @@ static struct caam_aead_alg driver_aeads
9870 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9871 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9872 OP_ALG_AAI_HMAC_PRECOMP,
9873 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9877 @@ -3597,7 +2482,6 @@ static struct caam_aead_alg driver_aeads
9878 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9879 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9880 OP_ALG_AAI_HMAC_PRECOMP,
9881 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9885 @@ -3621,7 +2505,6 @@ static struct caam_aead_alg driver_aeads
9886 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9887 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9888 OP_ALG_AAI_HMAC_PRECOMP,
9889 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9893 @@ -3645,7 +2528,6 @@ static struct caam_aead_alg driver_aeads
9894 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9895 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9896 OP_ALG_AAI_HMAC_PRECOMP,
9897 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9901 @@ -3669,7 +2551,6 @@ static struct caam_aead_alg driver_aeads
9902 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9903 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9904 OP_ALG_AAI_HMAC_PRECOMP,
9905 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9909 @@ -3693,7 +2574,6 @@ static struct caam_aead_alg driver_aeads
9910 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9911 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9912 OP_ALG_AAI_HMAC_PRECOMP,
9913 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9917 @@ -3717,7 +2597,6 @@ static struct caam_aead_alg driver_aeads
9918 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9919 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9920 OP_ALG_AAI_HMAC_PRECOMP,
9921 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9925 @@ -3741,7 +2620,6 @@ static struct caam_aead_alg driver_aeads
9926 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9927 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9928 OP_ALG_AAI_HMAC_PRECOMP,
9929 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9933 @@ -3765,7 +2643,6 @@ static struct caam_aead_alg driver_aeads
9934 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9935 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9936 OP_ALG_AAI_HMAC_PRECOMP,
9937 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9941 @@ -3789,7 +2666,6 @@ static struct caam_aead_alg driver_aeads
9942 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9943 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9944 OP_ALG_AAI_HMAC_PRECOMP,
9945 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9949 @@ -3812,7 +2688,6 @@ static struct caam_aead_alg driver_aeads
9950 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9951 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9952 OP_ALG_AAI_HMAC_PRECOMP,
9953 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9957 @@ -3835,7 +2710,6 @@ static struct caam_aead_alg driver_aeads
9958 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9959 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9960 OP_ALG_AAI_HMAC_PRECOMP,
9961 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9965 @@ -3858,7 +2732,6 @@ static struct caam_aead_alg driver_aeads
9966 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9967 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9968 OP_ALG_AAI_HMAC_PRECOMP,
9969 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9973 @@ -3881,7 +2754,6 @@ static struct caam_aead_alg driver_aeads
9974 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9975 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9976 OP_ALG_AAI_HMAC_PRECOMP,
9977 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9981 @@ -3904,7 +2776,6 @@ static struct caam_aead_alg driver_aeads
9982 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9983 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9984 OP_ALG_AAI_HMAC_PRECOMP,
9985 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9989 @@ -3927,7 +2798,6 @@ static struct caam_aead_alg driver_aeads
9990 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9991 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9992 OP_ALG_AAI_HMAC_PRECOMP,
9993 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9997 @@ -3950,7 +2820,6 @@ static struct caam_aead_alg driver_aeads
9998 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
9999 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10000 OP_ALG_AAI_HMAC_PRECOMP,
10001 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10005 @@ -3973,7 +2842,6 @@ static struct caam_aead_alg driver_aeads
10006 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10007 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10008 OP_ALG_AAI_HMAC_PRECOMP,
10009 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10013 @@ -3996,7 +2864,6 @@ static struct caam_aead_alg driver_aeads
10014 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10015 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10016 OP_ALG_AAI_HMAC_PRECOMP,
10017 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10021 @@ -4019,7 +2886,6 @@ static struct caam_aead_alg driver_aeads
10022 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10023 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10024 OP_ALG_AAI_HMAC_PRECOMP,
10025 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10029 @@ -4042,7 +2908,6 @@ static struct caam_aead_alg driver_aeads
10030 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10031 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10032 OP_ALG_AAI_HMAC_PRECOMP,
10033 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10037 @@ -4065,7 +2930,6 @@ static struct caam_aead_alg driver_aeads
10038 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10039 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10040 OP_ALG_AAI_HMAC_PRECOMP,
10041 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10045 @@ -4090,7 +2954,6 @@ static struct caam_aead_alg driver_aeads
10046 OP_ALG_AAI_CTR_MOD128,
10047 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10048 OP_ALG_AAI_HMAC_PRECOMP,
10049 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10053 @@ -4115,7 +2978,6 @@ static struct caam_aead_alg driver_aeads
10054 OP_ALG_AAI_CTR_MOD128,
10055 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10056 OP_ALG_AAI_HMAC_PRECOMP,
10057 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10061 @@ -4141,7 +3003,6 @@ static struct caam_aead_alg driver_aeads
10062 OP_ALG_AAI_CTR_MOD128,
10063 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10064 OP_ALG_AAI_HMAC_PRECOMP,
10065 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10069 @@ -4166,7 +3027,6 @@ static struct caam_aead_alg driver_aeads
10070 OP_ALG_AAI_CTR_MOD128,
10071 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10072 OP_ALG_AAI_HMAC_PRECOMP,
10073 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10077 @@ -4192,7 +3052,6 @@ static struct caam_aead_alg driver_aeads
10078 OP_ALG_AAI_CTR_MOD128,
10079 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10080 OP_ALG_AAI_HMAC_PRECOMP,
10081 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10085 @@ -4217,7 +3076,6 @@ static struct caam_aead_alg driver_aeads
10086 OP_ALG_AAI_CTR_MOD128,
10087 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10088 OP_ALG_AAI_HMAC_PRECOMP,
10089 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10093 @@ -4243,7 +3101,6 @@ static struct caam_aead_alg driver_aeads
10094 OP_ALG_AAI_CTR_MOD128,
10095 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10096 OP_ALG_AAI_HMAC_PRECOMP,
10097 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10101 @@ -4268,7 +3125,6 @@ static struct caam_aead_alg driver_aeads
10102 OP_ALG_AAI_CTR_MOD128,
10103 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10104 OP_ALG_AAI_HMAC_PRECOMP,
10105 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10109 @@ -4294,7 +3150,6 @@ static struct caam_aead_alg driver_aeads
10110 OP_ALG_AAI_CTR_MOD128,
10111 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10112 OP_ALG_AAI_HMAC_PRECOMP,
10113 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10117 @@ -4319,7 +3174,6 @@ static struct caam_aead_alg driver_aeads
10118 OP_ALG_AAI_CTR_MOD128,
10119 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10120 OP_ALG_AAI_HMAC_PRECOMP,
10121 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10125 @@ -4345,7 +3199,6 @@ static struct caam_aead_alg driver_aeads
10126 OP_ALG_AAI_CTR_MOD128,
10127 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10128 OP_ALG_AAI_HMAC_PRECOMP,
10129 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10133 @@ -4370,7 +3223,6 @@ static struct caam_aead_alg driver_aeads
10134 OP_ALG_AAI_CTR_MOD128,
10135 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10136 OP_ALG_AAI_HMAC_PRECOMP,
10137 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10141 @@ -4385,16 +3237,34 @@ struct caam_crypto_alg {
10143 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
10145 + dma_addr_t dma_addr;
10147 ctx->jrdev = caam_jr_alloc();
10148 if (IS_ERR(ctx->jrdev)) {
10149 pr_err("Job Ring Device allocation for transform failed\n");
10150 return PTR_ERR(ctx->jrdev);
10153 + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
10154 + offsetof(struct caam_ctx,
10155 + sh_desc_enc_dma),
10156 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10157 + if (dma_mapping_error(ctx->jrdev, dma_addr)) {
10158 + dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
10159 + caam_jr_free(ctx->jrdev);
10163 + ctx->sh_desc_enc_dma = dma_addr;
10164 + ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
10166 + ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
10168 + ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
10170 /* copy descriptor header template value */
10171 - ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10172 - ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10173 - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
10174 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10175 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10179 @@ -4421,25 +3291,9 @@ static int caam_aead_init(struct crypto_
10181 static void caam_exit_common(struct caam_ctx *ctx)
10183 - if (ctx->sh_desc_enc_dma &&
10184 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
10185 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
10186 - desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
10187 - if (ctx->sh_desc_dec_dma &&
10188 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
10189 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
10190 - desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
10191 - if (ctx->sh_desc_givenc_dma &&
10192 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
10193 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
10194 - desc_bytes(ctx->sh_desc_givenc),
10196 - if (ctx->key_dma &&
10197 - !dma_mapping_error(ctx->jrdev, ctx->key_dma))
10198 - dma_unmap_single(ctx->jrdev, ctx->key_dma,
10199 - ctx->enckeylen + ctx->split_key_pad_len,
10202 + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
10203 + offsetof(struct caam_ctx, sh_desc_enc_dma),
10204 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10205 caam_jr_free(ctx->jrdev);
10208 @@ -4515,7 +3369,6 @@ static struct caam_crypto_alg *caam_alg_
10210 t_alg->caam.class1_alg_type = template->class1_alg_type;
10211 t_alg->caam.class2_alg_type = template->class2_alg_type;
10212 - t_alg->caam.alg_op = template->alg_op;
10217 +++ b/drivers/crypto/caam/caamalg_desc.c
10220 + * Shared descriptors for aead, ablkcipher algorithms
10222 + * Copyright 2016 NXP
10225 +#include "compat.h"
10226 +#include "desc_constr.h"
10227 +#include "caamalg_desc.h"
10230 + * For aead functions, read payload and write payload,
10231 + * both of which are specified in req->src and req->dst
10233 +static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
10235 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10236 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
10237 + KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
10240 +/* Set DK bit in class 1 operation if shared */
10241 +static inline void append_dec_op1(u32 *desc, u32 type)
10243 + u32 *jump_cmd, *uncond_jump_cmd;
10245 + /* DK bit is valid only for AES */
10246 + if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
10247 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10252 + jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
10253 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10255 + uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10256 + set_jump_tgt_here(desc, jump_cmd);
10257 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10258 + OP_ALG_DECRYPT | OP_ALG_AAI_DK);
10259 + set_jump_tgt_here(desc, uncond_jump_cmd);
10263 + * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
10264 + * (non-protocol) with no (null) encryption.
10265 + * @desc: pointer to buffer used for descriptor construction
10266 + * @adata: pointer to authentication transform definitions. Note that since a
10267 + * split key is to be used, the size of the split key itself is
10268 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10269 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10270 + * @icvsize: integrity check value (ICV) size (truncated or full)
10272 + * Note: Requires an MDHA split key.
10274 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
10275 + unsigned int icvsize)
10277 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
10279 + init_sh_desc(desc, HDR_SHARE_SERIAL);
10281 + /* Skip if already shared */
10282 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10284 + if (adata->key_inline)
10285 + append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10286 + adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
10289 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10290 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10291 + set_jump_tgt_here(desc, key_jump_cmd);
10293 + /* assoclen + cryptlen = seqinlen */
10294 + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
10296 + /* Prepare to read and write cryptlen + assoclen bytes */
10297 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10298 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10301 + * MOVE_LEN opcode is not available in all SEC HW revisions,
10302 + * thus need to do some magic, i.e. self-patch the descriptor
10305 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10306 + MOVE_DEST_MATH3 |
10307 + (0x6 << MOVE_LEN_SHIFT));
10308 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
10309 + MOVE_DEST_DESCBUF |
10311 + (0x8 << MOVE_LEN_SHIFT));
10313 + /* Class 2 operation */
10314 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10317 + /* Read and write cryptlen bytes */
10318 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10320 + set_move_tgt_here(desc, read_move_cmd);
10321 + set_move_tgt_here(desc, write_move_cmd);
10322 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10323 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10327 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10328 + LDST_SRCDST_BYTE_CONTEXT);
10331 + print_hex_dump(KERN_ERR,
10332 + "aead null enc shdesc@" __stringify(__LINE__)": ",
10333 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10336 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
10339 + * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
10340 + * (non-protocol) with no (null) decryption.
10341 + * @desc: pointer to buffer used for descriptor construction
10342 + * @adata: pointer to authentication transform definitions. Note that since a
10343 + * split key is to be used, the size of the split key itself is
10344 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10345 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10346 + * @icvsize: integrity check value (ICV) size (truncated or full)
10348 + * Note: Requires an MDHA split key.
10350 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
10351 + unsigned int icvsize)
10353 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
10355 + init_sh_desc(desc, HDR_SHARE_SERIAL);
10357 + /* Skip if already shared */
10358 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10360 + if (adata->key_inline)
10361 + append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10362 + adata->keylen, CLASS_2 |
10363 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10365 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10366 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10367 + set_jump_tgt_here(desc, key_jump_cmd);
10369 + /* Class 2 operation */
10370 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10371 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10373 + /* assoclen + cryptlen = seqoutlen */
10374 + append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10376 + /* Prepare to read and write cryptlen + assoclen bytes */
10377 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
10378 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
10381 + * MOVE_LEN opcode is not available in all SEC HW revisions,
10382 + * thus need to do some magic, i.e. self-patch the descriptor
10385 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10386 + MOVE_DEST_MATH2 |
10387 + (0x6 << MOVE_LEN_SHIFT));
10388 + write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
10389 + MOVE_DEST_DESCBUF |
10391 + (0x8 << MOVE_LEN_SHIFT));
10393 + /* Read and write cryptlen bytes */
10394 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10397 + * Insert a NOP here, since we need at least 4 instructions between
10398 + * code patching the descriptor buffer and the location being patched.
10400 + jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10401 + set_jump_tgt_here(desc, jump_cmd);
10403 + set_move_tgt_here(desc, read_move_cmd);
10404 + set_move_tgt_here(desc, write_move_cmd);
10405 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10406 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10408 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10411 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10412 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10415 + print_hex_dump(KERN_ERR,
10416 + "aead null dec shdesc@" __stringify(__LINE__)": ",
10417 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10420 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
10422 +static void init_sh_desc_key_aead(u32 * const desc,
10423 + struct alginfo * const cdata,
10424 + struct alginfo * const adata,
10425 + const bool is_rfc3686, u32 *nonce)
10427 + u32 *key_jump_cmd;
10428 + unsigned int enckeylen = cdata->keylen;
10430 + /* Note: Context registers are saved. */
10431 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
10433 + /* Skip if already shared */
10434 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10438 + * RFC3686 specific:
10439 + * | key = {AUTH_KEY, ENC_KEY, NONCE}
10440 + * | enckeylen = encryption key size + nonce size
10443 + enckeylen -= CTR_RFC3686_NONCE_SIZE;
10445 + if (adata->key_inline)
10446 + append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10447 + adata->keylen, CLASS_2 |
10448 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10450 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10451 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10453 + if (cdata->key_inline)
10454 + append_key_as_imm(desc, cdata->key_virt, enckeylen,
10455 + enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
10457 + append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
10458 + KEY_DEST_CLASS_REG);
10460 + /* Load Counter into CONTEXT1 reg */
10461 + if (is_rfc3686) {
10462 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
10463 + LDST_CLASS_IND_CCB |
10464 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
10465 + append_move(desc,
10466 + MOVE_SRC_OUTFIFO |
10467 + MOVE_DEST_CLASS1CTX |
10468 + (16 << MOVE_OFFSET_SHIFT) |
10469 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
10472 + set_jump_tgt_here(desc, key_jump_cmd);
10476 + * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
10477 + * (non-protocol).
10478 + * @desc: pointer to buffer used for descriptor construction
10479 + * @cdata: pointer to block cipher transform definitions
10480 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10481 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10482 + * @adata: pointer to authentication transform definitions. Note that since a
10483 + * split key is to be used, the size of the split key itself is
10484 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10485 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10486 + * @ivsize: initialization vector size
10487 + * @icvsize: integrity check value (ICV) size (truncated or full)
10488 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10489 + * @nonce: pointer to rfc3686 nonce
10490 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10491 + * @is_qi: true when called from caam/qi
10493 + * Note: Requires an MDHA split key.
10495 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
10496 + struct alginfo *adata, unsigned int ivsize,
10497 + unsigned int icvsize, const bool is_rfc3686,
10498 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
10500 + /* Note: Context registers are saved. */
10501 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10503 + /* Class 2 operation */
10504 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10508 + u32 *wait_load_cmd;
10510 + /* REG3 = assoclen */
10511 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10512 + LDST_SRCDST_WORD_DECO_MATH3 |
10513 + (4 << LDST_OFFSET_SHIFT));
10515 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10516 + JUMP_COND_CALM | JUMP_COND_NCP |
10517 + JUMP_COND_NOP | JUMP_COND_NIP |
10519 + set_jump_tgt_here(desc, wait_load_cmd);
10521 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10522 + LDST_SRCDST_BYTE_CONTEXT |
10523 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10526 + /* Read and write assoclen bytes */
10527 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10528 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10530 + /* Skip assoc data */
10531 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10533 + /* read assoc before reading payload */
10534 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10537 + /* Load Counter into CONTEXT1 reg */
10539 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10540 + LDST_SRCDST_BYTE_CONTEXT |
10541 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10542 + LDST_OFFSET_SHIFT));
10544 + /* Class 1 operation */
10545 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10548 + /* Read and write cryptlen bytes */
10549 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10550 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10551 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
10554 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10555 + LDST_SRCDST_BYTE_CONTEXT);
10558 + print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
10559 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10562 +EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
10565 + * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
10566 + * (non-protocol).
10567 + * @desc: pointer to buffer used for descriptor construction
10568 + * @cdata: pointer to block cipher transform definitions
10569 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10570 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10571 + * @adata: pointer to authentication transform definitions. Note that since a
10572 + * split key is to be used, the size of the split key itself is
10573 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10574 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10575 + * @ivsize: initialization vector size
10576 + * @icvsize: integrity check value (ICV) size (truncated or full)
10577 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10578 + * @nonce: pointer to rfc3686 nonce
10579 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10580 + * @is_qi: true when called from caam/qi
10582 + * Note: Requires an MDHA split key.
10584 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
10585 + struct alginfo *adata, unsigned int ivsize,
10586 + unsigned int icvsize, const bool geniv,
10587 + const bool is_rfc3686, u32 *nonce,
10588 + const u32 ctx1_iv_off, const bool is_qi)
10590 + /* Note: Context registers are saved. */
10591 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10593 + /* Class 2 operation */
10594 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10595 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10598 + u32 *wait_load_cmd;
10600 + /* REG3 = assoclen */
10601 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10602 + LDST_SRCDST_WORD_DECO_MATH3 |
10603 + (4 << LDST_OFFSET_SHIFT));
10605 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10606 + JUMP_COND_CALM | JUMP_COND_NCP |
10607 + JUMP_COND_NOP | JUMP_COND_NIP |
10609 + set_jump_tgt_here(desc, wait_load_cmd);
10612 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10613 + LDST_SRCDST_BYTE_CONTEXT |
10614 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10617 + /* Read and write assoclen bytes */
10618 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10620 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
10622 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10624 + /* Skip assoc data */
10625 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10627 + /* read assoc before reading payload */
10628 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10632 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10633 + LDST_SRCDST_BYTE_CONTEXT |
10634 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10635 + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
10636 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
10639 + /* Load Counter into CONTEXT1 reg */
10641 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10642 + LDST_SRCDST_BYTE_CONTEXT |
10643 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10644 + LDST_OFFSET_SHIFT));
10646 + /* Choose operation */
10648 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10651 + append_dec_op1(desc, cdata->algtype);
10653 + /* Read and write cryptlen bytes */
10654 + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10655 + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10656 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
10659 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10660 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10663 + print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
10664 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10667 +EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
10670 + * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
10671 + * (non-protocol) with HW-generated initialization
10673 + * @desc: pointer to buffer used for descriptor construction
10674 + * @cdata: pointer to block cipher transform definitions
10675 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10676 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10677 + * @adata: pointer to authentication transform definitions. Note that since a
10678 + * split key is to be used, the size of the split key itself is
10679 + * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
10680 + * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10681 + * @ivsize: initialization vector size
10682 + * @icvsize: integrity check value (ICV) size (truncated or full)
10683 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10684 + * @nonce: pointer to rfc3686 nonce
10685 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10686 + * @is_qi: true when called from caam/qi
10688 + * Note: Requires an MDHA split key.
10690 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
10691 + struct alginfo *adata, unsigned int ivsize,
10692 + unsigned int icvsize, const bool is_rfc3686,
10693 + u32 *nonce, const u32 ctx1_iv_off,
10694 + const bool is_qi)
10696 + u32 geniv, moveiv;
10698 + /* Note: Context registers are saved. */
10699 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
10702 + u32 *wait_load_cmd;
10704 + /* REG3 = assoclen */
10705 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10706 + LDST_SRCDST_WORD_DECO_MATH3 |
10707 + (4 << LDST_OFFSET_SHIFT));
10709 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10710 + JUMP_COND_CALM | JUMP_COND_NCP |
10711 + JUMP_COND_NOP | JUMP_COND_NIP |
10713 + set_jump_tgt_here(desc, wait_load_cmd);
10716 + if (is_rfc3686) {
10718 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10719 + LDST_SRCDST_BYTE_CONTEXT |
10720 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10725 + /* Generate IV */
10726 + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
10727 + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
10728 + NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10729 + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
10730 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10731 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10732 + append_move(desc, MOVE_WAITCOMP |
10733 + MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
10734 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10735 + (ivsize << MOVE_LEN_SHIFT));
10736 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10739 + /* Copy IV to class 1 context */
10740 + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
10741 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10742 + (ivsize << MOVE_LEN_SHIFT));
10744 + /* Return to encryption */
10745 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10748 + /* Read and write assoclen bytes */
10749 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10750 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10752 + /* Skip assoc data */
10753 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10755 + /* read assoc before reading payload */
10756 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10759 + /* Copy iv from outfifo to class 2 fifo */
10760 + moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
10761 + NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10762 + append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
10763 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10764 + append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
10765 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10767 + /* Load Counter into CONTEXT1 reg */
10769 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10770 + LDST_SRCDST_BYTE_CONTEXT |
10771 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10772 + LDST_OFFSET_SHIFT));
10774 + /* Class 1 operation */
10775 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10778 + /* Will write ivsize + cryptlen */
10779 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10781 + /* Not need to reload iv */
10782 + append_seq_fifo_load(desc, ivsize,
10783 + FIFOLD_CLASS_SKIP);
10785 + /* Will read cryptlen */
10786 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10787 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
10788 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
10789 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10792 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10793 + LDST_SRCDST_BYTE_CONTEXT);
10796 + print_hex_dump(KERN_ERR,
10797 + "aead givenc shdesc@" __stringify(__LINE__)": ",
10798 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10801 +EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
10804 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
10805 + * @desc: pointer to buffer used for descriptor construction
10806 + * @cdata: pointer to block cipher transform definitions
10807 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
10808 + * with OP_ALG_AAI_CBC
10809 + * @adata: pointer to authentication transform definitions. Note that since a
10810 + * split key is to be used, the size of the split key itself is
10811 + * specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ANDed with
10812 + * OP_ALG_AAI_HMAC_PRECOMP.
10813 + * @assoclen: associated data length
10814 + * @ivsize: initialization vector size
10815 + * @authsize: authentication data size
10816 + * @blocksize: block cipher size
10818 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
10819 + struct alginfo *adata, unsigned int assoclen,
10820 + unsigned int ivsize, unsigned int authsize,
10821 + unsigned int blocksize)
10823 + u32 *key_jump_cmd, *zero_payload_jump_cmd;
10824 + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
10827 + * Compute the index (in bytes) for the LOAD with destination of
10828 + * Class 1 Data Size Register and for the LOAD that generates padding
10830 + if (adata->key_inline) {
10831 + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10832 + cdata->keylen - 4 * CAAM_CMD_SZ;
10833 + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10834 + cdata->keylen - 2 * CAAM_CMD_SZ;
10836 + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10838 + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10842 + stidx = 1 << HDR_START_IDX_SHIFT;
10843 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
10845 + /* skip key loading if they are loaded due to sharing */
10846 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10849 + if (adata->key_inline)
10850 + append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
10851 + adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
10854 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10855 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10857 + if (cdata->key_inline)
10858 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
10859 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
10861 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
10862 + KEY_DEST_CLASS_REG);
10864 + set_jump_tgt_here(desc, key_jump_cmd);
10866 + /* class 2 operation */
10867 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10869 + /* class 1 operation */
10870 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10873 + /* payloadlen = input data length - (assoclen + ivlen) */
10874 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
10876 + /* math1 = payloadlen + icvlen */
10877 + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
10879 + /* padlen = block_size - math1 % block_size */
10880 + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
10881 + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
10883 + /* cryptlen = payloadlen + icvlen + padlen */
10884 + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
10887 + * update immediate data with the padding length value
10888 + * for the LOAD in the class 1 data size register.
10890 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
10891 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
10892 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
10893 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
10895 + /* overwrite PL field for the padding iNFO FIFO entry */
10896 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
10897 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
10898 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
10899 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
10901 + /* store encrypted payload, icv and padding */
10902 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
10904 + /* if payload length is zero, jump to zero-payload commands */
10905 + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
10906 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
10907 + JUMP_COND_MATH_Z);
10909 + /* load iv in context1 */
10910 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
10911 + LDST_CLASS_1_CCB | ivsize);
10913 + /* read assoc for authentication */
10914 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
10915 + FIFOLD_TYPE_MSG);
10916 + /* insnoop payload */
10917 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
10918 + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
10920 + /* jump the zero-payload commands */
10921 + append_jump(desc, JUMP_TEST_ALL | 3);
10923 + /* zero-payload commands */
10924 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
10926 + /* load iv in context1 */
10927 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
10928 + LDST_CLASS_1_CCB | ivsize);
10930 + /* assoc data is the only data for authentication */
10931 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
10932 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
10934 + /* send icv to encryption */
10935 + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
10938 + /* update class 1 data size register with padding length */
10939 + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
10940 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10942 + /* generate padding and send it to encryption */
10943 + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
10944 + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
10945 + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
10946 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10949 + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
10950 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
10951 + desc_bytes(desc), 1);
10954 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
10957 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
10958 + * @desc: pointer to buffer used for descriptor construction
10959 + * @cdata: pointer to block cipher transform definitions
10960 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
10961 + * with OP_ALG_AAI_CBC
10962 + * @adata: pointer to authentication transform definitions. Note that since a
10963 + * split key is to be used, the size of the split key itself is
10964 + * specified. Valid algorithm values OP_ALG_ALGSEL_ SHA1 ANDed with
10965 + * OP_ALG_AAI_HMAC_PRECOMP.
10966 + * @assoclen: associated data length
10967 + * @ivsize: initialization vector size
10968 + * @authsize: authentication data size
10969 + * @blocksize: block cipher size
10971 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
10972 + struct alginfo *adata, unsigned int assoclen,
10973 + unsigned int ivsize, unsigned int authsize,
10974 + unsigned int blocksize)
10976 + u32 stidx, jumpback;
10977 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
10979 + * Pointer Size bool determines the size of address pointers.
10980 + * false - Pointers fit in one 32-bit word.
10981 + * true - Pointers fit in two 32-bit words.
10983 + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
10985 + stidx = 1 << HDR_START_IDX_SHIFT;
10986 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
10988 + /* skip key loading if they are loaded due to sharing */
10989 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10992 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
10993 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
10995 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
10996 + KEY_DEST_CLASS_REG);
10998 + set_jump_tgt_here(desc, key_jump_cmd);
11000 + /* class 2 operation */
11001 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
11002 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11003 + /* class 1 operation */
11004 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11007 + /* VSIL = input data length - 2 * block_size */
11008 + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
11012 + * payloadlen + icvlen + padlen = input data length - (assoclen +
11015 + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
11017 + /* skip data to the last but one cipher block */
11018 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
11020 + /* load iv for the last cipher block */
11021 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11022 + LDST_CLASS_1_CCB | ivsize);
11024 + /* read last cipher block */
11025 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11026 + FIFOLD_TYPE_LAST1 | blocksize);
11028 + /* move decrypted block into math0 and math1 */
11029 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
11032 + /* reset AES CHA */
11033 + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
11034 + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
11036 + /* rewind input sequence */
11037 + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
11039 + /* key1 is in decryption form */
11040 + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
11041 + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
11043 + /* load iv in context1 */
11044 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
11045 + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
11047 + /* read sequence number */
11048 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
11049 + /* load Type, Version and Len fields in math0 */
11050 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
11051 + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
11053 + /* compute (padlen - 1) */
11054 + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
11056 + /* math2 = icvlen + (padlen - 1) + 1 */
11057 + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
11059 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11061 + /* VSOL = payloadlen + icvlen + padlen */
11062 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
11064 +#ifdef __LITTLE_ENDIAN
11065 + append_moveb(desc, MOVE_WAITCOMP |
11066 + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
11068 + /* update Len field */
11069 + append_math_sub(desc, REG0, REG0, REG2, 8);
11071 + /* store decrypted payload, icv and padding */
11072 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
11074 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/
11075 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11077 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11078 + JUMP_COND_MATH_Z);
11080 + /* send Type, Version and Len(pre ICV) fields to authentication */
11081 + append_move(desc, MOVE_WAITCOMP |
11082 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11083 + (3 << MOVE_OFFSET_SHIFT) | 5);
11085 + /* outsnooping payload */
11086 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
11087 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
11089 + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
11091 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11092 + /* send Type, Version and Len(pre ICV) fields to authentication */
11093 + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
11094 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11095 + (3 << MOVE_OFFSET_SHIFT) | 5);
11097 + set_jump_tgt_here(desc, skip_zero_jump_cmd);
11098 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
11100 + /* load icvlen and padlen */
11101 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11102 + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
11104 + /* VSIL = (payloadlen + icvlen + padlen) - icvlen + padlen */
11105 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11108 + * Start a new input sequence using the SEQ OUT PTR command options,
11109 + * pointer and length used when the current output sequence was defined.
11113 + * Move the lower 32 bits of Shared Descriptor address, the
11114 + * SEQ OUT PTR command, Output Pointer (2 words) and
11115 + * Output Length into math registers.
11117 +#ifdef __LITTLE_ENDIAN
11118 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11119 + MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) |
11122 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11123 + MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11126 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
11127 + append_math_and_imm_u32(desc, REG0, REG0, IMM,
11128 + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
11129 + /* Append a JUMP command after the copied fields */
11130 + jumpback = CMD_JUMP | (char)-9;
11131 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11132 + LDST_SRCDST_WORD_DECO_MATH2 |
11133 + (4 << LDST_OFFSET_SHIFT));
11134 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11135 + /* Move the updated fields back to the Job Descriptor */
11136 +#ifdef __LITTLE_ENDIAN
11137 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11138 + MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) |
11141 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11142 + MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11146 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
11147 + * and then jump back to the next command from the
11148 + * Shared Descriptor.
11150 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
11153 + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
11154 + * Output Length into math registers.
11156 +#ifdef __LITTLE_ENDIAN
11157 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11158 + MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11161 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11162 + MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) |
11165 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
11166 + append_math_and_imm_u64(desc, REG0, REG0, IMM,
11167 + ~(((u64)(CMD_SEQ_IN_PTR ^
11168 + CMD_SEQ_OUT_PTR)) << 32));
11169 + /* Append a JUMP command after the copied fields */
11170 + jumpback = CMD_JUMP | (char)-7;
11171 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11172 + LDST_SRCDST_WORD_DECO_MATH1 |
11173 + (4 << LDST_OFFSET_SHIFT));
11174 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11175 + /* Move the updated fields back to the Job Descriptor */
11176 +#ifdef __LITTLE_ENDIAN
11177 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11178 + MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11181 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11182 + MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) |
11186 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
11187 + * and then jump back to the next command from the
11188 + * Shared Descriptor.
11190 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
11193 + /* skip payload */
11194 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
11196 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
11197 + FIFOLD_TYPE_LAST2 | authsize);
11200 + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
11201 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
11202 + desc_bytes(desc), 1);
11205 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
11208 + * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
11209 + * @desc: pointer to buffer used for descriptor construction
11210 + * @cdata: pointer to block cipher transform definitions
11211 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11212 + * @ivsize: initialization vector size
11213 + * @icvsize: integrity check value (ICV) size (truncated or full)
11214 + * @is_qi: true when called from caam/qi
11216 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
11217 + unsigned int ivsize, unsigned int icvsize,
11218 + const bool is_qi)
11220 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
11221 + *zero_assoc_jump_cmd2;
11223 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11225 + /* skip key loading if they are loaded due to sharing */
11226 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11228 + if (cdata->key_inline)
11229 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11230 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11232 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11233 + KEY_DEST_CLASS_REG);
11234 + set_jump_tgt_here(desc, key_jump_cmd);
11236 + /* class 1 operation */
11237 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11241 + u32 *wait_load_cmd;
11243 + /* REG3 = assoclen */
11244 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11245 + LDST_SRCDST_WORD_DECO_MATH3 |
11246 + (4 << LDST_OFFSET_SHIFT));
11248 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11249 + JUMP_COND_CALM | JUMP_COND_NCP |
11250 + JUMP_COND_NOP | JUMP_COND_NIP |
11252 + set_jump_tgt_here(desc, wait_load_cmd);
11254 + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
11257 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
11261 + /* if assoclen + cryptlen is ZERO, skip to ICV write */
11262 + zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
11263 + JUMP_COND_MATH_Z);
11266 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11267 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11269 + /* if assoclen is ZERO, skip reading the assoc data */
11270 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11271 + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11272 + JUMP_COND_MATH_Z);
11274 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11276 + /* skip assoc data */
11277 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11279 + /* cryptlen = seqinlen - assoclen */
11280 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
11282 + /* if cryptlen is ZERO jump to zero-payload commands */
11283 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11284 + JUMP_COND_MATH_Z);
11286 + /* read assoc data */
11287 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11288 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11289 + set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11291 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11293 + /* write encrypted data */
11294 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11296 + /* read payload data */
11297 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11298 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11300 + /* jump to ICV writing */
11302 + append_jump(desc, JUMP_TEST_ALL | 4);
11304 + append_jump(desc, JUMP_TEST_ALL | 2);
11306 + /* zero-payload commands */
11307 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11309 + /* read assoc data */
11310 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11311 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
11313 + /* jump to ICV writing */
11314 + append_jump(desc, JUMP_TEST_ALL | 2);
11316 + /* There is no input data */
11317 + set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
11320 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11321 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
11322 + FIFOLD_TYPE_LAST1);
11325 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11326 + LDST_SRCDST_BYTE_CONTEXT);
11329 + print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
11330 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11333 +EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
11336 + * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
11337 + * @desc: pointer to buffer used for descriptor construction
11338 + * @cdata: pointer to block cipher transform definitions
11339 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11340 + * @ivsize: initialization vector size
11341 + * @icvsize: integrity check value (ICV) size (truncated or full)
11342 + * @is_qi: true when called from caam/qi
11344 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
11345 + unsigned int ivsize, unsigned int icvsize,
11346 + const bool is_qi)
11348 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
11350 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11352 + /* skip key loading if they are loaded due to sharing */
11353 + key_jump_cmd = append_jump(desc, JUMP_JSL |
11354 + JUMP_TEST_ALL | JUMP_COND_SHRD);
11355 + if (cdata->key_inline)
11356 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11357 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11359 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11360 + KEY_DEST_CLASS_REG);
11361 + set_jump_tgt_here(desc, key_jump_cmd);
11363 + /* class 1 operation */
11364 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11365 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11368 + u32 *wait_load_cmd;
11370 + /* REG3 = assoclen */
11371 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11372 + LDST_SRCDST_WORD_DECO_MATH3 |
11373 + (4 << LDST_OFFSET_SHIFT));
11375 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11376 + JUMP_COND_CALM | JUMP_COND_NCP |
11377 + JUMP_COND_NOP | JUMP_COND_NIP |
11379 + set_jump_tgt_here(desc, wait_load_cmd);
11381 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11382 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11385 + /* if assoclen is ZERO, skip reading the assoc data */
11386 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11387 + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11388 + JUMP_COND_MATH_Z);
11390 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11392 + /* skip assoc data */
11393 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11395 + /* read assoc data */
11396 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11397 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11399 + set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11401 + /* cryptlen = seqoutlen - assoclen */
11402 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11404 + /* jump to zero-payload command if cryptlen is zero */
11405 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11406 + JUMP_COND_MATH_Z);
11408 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11410 + /* store encrypted data */
11411 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11413 + /* read payload data */
11414 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11415 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11417 + /* zero-payload command */
11418 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11421 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11422 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11425 + print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
11426 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11429 +EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
11432 + * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
11433 + * (non-protocol).
11434 + * @desc: pointer to buffer used for descriptor construction
11435 + * @cdata: pointer to block cipher transform definitions
11436 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11437 + * @ivsize: initialization vector size
11438 + * @icvsize: integrity check value (ICV) size (truncated or full)
11439 + * @is_qi: true when called from caam/qi
11441 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
11442 + unsigned int ivsize, unsigned int icvsize,
11443 + const bool is_qi)
11445 + u32 *key_jump_cmd;
11447 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11449 + /* Skip key loading if it is loaded due to sharing */
11450 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11452 + if (cdata->key_inline)
11453 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11454 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11456 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11457 + KEY_DEST_CLASS_REG);
11458 + set_jump_tgt_here(desc, key_jump_cmd);
11460 + /* Class 1 operation */
11461 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11465 + u32 *wait_load_cmd;
11467 + /* REG3 = assoclen */
11468 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11469 + LDST_SRCDST_WORD_DECO_MATH3 |
11470 + (4 << LDST_OFFSET_SHIFT));
11472 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11473 + JUMP_COND_CALM | JUMP_COND_NCP |
11474 + JUMP_COND_NOP | JUMP_COND_NIP |
11476 + set_jump_tgt_here(desc, wait_load_cmd);
11478 + /* Read salt and IV */
11479 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11480 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11482 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11483 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11486 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11487 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11489 + /* Read assoc data */
11490 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11491 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11494 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11496 + /* Will read cryptlen bytes */
11497 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11499 + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11500 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11502 + /* Skip assoc data */
11503 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11505 + /* cryptlen = seqoutlen - assoclen */
11506 + append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
11508 + /* Write encrypted data */
11509 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11511 + /* Read payload data */
11512 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11513 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11516 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11517 + LDST_SRCDST_BYTE_CONTEXT);
11520 + print_hex_dump(KERN_ERR,
11521 + "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
11522 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11525 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
11528 + * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
11529 + * (non-protocol).
11530 + * @desc: pointer to buffer used for descriptor construction
11531 + * @cdata: pointer to block cipher transform definitions
11532 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11533 + * @ivsize: initialization vector size
11534 + * @icvsize: integrity check value (ICV) size (truncated or full)
11535 + * @is_qi: true when called from caam/qi
11537 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
11538 + unsigned int ivsize, unsigned int icvsize,
11539 + const bool is_qi)
11541 + u32 *key_jump_cmd;
11543 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11545 + /* Skip key loading if it is loaded due to sharing */
11546 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11548 + if (cdata->key_inline)
11549 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11550 + cdata->keylen, CLASS_1 |
11551 + KEY_DEST_CLASS_REG);
11553 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11554 + KEY_DEST_CLASS_REG);
11555 + set_jump_tgt_here(desc, key_jump_cmd);
11557 + /* Class 1 operation */
11558 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11559 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11562 + u32 *wait_load_cmd;
11564 + /* REG3 = assoclen */
11565 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11566 + LDST_SRCDST_WORD_DECO_MATH3 |
11567 + (4 << LDST_OFFSET_SHIFT));
11569 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11570 + JUMP_COND_CALM | JUMP_COND_NCP |
11571 + JUMP_COND_NOP | JUMP_COND_NIP |
11573 + set_jump_tgt_here(desc, wait_load_cmd);
11575 + /* Read salt and IV */
11576 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11577 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11579 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11580 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11583 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11584 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11586 + /* Read assoc data */
11587 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11588 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11591 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11593 + /* Will read cryptlen bytes */
11594 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
11596 + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11597 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11599 + /* Skip assoc data */
11600 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11602 + /* Will write cryptlen bytes */
11603 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11605 + /* Store payload data */
11606 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11608 + /* Read encrypted data */
11609 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11610 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11613 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11614 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11617 + print_hex_dump(KERN_ERR,
11618 + "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
11619 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11622 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
11625 + * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
11626 + * (non-protocol).
11627 + * @desc: pointer to buffer used for descriptor construction
11628 + * @cdata: pointer to block cipher transform definitions
11629 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11630 + * @ivsize: initialization vector size
11631 + * @icvsize: integrity check value (ICV) size (truncated or full)
11632 + * @is_qi: true when called from caam/qi
11634 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
11635 + unsigned int ivsize, unsigned int icvsize,
11636 + const bool is_qi)
11638 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11640 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11642 + /* Skip key loading if it is loaded due to sharing */
11643 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11645 + if (cdata->key_inline)
11646 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11647 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11649 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11650 + KEY_DEST_CLASS_REG);
11651 + set_jump_tgt_here(desc, key_jump_cmd);
11653 + /* Class 1 operation */
11654 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11658 + /* assoclen is not needed, skip it */
11659 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11661 + /* Read salt and IV */
11662 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11663 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11665 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11666 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11669 + /* assoclen + cryptlen = seqinlen */
11670 + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
11673 + * MOVE_LEN opcode is not available in all SEC HW revisions,
11674 + * thus need to do some magic, i.e. self-patch the descriptor
11677 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11678 + (0x6 << MOVE_LEN_SHIFT));
11679 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11680 + (0x8 << MOVE_LEN_SHIFT));
11682 + /* Will read assoclen + cryptlen bytes */
11683 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11685 + /* Will write assoclen + cryptlen bytes */
11686 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11688 + /* Read and write assoclen + cryptlen bytes */
11689 + aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
11691 + set_move_tgt_here(desc, read_move_cmd);
11692 + set_move_tgt_here(desc, write_move_cmd);
11693 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11694 + /* Move payload data to OFIFO */
11695 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11698 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11699 + LDST_SRCDST_BYTE_CONTEXT);
11702 + print_hex_dump(KERN_ERR,
11703 + "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
11704 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11707 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
11710 + * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
11711 + * (non-protocol).
11712 + * @desc: pointer to buffer used for descriptor construction
11713 + * @cdata: pointer to block cipher transform definitions
11714 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11715 + * @ivsize: initialization vector size
11716 + * @icvsize: integrity check value (ICV) size (truncated or full)
11717 + * @is_qi: true when called from caam/qi
11719 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
11720 + unsigned int ivsize, unsigned int icvsize,
11721 + const bool is_qi)
11723 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11725 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11727 + /* Skip key loading if it is loaded due to sharing */
11728 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11730 + if (cdata->key_inline)
11731 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11732 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11734 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11735 + KEY_DEST_CLASS_REG);
11736 + set_jump_tgt_here(desc, key_jump_cmd);
11738 + /* Class 1 operation */
11739 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11740 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11743 + /* assoclen is not needed, skip it */
11744 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11746 + /* Read salt and IV */
11747 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11748 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11750 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11751 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11754 + /* assoclen + cryptlen = seqoutlen */
11755 + append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11758 + * MOVE_LEN opcode is not available in all SEC HW revisions,
11759 + * thus need to do some magic, i.e. self-patch the descriptor
11762 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11763 + (0x6 << MOVE_LEN_SHIFT));
11764 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11765 + (0x8 << MOVE_LEN_SHIFT));
11767 + /* Will read assoclen + cryptlen bytes */
11768 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11770 + /* Will write assoclen + cryptlen bytes */
11771 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11773 + /* Store payload data */
11774 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11776 + /* In-snoop assoclen + cryptlen data */
11777 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
11778 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
11780 + set_move_tgt_here(desc, read_move_cmd);
11781 + set_move_tgt_here(desc, write_move_cmd);
11782 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11783 + /* Move payload data to OFIFO */
11784 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11785 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
11788 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11789 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11792 + print_hex_dump(KERN_ERR,
11793 + "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
11794 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11797 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
11800 + * For ablkcipher encrypt and decrypt, read from req->src and
11801 + * write to req->dst
11803 +static inline void ablkcipher_append_src_dst(u32 *desc)
11805 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11806 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11807 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
11808 + KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11809 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
11813 + * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
11814 + * @desc: pointer to buffer used for descriptor construction
11815 + * @cdata: pointer to block cipher transform definitions
11816 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11817 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11818 + * @ivsize: initialization vector size
11819 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11820 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11822 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
11823 + unsigned int ivsize, const bool is_rfc3686,
11824 + const u32 ctx1_iv_off)
11826 + u32 *key_jump_cmd;
11828 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11829 + /* Skip if already shared */
11830 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11833 + /* Load class1 key only */
11834 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11835 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11837 + /* Load nonce into CONTEXT1 reg */
11838 + if (is_rfc3686) {
11839 + u8 *nonce = cdata->key_virt + cdata->keylen;
11841 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11842 + LDST_CLASS_IND_CCB |
11843 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11844 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11845 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11846 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11849 + set_jump_tgt_here(desc, key_jump_cmd);
11852 + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11853 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
11855 + /* Load counter into CONTEXT1 reg */
11857 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
11858 + LDST_SRCDST_BYTE_CONTEXT |
11859 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
11860 + LDST_OFFSET_SHIFT));
11862 + /* Load operation */
11863 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11866 + /* Perform operation */
11867 + ablkcipher_append_src_dst(desc);
11870 + print_hex_dump(KERN_ERR,
11871 + "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
11872 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11875 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
11878 + * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
11879 + * @desc: pointer to buffer used for descriptor construction
11880 + * @cdata: pointer to block cipher transform definitions
11881 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11882 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11883 + * @ivsize: initialization vector size
11884 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11885 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11887 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
11888 + unsigned int ivsize, const bool is_rfc3686,
11889 + const u32 ctx1_iv_off)
11891 + u32 *key_jump_cmd;
11893 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11894 + /* Skip if already shared */
11895 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11898 + /* Load class1 key only */
11899 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11900 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11902 + /* Load nonce into CONTEXT1 reg */
11903 + if (is_rfc3686) {
11904 + u8 *nonce = cdata->key_virt + cdata->keylen;
11906 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11907 + LDST_CLASS_IND_CCB |
11908 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11909 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11910 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11911 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11914 + set_jump_tgt_here(desc, key_jump_cmd);
11917 + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11918 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
11920 + /* Load counter into CONTEXT1 reg */
11922 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
11923 + LDST_SRCDST_BYTE_CONTEXT |
11924 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
11925 + LDST_OFFSET_SHIFT));
11927 + /* Choose operation */
11929 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11932 + append_dec_op1(desc, cdata->algtype);
11934 + /* Perform operation */
11935 + ablkcipher_append_src_dst(desc);
11938 + print_hex_dump(KERN_ERR,
11939 + "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
11940 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11943 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
11946 + * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
11947 + * with HW-generated initialization vector.
11948 + * @desc: pointer to buffer used for descriptor construction
11949 + * @cdata: pointer to block cipher transform definitions
11950 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11951 + * with OP_ALG_AAI_CBC.
11952 + * @ivsize: initialization vector size
11953 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11954 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11956 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
11957 + unsigned int ivsize, const bool is_rfc3686,
11958 + const u32 ctx1_iv_off)
11960 + u32 *key_jump_cmd, geniv;
11962 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11963 + /* Skip if already shared */
11964 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11967 + /* Load class1 key only */
11968 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11969 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11971 + /* Load Nonce into CONTEXT1 reg */
11972 + if (is_rfc3686) {
11973 + u8 *nonce = cdata->key_virt + cdata->keylen;
11975 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11976 + LDST_CLASS_IND_CCB |
11977 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11978 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11979 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
11980 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
11982 + set_jump_tgt_here(desc, key_jump_cmd);
11984 + /* Generate IV */
11985 + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
11986 + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
11987 + (ivsize << NFIFOENTRY_DLEN_SHIFT);
11988 + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
11989 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
11990 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11991 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
11992 + MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
11993 + (ctx1_iv_off << MOVE_OFFSET_SHIFT));
11994 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
11996 + /* Copy generated IV to memory */
11997 + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
11998 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12000 + /* Load Counter into CONTEXT1 reg */
12002 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12003 + LDST_SRCDST_BYTE_CONTEXT |
12004 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12005 + LDST_OFFSET_SHIFT));
12008 + append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
12009 + (1 << JUMP_OFFSET_SHIFT));
12011 + /* Load operation */
12012 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12015 + /* Perform operation */
12016 + ablkcipher_append_src_dst(desc);
12019 + print_hex_dump(KERN_ERR,
12020 + "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
12021 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12024 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
12027 + * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
12029 + * @desc: pointer to buffer used for descriptor construction
12030 + * @cdata: pointer to block cipher transform definitions
12031 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
12033 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
12035 + __be64 sector_size = cpu_to_be64(512);
12036 + u32 *key_jump_cmd;
12038 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12039 + /* Skip if already shared */
12040 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12043 + /* Load class1 keys only */
12044 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12045 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12047 + /* Load sector size with index 40 bytes (0x28) */
12048 + append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12049 + LDST_SRCDST_BYTE_CONTEXT |
12050 + (0x28 << LDST_OFFSET_SHIFT));
12052 + set_jump_tgt_here(desc, key_jump_cmd);
12055 + * create sequence for loading the sector index
12056 + * Upper 8B of IV - will be used as sector index
12057 + * Lower 8B of IV - will be discarded
12059 + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12060 + (0x20 << LDST_OFFSET_SHIFT));
12061 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12063 + /* Load operation */
12064 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12067 + /* Perform operation */
12068 + ablkcipher_append_src_dst(desc);
12071 + print_hex_dump(KERN_ERR,
12072 + "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
12073 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12076 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
12079 + * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
12081 + * @desc: pointer to buffer used for descriptor construction
12082 + * @cdata: pointer to block cipher transform definitions
12083 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
12085 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
12087 + __be64 sector_size = cpu_to_be64(512);
12088 + u32 *key_jump_cmd;
12090 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12091 + /* Skip if already shared */
12092 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12095 + /* Load class1 key only */
12096 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12097 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12099 + /* Load sector size with index 40 bytes (0x28) */
12100 + append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12101 + LDST_SRCDST_BYTE_CONTEXT |
12102 + (0x28 << LDST_OFFSET_SHIFT));
12104 + set_jump_tgt_here(desc, key_jump_cmd);
12107 + * create sequence for loading the sector index
12108 + * Upper 8B of IV - will be used as sector index
12109 + * Lower 8B of IV - will be discarded
12111 + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12112 + (0x20 << LDST_OFFSET_SHIFT));
12113 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12115 + /* Load operation */
12116 + append_dec_op1(desc, cdata->algtype);
12118 + /* Perform operation */
12119 + ablkcipher_append_src_dst(desc);
12122 + print_hex_dump(KERN_ERR,
12123 + "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
12124 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12127 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
12129 +MODULE_LICENSE("GPL");
12130 +MODULE_DESCRIPTION("FSL CAAM descriptor support");
12131 +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
12133 +++ b/drivers/crypto/caam/caamalg_desc.h
12136 + * Shared descriptors for aead, ablkcipher algorithms
12138 + * Copyright 2016 NXP
12141 +#ifndef _CAAMALG_DESC_H_
12142 +#define _CAAMALG_DESC_H_
12144 +/* length of descriptors text */
12145 +#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
12146 +#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
12147 +#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
12148 +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
12149 +#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
12150 +#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
12151 +#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
12153 +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
12154 +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
12156 +/* Note: Nonce is counted in cdata.keylen */
12157 +#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
12159 +#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
12160 +#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
12161 +#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
12163 +#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
12164 +#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
12165 +#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
12166 +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
12167 +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
12169 +#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
12170 +#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12171 +#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12172 +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
12173 +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
12175 +#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
12176 +#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
12177 +#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
12178 +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
12179 +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
12181 +#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
12182 +#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
12183 + 20 * CAAM_CMD_SZ)
12184 +#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
12185 + 15 * CAAM_CMD_SZ)
12187 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
12188 + unsigned int icvsize);
12190 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
12191 + unsigned int icvsize);
12193 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
12194 + struct alginfo *adata, unsigned int ivsize,
12195 + unsigned int icvsize, const bool is_rfc3686,
12196 + u32 *nonce, const u32 ctx1_iv_off,
12197 + const bool is_qi);
12199 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
12200 + struct alginfo *adata, unsigned int ivsize,
12201 + unsigned int icvsize, const bool geniv,
12202 + const bool is_rfc3686, u32 *nonce,
12203 + const u32 ctx1_iv_off, const bool is_qi);
12205 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
12206 + struct alginfo *adata, unsigned int ivsize,
12207 + unsigned int icvsize, const bool is_rfc3686,
12208 + u32 *nonce, const u32 ctx1_iv_off,
12209 + const bool is_qi);
12211 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
12212 + struct alginfo *adata, unsigned int assoclen,
12213 + unsigned int ivsize, unsigned int authsize,
12214 + unsigned int blocksize);
12216 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
12217 + struct alginfo *adata, unsigned int assoclen,
12218 + unsigned int ivsize, unsigned int authsize,
12219 + unsigned int blocksize);
12221 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
12222 + unsigned int ivsize, unsigned int icvsize,
12223 + const bool is_qi);
12225 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
12226 + unsigned int ivsize, unsigned int icvsize,
12227 + const bool is_qi);
12229 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
12230 + unsigned int ivsize, unsigned int icvsize,
12231 + const bool is_qi);
12233 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
12234 + unsigned int ivsize, unsigned int icvsize,
12235 + const bool is_qi);
12237 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
12238 + unsigned int ivsize, unsigned int icvsize,
12239 + const bool is_qi);
12241 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
12242 + unsigned int ivsize, unsigned int icvsize,
12243 + const bool is_qi);
12245 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
12246 + unsigned int ivsize, const bool is_rfc3686,
12247 + const u32 ctx1_iv_off);
12249 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
12250 + unsigned int ivsize, const bool is_rfc3686,
12251 + const u32 ctx1_iv_off);
12253 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
12254 + unsigned int ivsize, const bool is_rfc3686,
12255 + const u32 ctx1_iv_off);
12257 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
12259 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
12261 +#endif /* _CAAMALG_DESC_H_ */
12263 +++ b/drivers/crypto/caam/caamalg_qi.c
12266 + * Freescale FSL CAAM support for crypto API over QI backend.
12267 + * Based on caamalg.c
12269 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
12270 + * Copyright 2016-2017 NXP
12273 +#include "compat.h"
12276 +#include "intern.h"
12277 +#include "desc_constr.h"
12278 +#include "error.h"
12279 +#include "sg_sw_qm.h"
12280 +#include "key_gen.h"
12283 +#include "caamalg_desc.h"
12288 +#define CAAM_CRA_PRIORITY 2000
12289 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
12290 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
12291 + SHA512_DIGEST_SIZE * 2)
12293 +#define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
12294 + CAAM_MAX_KEY_SIZE)
12295 +#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
12297 +struct caam_alg_entry {
12298 + int class1_alg_type;
12299 + int class2_alg_type;
12304 +struct caam_aead_alg {
12305 + struct aead_alg aead;
12306 + struct caam_alg_entry caam;
12311 + * per-session context
12314 + struct device *jrdev;
12315 + u32 sh_desc_enc[DESC_MAX_USED_LEN];
12316 + u32 sh_desc_dec[DESC_MAX_USED_LEN];
12317 + u32 sh_desc_givenc[DESC_MAX_USED_LEN];
12318 + u8 key[CAAM_MAX_KEY_SIZE];
12319 + dma_addr_t key_dma;
12320 + struct alginfo adata;
12321 + struct alginfo cdata;
12322 + unsigned int authsize;
12323 + struct device *qidev;
12324 + spinlock_t lock; /* Protects multiple init of driver context */
12325 + struct caam_drv_ctx *drv_ctx[NUM_OP];
12328 +static int aead_set_sh_desc(struct crypto_aead *aead)
12330 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
12331 + typeof(*alg), aead);
12332 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12333 + unsigned int ivsize = crypto_aead_ivsize(aead);
12334 + u32 ctx1_iv_off = 0;
12335 + u32 *nonce = NULL;
12336 + unsigned int data_len[2];
12338 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12339 + OP_ALG_AAI_CTR_MOD128);
12340 + const bool is_rfc3686 = alg->caam.rfc3686;
12342 + if (!ctx->cdata.keylen || !ctx->authsize)
12346 + * AES-CTR needs to load IV in CONTEXT1 reg
12347 + * at an offset of 128bits (16bytes)
12348 + * CONTEXT1[255:128] = IV
12351 + ctx1_iv_off = 16;
12354 + * RFC3686 specific:
12355 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12357 + if (is_rfc3686) {
12358 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12359 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
12360 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
12363 + data_len[0] = ctx->adata.keylen_pad;
12364 + data_len[1] = ctx->cdata.keylen;
12366 + if (alg->caam.geniv)
12369 + /* aead_encrypt shared descriptor */
12370 + if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
12371 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12372 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12373 + ARRAY_SIZE(data_len)) < 0)
12376 + if (inl_mask & 1)
12377 + ctx->adata.key_virt = ctx->key;
12379 + ctx->adata.key_dma = ctx->key_dma;
12381 + if (inl_mask & 2)
12382 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12384 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12386 + ctx->adata.key_inline = !!(inl_mask & 1);
12387 + ctx->cdata.key_inline = !!(inl_mask & 2);
12389 + cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12390 + ivsize, ctx->authsize, is_rfc3686, nonce,
12391 + ctx1_iv_off, true);
12394 + /* aead_decrypt shared descriptor */
12395 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
12396 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12397 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12398 + ARRAY_SIZE(data_len)) < 0)
12401 + if (inl_mask & 1)
12402 + ctx->adata.key_virt = ctx->key;
12404 + ctx->adata.key_dma = ctx->key_dma;
12406 + if (inl_mask & 2)
12407 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12409 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12411 + ctx->adata.key_inline = !!(inl_mask & 1);
12412 + ctx->cdata.key_inline = !!(inl_mask & 2);
12414 + cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12415 + ivsize, ctx->authsize, alg->caam.geniv,
12416 + is_rfc3686, nonce, ctx1_iv_off, true);
12418 + if (!alg->caam.geniv)
12419 + goto skip_givenc;
12421 + /* aead_givencrypt shared descriptor */
12422 + if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
12423 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12424 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12425 + ARRAY_SIZE(data_len)) < 0)
12428 + if (inl_mask & 1)
12429 + ctx->adata.key_virt = ctx->key;
12431 + ctx->adata.key_dma = ctx->key_dma;
12433 + if (inl_mask & 2)
12434 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12436 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12438 + ctx->adata.key_inline = !!(inl_mask & 1);
12439 + ctx->cdata.key_inline = !!(inl_mask & 2);
12441 + cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12442 + ivsize, ctx->authsize, is_rfc3686, nonce,
12443 + ctx1_iv_off, true);
12449 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
12451 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
12453 + ctx->authsize = authsize;
12454 + aead_set_sh_desc(authenc);
12459 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
12460 + unsigned int keylen)
12462 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12463 + struct device *jrdev = ctx->jrdev;
12464 + struct crypto_authenc_keys keys;
12467 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12471 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12472 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
12473 + keys.authkeylen);
12474 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12475 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12478 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12479 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
12484 + /* postpend encryption key to auth split key */
12485 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12486 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12487 + keys.enckeylen, DMA_TO_DEVICE);
12489 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12490 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12491 + ctx->adata.keylen_pad + keys.enckeylen, 1);
12494 + ctx->cdata.keylen = keys.enckeylen;
12496 + ret = aead_set_sh_desc(aead);
12500 + /* Now update the driver contexts with the new shared descriptor */
12501 + if (ctx->drv_ctx[ENCRYPT]) {
12502 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12503 + ctx->sh_desc_enc);
12505 + dev_err(jrdev, "driver enc context update failed\n");
12510 + if (ctx->drv_ctx[DECRYPT]) {
12511 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12512 + ctx->sh_desc_dec);
12514 + dev_err(jrdev, "driver dec context update failed\n");
12521 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
12525 +static int tls_set_sh_desc(struct crypto_aead *tls)
12527 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12528 + unsigned int ivsize = crypto_aead_ivsize(tls);
12529 + unsigned int blocksize = crypto_aead_blocksize(tls);
12530 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
12531 + unsigned int data_len[2];
12534 + if (!ctx->cdata.keylen || !ctx->authsize)
12538 + * TLS 1.0 encrypt shared descriptor
12539 + * Job Descriptor and Shared Descriptor
12540 + * must fit into the 64-word Descriptor h/w Buffer
12542 + data_len[0] = ctx->adata.keylen_pad;
12543 + data_len[1] = ctx->cdata.keylen;
12545 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
12546 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
12549 + if (inl_mask & 1)
12550 + ctx->adata.key_virt = ctx->key;
12552 + ctx->adata.key_dma = ctx->key_dma;
12554 + if (inl_mask & 2)
12555 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12557 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12559 + ctx->adata.key_inline = !!(inl_mask & 1);
12560 + ctx->cdata.key_inline = !!(inl_mask & 2);
12562 + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12563 + assoclen, ivsize, ctx->authsize, blocksize);
12566 + * TLS 1.0 decrypt shared descriptor
12567 + * Keys do not fit inline, regardless of algorithms used
12569 + ctx->adata.key_dma = ctx->key_dma;
12570 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12572 + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12573 + assoclen, ivsize, ctx->authsize, blocksize);
12578 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
12580 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12582 + ctx->authsize = authsize;
12583 + tls_set_sh_desc(tls);
12588 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
12589 + unsigned int keylen)
12591 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12592 + struct device *jrdev = ctx->jrdev;
12593 + struct crypto_authenc_keys keys;
12596 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12600 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12601 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
12602 + keys.authkeylen);
12603 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12604 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12607 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12608 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
12613 + /* postpend encryption key to auth split key */
12614 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12615 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12616 + keys.enckeylen, DMA_TO_DEVICE);
12619 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
12620 + ctx->adata.keylen, ctx->adata.keylen_pad);
12621 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12622 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12623 + ctx->adata.keylen_pad + keys.enckeylen, 1);
12626 + ctx->cdata.keylen = keys.enckeylen;
12628 + ret = tls_set_sh_desc(tls);
12632 + /* Now update the driver contexts with the new shared descriptor */
12633 + if (ctx->drv_ctx[ENCRYPT]) {
12634 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12635 + ctx->sh_desc_enc);
12637 + dev_err(jrdev, "driver enc context update failed\n");
12642 + if (ctx->drv_ctx[DECRYPT]) {
12643 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12644 + ctx->sh_desc_dec);
12646 + dev_err(jrdev, "driver dec context update failed\n");
12653 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
12657 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12658 + const u8 *key, unsigned int keylen)
12660 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12661 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
12662 + const char *alg_name = crypto_tfm_alg_name(tfm);
12663 + struct device *jrdev = ctx->jrdev;
12664 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
12665 + u32 ctx1_iv_off = 0;
12666 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12667 + OP_ALG_AAI_CTR_MOD128);
12668 + const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
12671 + memcpy(ctx->key, key, keylen);
12673 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12674 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12677 + * AES-CTR needs to load IV in CONTEXT1 reg
12678 + * at an offset of 128bits (16bytes)
12679 + * CONTEXT1[255:128] = IV
12682 + ctx1_iv_off = 16;
12685 + * RFC3686 specific:
12686 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12687 + * | *key = {KEY, NONCE}
12689 + if (is_rfc3686) {
12690 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12691 + keylen -= CTR_RFC3686_NONCE_SIZE;
12694 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12695 + ctx->cdata.keylen = keylen;
12696 + ctx->cdata.key_virt = ctx->key;
12697 + ctx->cdata.key_inline = true;
12699 + /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
12700 + cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
12701 + is_rfc3686, ctx1_iv_off);
12702 + cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
12703 + is_rfc3686, ctx1_iv_off);
12704 + cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
12705 + ivsize, is_rfc3686, ctx1_iv_off);
12707 + /* Now update the driver contexts with the new shared descriptor */
12708 + if (ctx->drv_ctx[ENCRYPT]) {
12709 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12710 + ctx->sh_desc_enc);
12712 + dev_err(jrdev, "driver enc context update failed\n");
12717 + if (ctx->drv_ctx[DECRYPT]) {
12718 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12719 + ctx->sh_desc_dec);
12721 + dev_err(jrdev, "driver dec context update failed\n");
12726 + if (ctx->drv_ctx[GIVENCRYPT]) {
12727 + ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
12728 + ctx->sh_desc_givenc);
12730 + dev_err(jrdev, "driver givenc context update failed\n");
12737 + crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12741 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12742 + const u8 *key, unsigned int keylen)
12744 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12745 + struct device *jrdev = ctx->jrdev;
12748 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
12749 + crypto_ablkcipher_set_flags(ablkcipher,
12750 + CRYPTO_TFM_RES_BAD_KEY_LEN);
12751 + dev_err(jrdev, "key size mismatch\n");
12755 + memcpy(ctx->key, key, keylen);
12756 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12757 + ctx->cdata.keylen = keylen;
12758 + ctx->cdata.key_virt = ctx->key;
12759 + ctx->cdata.key_inline = true;
12761 + /* xts ablkcipher encrypt, decrypt shared descriptors */
12762 + cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
12763 + cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
12765 + /* Now update the driver contexts with the new shared descriptor */
12766 + if (ctx->drv_ctx[ENCRYPT]) {
12767 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12768 + ctx->sh_desc_enc);
12770 + dev_err(jrdev, "driver enc context update failed\n");
12775 + if (ctx->drv_ctx[DECRYPT]) {
12776 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12777 + ctx->sh_desc_dec);
12779 + dev_err(jrdev, "driver dec context update failed\n");
12786 + crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12791 + * aead_edesc - s/w-extended aead descriptor
12792 + * @src_nents: number of segments in input scatterlist
12793 + * @dst_nents: number of segments in output scatterlist
12794 + * @iv_dma: dma address of iv for checking continuity and link table
12795 + * @qm_sg_bytes: length of dma mapped h/w link table
12796 + * @qm_sg_dma: bus physical mapped address of h/w link table
12797 + * @assoclen: associated data length, in CAAM endianness
12798 + * @assoclen_dma: bus physical mapped address of req->assoclen
12799 + * @drv_req: driver-specific request structure
12800 + * @sgt: the h/w link table
12802 +struct aead_edesc {
12805 + dma_addr_t iv_dma;
12807 + dma_addr_t qm_sg_dma;
12808 + unsigned int assoclen;
12809 + dma_addr_t assoclen_dma;
12810 + struct caam_drv_req drv_req;
12811 +#define CAAM_QI_MAX_AEAD_SG \
12812 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
12813 + sizeof(struct qm_sg_entry))
12814 + struct qm_sg_entry sgt[0];
12818 + * tls_edesc - s/w-extended tls descriptor
12819 + * @src_nents: number of segments in input scatterlist
12820 + * @dst_nents: number of segments in output scatterlist
12821 + * @iv_dma: dma address of iv for checking continuity and link table
12822 + * @qm_sg_bytes: length of dma mapped h/w link table
12823 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
12824 + * @qm_sg_dma: bus physical mapped address of h/w link table
12825 + * @drv_req: driver-specific request structure
12826 + * @sgt: the h/w link table
12828 +struct tls_edesc {
12831 + dma_addr_t iv_dma;
12833 + dma_addr_t qm_sg_dma;
12834 + struct scatterlist tmp[2];
12835 + struct scatterlist *dst;
12836 + struct caam_drv_req drv_req;
12837 + struct qm_sg_entry sgt[0];
12841 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
12842 + * @src_nents: number of segments in input scatterlist
12843 + * @dst_nents: number of segments in output scatterlist
12844 + * @iv_dma: dma address of iv for checking continuity and link table
12845 + * @qm_sg_bytes: length of dma mapped h/w link table
12846 + * @qm_sg_dma: bus physical mapped address of h/w link table
12847 + * @drv_req: driver-specific request structure
12848 + * @sgt: the h/w link table
12850 +struct ablkcipher_edesc {
12853 + dma_addr_t iv_dma;
12855 + dma_addr_t qm_sg_dma;
12856 + struct caam_drv_req drv_req;
12857 +#define CAAM_QI_MAX_ABLKCIPHER_SG \
12858 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
12859 + sizeof(struct qm_sg_entry))
12860 + struct qm_sg_entry sgt[0];
12863 +static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
12864 + enum optype type)
12867 + * This function is called on the fast path with values of 'type'
12868 + * known at compile time. Invalid arguments are not expected and
12869 + * thus no checks are made.
12871 + struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
12874 + if (unlikely(!drv_ctx)) {
12875 + spin_lock(&ctx->lock);
12877 + /* Read again to check if some other core init drv_ctx */
12878 + drv_ctx = ctx->drv_ctx[type];
12882 + if (type == ENCRYPT)
12883 + desc = ctx->sh_desc_enc;
12884 + else if (type == DECRYPT)
12885 + desc = ctx->sh_desc_dec;
12886 + else /* (type == GIVENCRYPT) */
12887 + desc = ctx->sh_desc_givenc;
12889 + cpu = smp_processor_id();
12890 + drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
12891 + if (likely(!IS_ERR_OR_NULL(drv_ctx)))
12892 + drv_ctx->op_type = type;
12894 + ctx->drv_ctx[type] = drv_ctx;
12897 + spin_unlock(&ctx->lock);
12903 +static void caam_unmap(struct device *dev, struct scatterlist *src,
12904 + struct scatterlist *dst, int src_nents,
12905 + int dst_nents, dma_addr_t iv_dma, int ivsize,
12906 + enum optype op_type, dma_addr_t qm_sg_dma,
12909 + if (dst != src) {
12911 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
12912 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
12914 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
12918 + dma_unmap_single(dev, iv_dma, ivsize,
12919 + op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
12922 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
12925 +static void aead_unmap(struct device *dev,
12926 + struct aead_edesc *edesc,
12927 + struct aead_request *req)
12929 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
12930 + int ivsize = crypto_aead_ivsize(aead);
12932 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
12933 + edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
12934 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
12935 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
12938 +static void tls_unmap(struct device *dev,
12939 + struct tls_edesc *edesc,
12940 + struct aead_request *req)
12942 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
12943 + int ivsize = crypto_aead_ivsize(aead);
12945 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
12946 + edesc->dst_nents, edesc->iv_dma, ivsize,
12947 + edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
12948 + edesc->qm_sg_bytes);
12951 +static void ablkcipher_unmap(struct device *dev,
12952 + struct ablkcipher_edesc *edesc,
12953 + struct ablkcipher_request *req)
12955 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
12956 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
12958 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
12959 + edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
12960 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
12963 +static void aead_done(struct caam_drv_req *drv_req, u32 status)
12965 + struct device *qidev;
12966 + struct aead_edesc *edesc;
12967 + struct aead_request *aead_req = drv_req->app_ctx;
12968 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
12969 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
12972 + qidev = caam_ctx->qidev;
12974 + if (unlikely(status)) {
12975 + caam_jr_strstatus(qidev, status);
12979 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
12980 + aead_unmap(qidev, edesc, aead_req);
12982 + aead_request_complete(aead_req, ecode);
12983 + qi_cache_free(edesc);
12987 + * allocate and map the aead extended descriptor
12989 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
12992 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
12993 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12994 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
12995 + typeof(*alg), aead);
12996 + struct device *qidev = ctx->qidev;
12997 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
12998 + GFP_KERNEL : GFP_ATOMIC;
12999 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13000 + struct aead_edesc *edesc;
13001 + dma_addr_t qm_sg_dma, iv_dma = 0;
13003 + unsigned int authsize = ctx->authsize;
13004 + int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
13005 + int in_len, out_len;
13006 + struct qm_sg_entry *sg_table, *fd_sgt;
13007 + struct caam_drv_ctx *drv_ctx;
13008 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13010 + drv_ctx = get_drv_ctx(ctx, op_type);
13011 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13012 + return (struct aead_edesc *)drv_ctx;
13014 + /* allocate space for base edesc and hw desc commands, link tables */
13015 + edesc = qi_cache_alloc(GFP_DMA | flags);
13016 + if (unlikely(!edesc)) {
13017 + dev_err(qidev, "could not allocate extended descriptor\n");
13018 + return ERR_PTR(-ENOMEM);
13021 + if (likely(req->src == req->dst)) {
13022 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13024 + (encrypt ? authsize : 0));
13025 + if (unlikely(src_nents < 0)) {
13026 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13027 + req->assoclen + req->cryptlen +
13028 + (encrypt ? authsize : 0));
13029 + qi_cache_free(edesc);
13030 + return ERR_PTR(src_nents);
13033 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13034 + DMA_BIDIRECTIONAL);
13035 + if (unlikely(!mapped_src_nents)) {
13036 + dev_err(qidev, "unable to map source\n");
13037 + qi_cache_free(edesc);
13038 + return ERR_PTR(-ENOMEM);
13041 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13043 + if (unlikely(src_nents < 0)) {
13044 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13045 + req->assoclen + req->cryptlen);
13046 + qi_cache_free(edesc);
13047 + return ERR_PTR(src_nents);
13050 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
13052 + (encrypt ? authsize :
13054 + if (unlikely(dst_nents < 0)) {
13055 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13056 + req->assoclen + req->cryptlen +
13057 + (encrypt ? authsize : (-authsize)));
13058 + qi_cache_free(edesc);
13059 + return ERR_PTR(dst_nents);
13063 + mapped_src_nents = dma_map_sg(qidev, req->src,
13064 + src_nents, DMA_TO_DEVICE);
13065 + if (unlikely(!mapped_src_nents)) {
13066 + dev_err(qidev, "unable to map source\n");
13067 + qi_cache_free(edesc);
13068 + return ERR_PTR(-ENOMEM);
13071 + mapped_src_nents = 0;
13074 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13075 + DMA_FROM_DEVICE);
13076 + if (unlikely(!mapped_dst_nents)) {
13077 + dev_err(qidev, "unable to map destination\n");
13078 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13079 + qi_cache_free(edesc);
13080 + return ERR_PTR(-ENOMEM);
13084 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
13085 + ivsize = crypto_aead_ivsize(aead);
13086 + iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13087 + if (dma_mapping_error(qidev, iv_dma)) {
13088 + dev_err(qidev, "unable to map IV\n");
13089 + caam_unmap(qidev, req->src, req->dst, src_nents,
13090 + dst_nents, 0, 0, op_type, 0, 0);
13091 + qi_cache_free(edesc);
13092 + return ERR_PTR(-ENOMEM);
13097 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
13098 + * Input is not contiguous.
13100 + qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
13101 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13102 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
13103 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13104 + qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
13105 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13106 + iv_dma, ivsize, op_type, 0, 0);
13107 + qi_cache_free(edesc);
13108 + return ERR_PTR(-ENOMEM);
13110 + sg_table = &edesc->sgt[0];
13111 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13113 + edesc->src_nents = src_nents;
13114 + edesc->dst_nents = dst_nents;
13115 + edesc->iv_dma = iv_dma;
13116 + edesc->drv_req.app_ctx = req;
13117 + edesc->drv_req.cbk = aead_done;
13118 + edesc->drv_req.drv_ctx = drv_ctx;
13120 + edesc->assoclen = cpu_to_caam32(req->assoclen);
13121 + edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
13123 + if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
13124 + dev_err(qidev, "unable to map assoclen\n");
13125 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13126 + iv_dma, ivsize, op_type, 0, 0);
13127 + qi_cache_free(edesc);
13128 + return ERR_PTR(-ENOMEM);
13131 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
13134 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
13137 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13138 + qm_sg_index += mapped_src_nents;
13140 + if (mapped_dst_nents > 1)
13141 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13144 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13145 + if (dma_mapping_error(qidev, qm_sg_dma)) {
13146 + dev_err(qidev, "unable to map S/G table\n");
13147 + dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
13148 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13149 + iv_dma, ivsize, op_type, 0, 0);
13150 + qi_cache_free(edesc);
13151 + return ERR_PTR(-ENOMEM);
13154 + edesc->qm_sg_dma = qm_sg_dma;
13155 + edesc->qm_sg_bytes = qm_sg_bytes;
13157 + out_len = req->assoclen + req->cryptlen +
13158 + (encrypt ? ctx->authsize : (-ctx->authsize));
13159 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
13161 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13162 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13164 + if (req->dst == req->src) {
13165 + if (mapped_src_nents == 1)
13166 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13169 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13170 + (1 + !!ivsize) * sizeof(*sg_table),
13172 + } else if (mapped_dst_nents == 1) {
13173 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
13176 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13177 + qm_sg_index, out_len, 0);
13183 +static inline int aead_crypt(struct aead_request *req, bool encrypt)
13185 + struct aead_edesc *edesc;
13186 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13187 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13190 + if (unlikely(caam_congested))
13193 + /* allocate extended descriptor */
13194 + edesc = aead_edesc_alloc(req, encrypt);
13195 + if (IS_ERR_OR_NULL(edesc))
13196 + return PTR_ERR(edesc);
13198 + /* Create and submit job descriptor */
13199 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13201 + ret = -EINPROGRESS;
13203 + aead_unmap(ctx->qidev, edesc, req);
13204 + qi_cache_free(edesc);
13210 +static int aead_encrypt(struct aead_request *req)
13212 + return aead_crypt(req, true);
13215 +static int aead_decrypt(struct aead_request *req)
13217 + return aead_crypt(req, false);
13220 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
13222 + struct device *qidev;
13223 + struct tls_edesc *edesc;
13224 + struct aead_request *aead_req = drv_req->app_ctx;
13225 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13226 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13229 + qidev = caam_ctx->qidev;
13231 + if (unlikely(status)) {
13232 + caam_jr_strstatus(qidev, status);
13236 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13237 + tls_unmap(qidev, edesc, aead_req);
13239 + aead_request_complete(aead_req, ecode);
13240 + qi_cache_free(edesc);
13244 + * allocate and map the tls extended descriptor
13246 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
13248 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13249 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13250 + unsigned int blocksize = crypto_aead_blocksize(aead);
13251 + unsigned int padsize, authsize;
13252 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13253 + typeof(*alg), aead);
13254 + struct device *qidev = ctx->qidev;
13255 + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
13256 + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
13257 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13258 + struct tls_edesc *edesc;
13259 + dma_addr_t qm_sg_dma, iv_dma = 0;
13261 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
13262 + int in_len, out_len;
13263 + struct qm_sg_entry *sg_table, *fd_sgt;
13264 + struct caam_drv_ctx *drv_ctx;
13265 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13266 + struct scatterlist *dst;
13269 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
13271 + authsize = ctx->authsize + padsize;
13273 + authsize = ctx->authsize;
13276 + drv_ctx = get_drv_ctx(ctx, op_type);
13277 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13278 + return (struct tls_edesc *)drv_ctx;
13280 + /* allocate space for base edesc and hw desc commands, link tables */
13281 + edesc = qi_cache_alloc(GFP_DMA | flags);
13282 + if (unlikely(!edesc)) {
13283 + dev_err(qidev, "could not allocate extended descriptor\n");
13284 + return ERR_PTR(-ENOMEM);
13287 + if (likely(req->src == req->dst)) {
13288 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13290 + (encrypt ? authsize : 0));
13291 + if (unlikely(src_nents < 0)) {
13292 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13293 + req->assoclen + req->cryptlen +
13294 + (encrypt ? authsize : 0));
13295 + qi_cache_free(edesc);
13296 + return ERR_PTR(src_nents);
13299 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13300 + DMA_BIDIRECTIONAL);
13301 + if (unlikely(!mapped_src_nents)) {
13302 + dev_err(qidev, "unable to map source\n");
13303 + qi_cache_free(edesc);
13304 + return ERR_PTR(-ENOMEM);
13308 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13310 + if (unlikely(src_nents < 0)) {
13311 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13312 + req->assoclen + req->cryptlen);
13313 + qi_cache_free(edesc);
13314 + return ERR_PTR(src_nents);
13317 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
13318 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
13319 + (encrypt ? authsize : 0));
13320 + if (unlikely(dst_nents < 0)) {
13321 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13323 + (encrypt ? authsize : 0));
13324 + qi_cache_free(edesc);
13325 + return ERR_PTR(dst_nents);
13329 + mapped_src_nents = dma_map_sg(qidev, req->src,
13330 + src_nents, DMA_TO_DEVICE);
13331 + if (unlikely(!mapped_src_nents)) {
13332 + dev_err(qidev, "unable to map source\n");
13333 + qi_cache_free(edesc);
13334 + return ERR_PTR(-ENOMEM);
13337 + mapped_src_nents = 0;
13340 + mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
13341 + DMA_FROM_DEVICE);
13342 + if (unlikely(!mapped_dst_nents)) {
13343 + dev_err(qidev, "unable to map destination\n");
13344 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13345 + qi_cache_free(edesc);
13346 + return ERR_PTR(-ENOMEM);
13350 + ivsize = crypto_aead_ivsize(aead);
13351 + iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13352 + if (dma_mapping_error(qidev, iv_dma)) {
13353 + dev_err(qidev, "unable to map IV\n");
13354 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
13356 + qi_cache_free(edesc);
13357 + return ERR_PTR(-ENOMEM);
13361 + * Create S/G table: IV, src, dst.
13362 + * Input is not contiguous.
13364 + qm_sg_ents = 1 + mapped_src_nents +
13365 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13366 + sg_table = &edesc->sgt[0];
13367 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13369 + edesc->src_nents = src_nents;
13370 + edesc->dst_nents = dst_nents;
13371 + edesc->dst = dst;
13372 + edesc->iv_dma = iv_dma;
13373 + edesc->drv_req.app_ctx = req;
13374 + edesc->drv_req.cbk = tls_done;
13375 + edesc->drv_req.drv_ctx = drv_ctx;
13377 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13380 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13381 + qm_sg_index += mapped_src_nents;
13383 + if (mapped_dst_nents > 1)
13384 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
13387 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13388 + if (dma_mapping_error(qidev, qm_sg_dma)) {
13389 + dev_err(qidev, "unable to map S/G table\n");
13390 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
13391 + ivsize, op_type, 0, 0);
13392 + qi_cache_free(edesc);
13393 + return ERR_PTR(-ENOMEM);
13396 + edesc->qm_sg_dma = qm_sg_dma;
13397 + edesc->qm_sg_bytes = qm_sg_bytes;
13399 + out_len = req->cryptlen + (encrypt ? authsize : 0);
13400 + in_len = ivsize + req->assoclen + req->cryptlen;
13402 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13404 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13406 + if (req->dst == req->src)
13407 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13408 + (sg_nents_for_len(req->src, req->assoclen) +
13409 + 1) * sizeof(*sg_table), out_len, 0);
13410 + else if (mapped_dst_nents == 1)
13411 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
13413 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13414 + qm_sg_index, out_len, 0);
13419 +static int tls_crypt(struct aead_request *req, bool encrypt)
13421 + struct tls_edesc *edesc;
13422 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13423 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13426 + if (unlikely(caam_congested))
13429 + edesc = tls_edesc_alloc(req, encrypt);
13430 + if (IS_ERR_OR_NULL(edesc))
13431 + return PTR_ERR(edesc);
13433 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13435 + ret = -EINPROGRESS;
13437 + tls_unmap(ctx->qidev, edesc, req);
13438 + qi_cache_free(edesc);
13444 +static int tls_encrypt(struct aead_request *req)
13446 + return tls_crypt(req, true);
13449 +static int tls_decrypt(struct aead_request *req)
13451 + return tls_crypt(req, false);
13454 +static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
13456 + struct ablkcipher_edesc *edesc;
13457 + struct ablkcipher_request *req = drv_req->app_ctx;
13458 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13459 + struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
13460 + struct device *qidev = caam_ctx->qidev;
13461 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13464 + dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
13467 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13470 + caam_jr_strstatus(qidev, status);
13473 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
13474 + DUMP_PREFIX_ADDRESS, 16, 4, req->info,
13475 + edesc->src_nents > 1 ? 100 : ivsize, 1);
13476 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
13477 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
13478 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
13481 + ablkcipher_unmap(qidev, edesc, req);
13482 + qi_cache_free(edesc);
13485 + * The crypto API expects us to set the IV (req->info) to the last
13486 + * ciphertext block. This is used e.g. by the CTS mode.
13488 + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
13491 + ablkcipher_request_complete(req, status);
13494 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
13495 + *req, bool encrypt)
13497 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13498 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13499 + struct device *qidev = ctx->qidev;
13500 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13501 + GFP_KERNEL : GFP_ATOMIC;
13502 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13503 + struct ablkcipher_edesc *edesc;
13504 + dma_addr_t iv_dma;
13506 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13507 + int dst_sg_idx, qm_sg_ents;
13508 + struct qm_sg_entry *sg_table, *fd_sgt;
13509 + struct caam_drv_ctx *drv_ctx;
13510 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13512 + drv_ctx = get_drv_ctx(ctx, op_type);
13513 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13514 + return (struct ablkcipher_edesc *)drv_ctx;
13516 + src_nents = sg_nents_for_len(req->src, req->nbytes);
13517 + if (unlikely(src_nents < 0)) {
13518 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13520 + return ERR_PTR(src_nents);
13523 + if (unlikely(req->src != req->dst)) {
13524 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13525 + if (unlikely(dst_nents < 0)) {
13526 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13528 + return ERR_PTR(dst_nents);
13531 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13533 + if (unlikely(!mapped_src_nents)) {
13534 + dev_err(qidev, "unable to map source\n");
13535 + return ERR_PTR(-ENOMEM);
13538 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13539 + DMA_FROM_DEVICE);
13540 + if (unlikely(!mapped_dst_nents)) {
13541 + dev_err(qidev, "unable to map destination\n");
13542 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13543 + return ERR_PTR(-ENOMEM);
13546 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13547 + DMA_BIDIRECTIONAL);
13548 + if (unlikely(!mapped_src_nents)) {
13549 + dev_err(qidev, "unable to map source\n");
13550 + return ERR_PTR(-ENOMEM);
13554 + iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
13555 + if (dma_mapping_error(qidev, iv_dma)) {
13556 + dev_err(qidev, "unable to map IV\n");
13557 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13559 + return ERR_PTR(-ENOMEM);
13562 + if (mapped_src_nents == 1 &&
13563 + iv_dma + ivsize == sg_dma_address(req->src)) {
13564 + in_contig = true;
13567 + in_contig = false;
13568 + qm_sg_ents = 1 + mapped_src_nents;
13570 + dst_sg_idx = qm_sg_ents;
13572 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
13573 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13574 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13575 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13576 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13577 + iv_dma, ivsize, op_type, 0, 0);
13578 + return ERR_PTR(-ENOMEM);
13581 + /* allocate space for base edesc and link tables */
13582 + edesc = qi_cache_alloc(GFP_DMA | flags);
13583 + if (unlikely(!edesc)) {
13584 + dev_err(qidev, "could not allocate extended descriptor\n");
13585 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13586 + iv_dma, ivsize, op_type, 0, 0);
13587 + return ERR_PTR(-ENOMEM);
13590 + edesc->src_nents = src_nents;
13591 + edesc->dst_nents = dst_nents;
13592 + edesc->iv_dma = iv_dma;
13593 + sg_table = &edesc->sgt[0];
13594 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13595 + edesc->drv_req.app_ctx = req;
13596 + edesc->drv_req.cbk = ablkcipher_done;
13597 + edesc->drv_req.drv_ctx = drv_ctx;
13599 + if (!in_contig) {
13600 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13601 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
13604 + if (mapped_dst_nents > 1)
13605 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13608 + edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13610 + if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13611 + dev_err(qidev, "unable to map S/G table\n");
13612 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13613 + iv_dma, ivsize, op_type, 0, 0);
13614 + qi_cache_free(edesc);
13615 + return ERR_PTR(-ENOMEM);
13618 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13621 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
13622 + ivsize + req->nbytes, 0);
13624 + dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
13627 + if (req->src == req->dst) {
13629 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
13630 + sizeof(*sg_table), req->nbytes, 0);
13632 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13634 + } else if (mapped_dst_nents > 1) {
13635 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13636 + sizeof(*sg_table), req->nbytes, 0);
13638 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13645 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
13646 + struct skcipher_givcrypt_request *creq)
13648 + struct ablkcipher_request *req = &creq->creq;
13649 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13650 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13651 + struct device *qidev = ctx->qidev;
13652 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13653 + GFP_KERNEL : GFP_ATOMIC;
13654 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
13655 + struct ablkcipher_edesc *edesc;
13656 + dma_addr_t iv_dma;
13658 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13659 + struct qm_sg_entry *sg_table, *fd_sgt;
13660 + int dst_sg_idx, qm_sg_ents;
13661 + struct caam_drv_ctx *drv_ctx;
13663 + drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
13664 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13665 + return (struct ablkcipher_edesc *)drv_ctx;
13667 + src_nents = sg_nents_for_len(req->src, req->nbytes);
13668 + if (unlikely(src_nents < 0)) {
13669 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13671 + return ERR_PTR(src_nents);
13674 + if (unlikely(req->src != req->dst)) {
13675 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13676 + if (unlikely(dst_nents < 0)) {
13677 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13679 + return ERR_PTR(dst_nents);
13682 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13684 + if (unlikely(!mapped_src_nents)) {
13685 + dev_err(qidev, "unable to map source\n");
13686 + return ERR_PTR(-ENOMEM);
13689 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13690 + DMA_FROM_DEVICE);
13691 + if (unlikely(!mapped_dst_nents)) {
13692 + dev_err(qidev, "unable to map destination\n");
13693 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13694 + return ERR_PTR(-ENOMEM);
13697 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13698 + DMA_BIDIRECTIONAL);
13699 + if (unlikely(!mapped_src_nents)) {
13700 + dev_err(qidev, "unable to map source\n");
13701 + return ERR_PTR(-ENOMEM);
13704 + dst_nents = src_nents;
13705 + mapped_dst_nents = src_nents;
13708 + iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
13709 + if (dma_mapping_error(qidev, iv_dma)) {
13710 + dev_err(qidev, "unable to map IV\n");
13711 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13713 + return ERR_PTR(-ENOMEM);
13716 + qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
13717 + dst_sg_idx = qm_sg_ents;
13718 + if (mapped_dst_nents == 1 &&
13719 + iv_dma + ivsize == sg_dma_address(req->dst)) {
13720 + out_contig = true;
13722 + out_contig = false;
13723 + qm_sg_ents += 1 + mapped_dst_nents;
13726 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13727 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13728 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13729 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13730 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13731 + return ERR_PTR(-ENOMEM);
13734 + /* allocate space for base edesc and link tables */
13735 + edesc = qi_cache_alloc(GFP_DMA | flags);
13737 + dev_err(qidev, "could not allocate extended descriptor\n");
13738 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13739 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13740 + return ERR_PTR(-ENOMEM);
13743 + edesc->src_nents = src_nents;
13744 + edesc->dst_nents = dst_nents;
13745 + edesc->iv_dma = iv_dma;
13746 + sg_table = &edesc->sgt[0];
13747 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13748 + edesc->drv_req.app_ctx = req;
13749 + edesc->drv_req.cbk = ablkcipher_done;
13750 + edesc->drv_req.drv_ctx = drv_ctx;
13752 + if (mapped_src_nents > 1)
13753 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
13755 + if (!out_contig) {
13756 + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
13757 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13758 + dst_sg_idx + 1, 0);
13761 + edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13763 + if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13764 + dev_err(qidev, "unable to map S/G table\n");
13765 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13766 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13767 + qi_cache_free(edesc);
13768 + return ERR_PTR(-ENOMEM);
13771 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13773 + if (mapped_src_nents > 1)
13774 + dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
13777 + dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
13781 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13782 + sizeof(*sg_table), ivsize + req->nbytes,
13785 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13786 + ivsize + req->nbytes, 0);
13791 +static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
13793 + struct ablkcipher_edesc *edesc;
13794 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13795 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13798 + if (unlikely(caam_congested))
13801 + /* allocate extended descriptor */
13802 + edesc = ablkcipher_edesc_alloc(req, encrypt);
13803 + if (IS_ERR(edesc))
13804 + return PTR_ERR(edesc);
13806 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13808 + ret = -EINPROGRESS;
13810 + ablkcipher_unmap(ctx->qidev, edesc, req);
13811 + qi_cache_free(edesc);
13817 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
13819 + return ablkcipher_crypt(req, true);
13822 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
13824 + return ablkcipher_crypt(req, false);
13827 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
13829 + struct ablkcipher_request *req = &creq->creq;
13830 + struct ablkcipher_edesc *edesc;
13831 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13832 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13835 + if (unlikely(caam_congested))
13838 + /* allocate extended descriptor */
13839 + edesc = ablkcipher_giv_edesc_alloc(creq);
13840 + if (IS_ERR(edesc))
13841 + return PTR_ERR(edesc);
13843 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13845 + ret = -EINPROGRESS;
13847 + ablkcipher_unmap(ctx->qidev, edesc, req);
13848 + qi_cache_free(edesc);
13854 +#define template_ablkcipher template_u.ablkcipher
13855 +struct caam_alg_template {
13856 + char name[CRYPTO_MAX_ALG_NAME];
13857 + char driver_name[CRYPTO_MAX_ALG_NAME];
13858 + unsigned int blocksize;
13861 + struct ablkcipher_alg ablkcipher;
13863 + u32 class1_alg_type;
13864 + u32 class2_alg_type;
13867 +static struct caam_alg_template driver_algs[] = {
13868 + /* ablkcipher descriptor */
13870 + .name = "cbc(aes)",
13871 + .driver_name = "cbc-aes-caam-qi",
13872 + .blocksize = AES_BLOCK_SIZE,
13873 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13874 + .template_ablkcipher = {
13875 + .setkey = ablkcipher_setkey,
13876 + .encrypt = ablkcipher_encrypt,
13877 + .decrypt = ablkcipher_decrypt,
13878 + .givencrypt = ablkcipher_givencrypt,
13879 + .geniv = "<built-in>",
13880 + .min_keysize = AES_MIN_KEY_SIZE,
13881 + .max_keysize = AES_MAX_KEY_SIZE,
13882 + .ivsize = AES_BLOCK_SIZE,
13884 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
13887 + .name = "cbc(des3_ede)",
13888 + .driver_name = "cbc-3des-caam-qi",
13889 + .blocksize = DES3_EDE_BLOCK_SIZE,
13890 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13891 + .template_ablkcipher = {
13892 + .setkey = ablkcipher_setkey,
13893 + .encrypt = ablkcipher_encrypt,
13894 + .decrypt = ablkcipher_decrypt,
13895 + .givencrypt = ablkcipher_givencrypt,
13896 + .geniv = "<built-in>",
13897 + .min_keysize = DES3_EDE_KEY_SIZE,
13898 + .max_keysize = DES3_EDE_KEY_SIZE,
13899 + .ivsize = DES3_EDE_BLOCK_SIZE,
13901 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
13904 + .name = "cbc(des)",
13905 + .driver_name = "cbc-des-caam-qi",
13906 + .blocksize = DES_BLOCK_SIZE,
13907 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13908 + .template_ablkcipher = {
13909 + .setkey = ablkcipher_setkey,
13910 + .encrypt = ablkcipher_encrypt,
13911 + .decrypt = ablkcipher_decrypt,
13912 + .givencrypt = ablkcipher_givencrypt,
13913 + .geniv = "<built-in>",
13914 + .min_keysize = DES_KEY_SIZE,
13915 + .max_keysize = DES_KEY_SIZE,
13916 + .ivsize = DES_BLOCK_SIZE,
13918 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
13921 + .name = "ctr(aes)",
13922 + .driver_name = "ctr-aes-caam-qi",
13924 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
13925 + .template_ablkcipher = {
13926 + .setkey = ablkcipher_setkey,
13927 + .encrypt = ablkcipher_encrypt,
13928 + .decrypt = ablkcipher_decrypt,
13929 + .geniv = "chainiv",
13930 + .min_keysize = AES_MIN_KEY_SIZE,
13931 + .max_keysize = AES_MAX_KEY_SIZE,
13932 + .ivsize = AES_BLOCK_SIZE,
13934 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
13937 + .name = "rfc3686(ctr(aes))",
13938 + .driver_name = "rfc3686-ctr-aes-caam-qi",
13940 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
13941 + .template_ablkcipher = {
13942 + .setkey = ablkcipher_setkey,
13943 + .encrypt = ablkcipher_encrypt,
13944 + .decrypt = ablkcipher_decrypt,
13945 + .givencrypt = ablkcipher_givencrypt,
13946 + .geniv = "<built-in>",
13947 + .min_keysize = AES_MIN_KEY_SIZE +
13948 + CTR_RFC3686_NONCE_SIZE,
13949 + .max_keysize = AES_MAX_KEY_SIZE +
13950 + CTR_RFC3686_NONCE_SIZE,
13951 + .ivsize = CTR_RFC3686_IV_SIZE,
13953 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
13956 + .name = "xts(aes)",
13957 + .driver_name = "xts-aes-caam-qi",
13958 + .blocksize = AES_BLOCK_SIZE,
13959 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
13960 + .template_ablkcipher = {
13961 + .setkey = xts_ablkcipher_setkey,
13962 + .encrypt = ablkcipher_encrypt,
13963 + .decrypt = ablkcipher_decrypt,
13964 + .geniv = "eseqiv",
13965 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
13966 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
13967 + .ivsize = AES_BLOCK_SIZE,
13969 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
13973 +static struct caam_aead_alg driver_aeads[] = {
13974 + /* single-pass ipsec_esp descriptor */
13978 + .cra_name = "authenc(hmac(md5),cbc(aes))",
13979 + .cra_driver_name = "authenc-hmac-md5-"
13980 + "cbc-aes-caam-qi",
13981 + .cra_blocksize = AES_BLOCK_SIZE,
13983 + .setkey = aead_setkey,
13984 + .setauthsize = aead_setauthsize,
13985 + .encrypt = aead_encrypt,
13986 + .decrypt = aead_decrypt,
13987 + .ivsize = AES_BLOCK_SIZE,
13988 + .maxauthsize = MD5_DIGEST_SIZE,
13991 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
13992 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
13993 + OP_ALG_AAI_HMAC_PRECOMP,
13999 + .cra_name = "echainiv(authenc(hmac(md5),"
14001 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14002 + "cbc-aes-caam-qi",
14003 + .cra_blocksize = AES_BLOCK_SIZE,
14005 + .setkey = aead_setkey,
14006 + .setauthsize = aead_setauthsize,
14007 + .encrypt = aead_encrypt,
14008 + .decrypt = aead_decrypt,
14009 + .ivsize = AES_BLOCK_SIZE,
14010 + .maxauthsize = MD5_DIGEST_SIZE,
14013 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14014 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14015 + OP_ALG_AAI_HMAC_PRECOMP,
14022 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
14023 + .cra_driver_name = "authenc-hmac-sha1-"
14024 + "cbc-aes-caam-qi",
14025 + .cra_blocksize = AES_BLOCK_SIZE,
14027 + .setkey = aead_setkey,
14028 + .setauthsize = aead_setauthsize,
14029 + .encrypt = aead_encrypt,
14030 + .decrypt = aead_decrypt,
14031 + .ivsize = AES_BLOCK_SIZE,
14032 + .maxauthsize = SHA1_DIGEST_SIZE,
14035 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14036 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14037 + OP_ALG_AAI_HMAC_PRECOMP,
14043 + .cra_name = "echainiv(authenc(hmac(sha1),"
14045 + .cra_driver_name = "echainiv-authenc-"
14046 + "hmac-sha1-cbc-aes-caam-qi",
14047 + .cra_blocksize = AES_BLOCK_SIZE,
14049 + .setkey = aead_setkey,
14050 + .setauthsize = aead_setauthsize,
14051 + .encrypt = aead_encrypt,
14052 + .decrypt = aead_decrypt,
14053 + .ivsize = AES_BLOCK_SIZE,
14054 + .maxauthsize = SHA1_DIGEST_SIZE,
14057 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14058 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14059 + OP_ALG_AAI_HMAC_PRECOMP,
14066 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
14067 + .cra_driver_name = "authenc-hmac-sha224-"
14068 + "cbc-aes-caam-qi",
14069 + .cra_blocksize = AES_BLOCK_SIZE,
14071 + .setkey = aead_setkey,
14072 + .setauthsize = aead_setauthsize,
14073 + .encrypt = aead_encrypt,
14074 + .decrypt = aead_decrypt,
14075 + .ivsize = AES_BLOCK_SIZE,
14076 + .maxauthsize = SHA224_DIGEST_SIZE,
14079 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14080 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14081 + OP_ALG_AAI_HMAC_PRECOMP,
14087 + .cra_name = "echainiv(authenc(hmac(sha224),"
14089 + .cra_driver_name = "echainiv-authenc-"
14090 + "hmac-sha224-cbc-aes-caam-qi",
14091 + .cra_blocksize = AES_BLOCK_SIZE,
14093 + .setkey = aead_setkey,
14094 + .setauthsize = aead_setauthsize,
14095 + .encrypt = aead_encrypt,
14096 + .decrypt = aead_decrypt,
14097 + .ivsize = AES_BLOCK_SIZE,
14098 + .maxauthsize = SHA224_DIGEST_SIZE,
14101 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14102 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14103 + OP_ALG_AAI_HMAC_PRECOMP,
14110 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
14111 + .cra_driver_name = "authenc-hmac-sha256-"
14112 + "cbc-aes-caam-qi",
14113 + .cra_blocksize = AES_BLOCK_SIZE,
14115 + .setkey = aead_setkey,
14116 + .setauthsize = aead_setauthsize,
14117 + .encrypt = aead_encrypt,
14118 + .decrypt = aead_decrypt,
14119 + .ivsize = AES_BLOCK_SIZE,
14120 + .maxauthsize = SHA256_DIGEST_SIZE,
14123 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14124 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14125 + OP_ALG_AAI_HMAC_PRECOMP,
14131 + .cra_name = "echainiv(authenc(hmac(sha256),"
14133 + .cra_driver_name = "echainiv-authenc-"
14134 + "hmac-sha256-cbc-aes-"
14136 + .cra_blocksize = AES_BLOCK_SIZE,
14138 + .setkey = aead_setkey,
14139 + .setauthsize = aead_setauthsize,
14140 + .encrypt = aead_encrypt,
14141 + .decrypt = aead_decrypt,
14142 + .ivsize = AES_BLOCK_SIZE,
14143 + .maxauthsize = SHA256_DIGEST_SIZE,
14146 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14147 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14148 + OP_ALG_AAI_HMAC_PRECOMP,
14155 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
14156 + .cra_driver_name = "authenc-hmac-sha384-"
14157 + "cbc-aes-caam-qi",
14158 + .cra_blocksize = AES_BLOCK_SIZE,
14160 + .setkey = aead_setkey,
14161 + .setauthsize = aead_setauthsize,
14162 + .encrypt = aead_encrypt,
14163 + .decrypt = aead_decrypt,
14164 + .ivsize = AES_BLOCK_SIZE,
14165 + .maxauthsize = SHA384_DIGEST_SIZE,
14168 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14169 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14170 + OP_ALG_AAI_HMAC_PRECOMP,
14176 + .cra_name = "echainiv(authenc(hmac(sha384),"
14178 + .cra_driver_name = "echainiv-authenc-"
14179 + "hmac-sha384-cbc-aes-"
14181 + .cra_blocksize = AES_BLOCK_SIZE,
14183 + .setkey = aead_setkey,
14184 + .setauthsize = aead_setauthsize,
14185 + .encrypt = aead_encrypt,
14186 + .decrypt = aead_decrypt,
14187 + .ivsize = AES_BLOCK_SIZE,
14188 + .maxauthsize = SHA384_DIGEST_SIZE,
14191 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14192 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14193 + OP_ALG_AAI_HMAC_PRECOMP,
14200 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
14201 + .cra_driver_name = "authenc-hmac-sha512-"
14202 + "cbc-aes-caam-qi",
14203 + .cra_blocksize = AES_BLOCK_SIZE,
14205 + .setkey = aead_setkey,
14206 + .setauthsize = aead_setauthsize,
14207 + .encrypt = aead_encrypt,
14208 + .decrypt = aead_decrypt,
14209 + .ivsize = AES_BLOCK_SIZE,
14210 + .maxauthsize = SHA512_DIGEST_SIZE,
14213 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14214 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14215 + OP_ALG_AAI_HMAC_PRECOMP,
14221 + .cra_name = "echainiv(authenc(hmac(sha512),"
14223 + .cra_driver_name = "echainiv-authenc-"
14224 + "hmac-sha512-cbc-aes-"
14226 + .cra_blocksize = AES_BLOCK_SIZE,
14228 + .setkey = aead_setkey,
14229 + .setauthsize = aead_setauthsize,
14230 + .encrypt = aead_encrypt,
14231 + .decrypt = aead_decrypt,
14232 + .ivsize = AES_BLOCK_SIZE,
14233 + .maxauthsize = SHA512_DIGEST_SIZE,
14236 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14237 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14238 + OP_ALG_AAI_HMAC_PRECOMP,
14245 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
14246 + .cra_driver_name = "authenc-hmac-md5-"
14247 + "cbc-des3_ede-caam-qi",
14248 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14250 + .setkey = aead_setkey,
14251 + .setauthsize = aead_setauthsize,
14252 + .encrypt = aead_encrypt,
14253 + .decrypt = aead_decrypt,
14254 + .ivsize = DES3_EDE_BLOCK_SIZE,
14255 + .maxauthsize = MD5_DIGEST_SIZE,
14258 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14259 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14260 + OP_ALG_AAI_HMAC_PRECOMP,
14266 + .cra_name = "echainiv(authenc(hmac(md5),"
14267 + "cbc(des3_ede)))",
14268 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14269 + "cbc-des3_ede-caam-qi",
14270 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14272 + .setkey = aead_setkey,
14273 + .setauthsize = aead_setauthsize,
14274 + .encrypt = aead_encrypt,
14275 + .decrypt = aead_decrypt,
14276 + .ivsize = DES3_EDE_BLOCK_SIZE,
14277 + .maxauthsize = MD5_DIGEST_SIZE,
14280 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14281 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14282 + OP_ALG_AAI_HMAC_PRECOMP,
14289 + .cra_name = "authenc(hmac(sha1),"
14290 + "cbc(des3_ede))",
14291 + .cra_driver_name = "authenc-hmac-sha1-"
14292 + "cbc-des3_ede-caam-qi",
14293 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14295 + .setkey = aead_setkey,
14296 + .setauthsize = aead_setauthsize,
14297 + .encrypt = aead_encrypt,
14298 + .decrypt = aead_decrypt,
14299 + .ivsize = DES3_EDE_BLOCK_SIZE,
14300 + .maxauthsize = SHA1_DIGEST_SIZE,
14303 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14304 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14305 + OP_ALG_AAI_HMAC_PRECOMP,
14311 + .cra_name = "echainiv(authenc(hmac(sha1),"
14312 + "cbc(des3_ede)))",
14313 + .cra_driver_name = "echainiv-authenc-"
14315 + "cbc-des3_ede-caam-qi",
14316 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14318 + .setkey = aead_setkey,
14319 + .setauthsize = aead_setauthsize,
14320 + .encrypt = aead_encrypt,
14321 + .decrypt = aead_decrypt,
14322 + .ivsize = DES3_EDE_BLOCK_SIZE,
14323 + .maxauthsize = SHA1_DIGEST_SIZE,
14326 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14327 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14328 + OP_ALG_AAI_HMAC_PRECOMP,
14335 + .cra_name = "authenc(hmac(sha224),"
14336 + "cbc(des3_ede))",
14337 + .cra_driver_name = "authenc-hmac-sha224-"
14338 + "cbc-des3_ede-caam-qi",
14339 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14341 + .setkey = aead_setkey,
14342 + .setauthsize = aead_setauthsize,
14343 + .encrypt = aead_encrypt,
14344 + .decrypt = aead_decrypt,
14345 + .ivsize = DES3_EDE_BLOCK_SIZE,
14346 + .maxauthsize = SHA224_DIGEST_SIZE,
14349 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14350 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14351 + OP_ALG_AAI_HMAC_PRECOMP,
14357 + .cra_name = "echainiv(authenc(hmac(sha224),"
14358 + "cbc(des3_ede)))",
14359 + .cra_driver_name = "echainiv-authenc-"
14361 + "cbc-des3_ede-caam-qi",
14362 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14364 + .setkey = aead_setkey,
14365 + .setauthsize = aead_setauthsize,
14366 + .encrypt = aead_encrypt,
14367 + .decrypt = aead_decrypt,
14368 + .ivsize = DES3_EDE_BLOCK_SIZE,
14369 + .maxauthsize = SHA224_DIGEST_SIZE,
14372 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14373 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14374 + OP_ALG_AAI_HMAC_PRECOMP,
14381 + .cra_name = "authenc(hmac(sha256),"
14382 + "cbc(des3_ede))",
14383 + .cra_driver_name = "authenc-hmac-sha256-"
14384 + "cbc-des3_ede-caam-qi",
14385 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14387 + .setkey = aead_setkey,
14388 + .setauthsize = aead_setauthsize,
14389 + .encrypt = aead_encrypt,
14390 + .decrypt = aead_decrypt,
14391 + .ivsize = DES3_EDE_BLOCK_SIZE,
14392 + .maxauthsize = SHA256_DIGEST_SIZE,
14395 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14396 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14397 + OP_ALG_AAI_HMAC_PRECOMP,
14403 + .cra_name = "echainiv(authenc(hmac(sha256),"
14404 + "cbc(des3_ede)))",
14405 + .cra_driver_name = "echainiv-authenc-"
14407 + "cbc-des3_ede-caam-qi",
14408 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14410 + .setkey = aead_setkey,
14411 + .setauthsize = aead_setauthsize,
14412 + .encrypt = aead_encrypt,
14413 + .decrypt = aead_decrypt,
14414 + .ivsize = DES3_EDE_BLOCK_SIZE,
14415 + .maxauthsize = SHA256_DIGEST_SIZE,
14418 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14419 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14420 + OP_ALG_AAI_HMAC_PRECOMP,
14427 + .cra_name = "authenc(hmac(sha384),"
14428 + "cbc(des3_ede))",
14429 + .cra_driver_name = "authenc-hmac-sha384-"
14430 + "cbc-des3_ede-caam-qi",
14431 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14433 + .setkey = aead_setkey,
14434 + .setauthsize = aead_setauthsize,
14435 + .encrypt = aead_encrypt,
14436 + .decrypt = aead_decrypt,
14437 + .ivsize = DES3_EDE_BLOCK_SIZE,
14438 + .maxauthsize = SHA384_DIGEST_SIZE,
14441 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14442 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14443 + OP_ALG_AAI_HMAC_PRECOMP,
14449 + .cra_name = "echainiv(authenc(hmac(sha384),"
14450 + "cbc(des3_ede)))",
14451 + .cra_driver_name = "echainiv-authenc-"
14453 + "cbc-des3_ede-caam-qi",
14454 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14456 + .setkey = aead_setkey,
14457 + .setauthsize = aead_setauthsize,
14458 + .encrypt = aead_encrypt,
14459 + .decrypt = aead_decrypt,
14460 + .ivsize = DES3_EDE_BLOCK_SIZE,
14461 + .maxauthsize = SHA384_DIGEST_SIZE,
14464 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14465 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14466 + OP_ALG_AAI_HMAC_PRECOMP,
14473 + .cra_name = "authenc(hmac(sha512),"
14474 + "cbc(des3_ede))",
14475 + .cra_driver_name = "authenc-hmac-sha512-"
14476 + "cbc-des3_ede-caam-qi",
14477 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14479 + .setkey = aead_setkey,
14480 + .setauthsize = aead_setauthsize,
14481 + .encrypt = aead_encrypt,
14482 + .decrypt = aead_decrypt,
14483 + .ivsize = DES3_EDE_BLOCK_SIZE,
14484 + .maxauthsize = SHA512_DIGEST_SIZE,
14487 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14488 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14489 + OP_ALG_AAI_HMAC_PRECOMP,
14495 + .cra_name = "echainiv(authenc(hmac(sha512),"
14496 + "cbc(des3_ede)))",
14497 + .cra_driver_name = "echainiv-authenc-"
14499 + "cbc-des3_ede-caam-qi",
14500 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14502 + .setkey = aead_setkey,
14503 + .setauthsize = aead_setauthsize,
14504 + .encrypt = aead_encrypt,
14505 + .decrypt = aead_decrypt,
14506 + .ivsize = DES3_EDE_BLOCK_SIZE,
14507 + .maxauthsize = SHA512_DIGEST_SIZE,
14510 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14511 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14512 + OP_ALG_AAI_HMAC_PRECOMP,
14519 + .cra_name = "authenc(hmac(md5),cbc(des))",
14520 + .cra_driver_name = "authenc-hmac-md5-"
14521 + "cbc-des-caam-qi",
14522 + .cra_blocksize = DES_BLOCK_SIZE,
14524 + .setkey = aead_setkey,
14525 + .setauthsize = aead_setauthsize,
14526 + .encrypt = aead_encrypt,
14527 + .decrypt = aead_decrypt,
14528 + .ivsize = DES_BLOCK_SIZE,
14529 + .maxauthsize = MD5_DIGEST_SIZE,
14532 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14533 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14534 + OP_ALG_AAI_HMAC_PRECOMP,
14540 + .cra_name = "echainiv(authenc(hmac(md5),"
14542 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14543 + "cbc-des-caam-qi",
14544 + .cra_blocksize = DES_BLOCK_SIZE,
14546 + .setkey = aead_setkey,
14547 + .setauthsize = aead_setauthsize,
14548 + .encrypt = aead_encrypt,
14549 + .decrypt = aead_decrypt,
14550 + .ivsize = DES_BLOCK_SIZE,
14551 + .maxauthsize = MD5_DIGEST_SIZE,
14554 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14555 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14556 + OP_ALG_AAI_HMAC_PRECOMP,
14563 + .cra_name = "authenc(hmac(sha1),cbc(des))",
14564 + .cra_driver_name = "authenc-hmac-sha1-"
14565 + "cbc-des-caam-qi",
14566 + .cra_blocksize = DES_BLOCK_SIZE,
14568 + .setkey = aead_setkey,
14569 + .setauthsize = aead_setauthsize,
14570 + .encrypt = aead_encrypt,
14571 + .decrypt = aead_decrypt,
14572 + .ivsize = DES_BLOCK_SIZE,
14573 + .maxauthsize = SHA1_DIGEST_SIZE,
14576 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14577 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14578 + OP_ALG_AAI_HMAC_PRECOMP,
14584 + .cra_name = "echainiv(authenc(hmac(sha1),"
14586 + .cra_driver_name = "echainiv-authenc-"
14587 + "hmac-sha1-cbc-des-caam-qi",
14588 + .cra_blocksize = DES_BLOCK_SIZE,
14590 + .setkey = aead_setkey,
14591 + .setauthsize = aead_setauthsize,
14592 + .encrypt = aead_encrypt,
14593 + .decrypt = aead_decrypt,
14594 + .ivsize = DES_BLOCK_SIZE,
14595 + .maxauthsize = SHA1_DIGEST_SIZE,
14598 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14599 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14600 + OP_ALG_AAI_HMAC_PRECOMP,
14607 + .cra_name = "authenc(hmac(sha224),cbc(des))",
14608 + .cra_driver_name = "authenc-hmac-sha224-"
14609 + "cbc-des-caam-qi",
14610 + .cra_blocksize = DES_BLOCK_SIZE,
14612 + .setkey = aead_setkey,
14613 + .setauthsize = aead_setauthsize,
14614 + .encrypt = aead_encrypt,
14615 + .decrypt = aead_decrypt,
14616 + .ivsize = DES_BLOCK_SIZE,
14617 + .maxauthsize = SHA224_DIGEST_SIZE,
14620 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14621 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14622 + OP_ALG_AAI_HMAC_PRECOMP,
14628 + .cra_name = "echainiv(authenc(hmac(sha224),"
14630 + .cra_driver_name = "echainiv-authenc-"
14631 + "hmac-sha224-cbc-des-"
14633 + .cra_blocksize = DES_BLOCK_SIZE,
14635 + .setkey = aead_setkey,
14636 + .setauthsize = aead_setauthsize,
14637 + .encrypt = aead_encrypt,
14638 + .decrypt = aead_decrypt,
14639 + .ivsize = DES_BLOCK_SIZE,
14640 + .maxauthsize = SHA224_DIGEST_SIZE,
14643 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14644 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14645 + OP_ALG_AAI_HMAC_PRECOMP,
14652 + .cra_name = "authenc(hmac(sha256),cbc(des))",
14653 + .cra_driver_name = "authenc-hmac-sha256-"
14654 + "cbc-des-caam-qi",
14655 + .cra_blocksize = DES_BLOCK_SIZE,
14657 + .setkey = aead_setkey,
14658 + .setauthsize = aead_setauthsize,
14659 + .encrypt = aead_encrypt,
14660 + .decrypt = aead_decrypt,
14661 + .ivsize = DES_BLOCK_SIZE,
14662 + .maxauthsize = SHA256_DIGEST_SIZE,
14665 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14666 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14667 + OP_ALG_AAI_HMAC_PRECOMP,
14673 + .cra_name = "echainiv(authenc(hmac(sha256),"
14675 + .cra_driver_name = "echainiv-authenc-"
14676 + "hmac-sha256-cbc-des-"
14678 + .cra_blocksize = DES_BLOCK_SIZE,
14680 + .setkey = aead_setkey,
14681 + .setauthsize = aead_setauthsize,
14682 + .encrypt = aead_encrypt,
14683 + .decrypt = aead_decrypt,
14684 + .ivsize = DES_BLOCK_SIZE,
14685 + .maxauthsize = SHA256_DIGEST_SIZE,
14688 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14689 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14690 + OP_ALG_AAI_HMAC_PRECOMP,
14697 + .cra_name = "authenc(hmac(sha384),cbc(des))",
14698 + .cra_driver_name = "authenc-hmac-sha384-"
14699 + "cbc-des-caam-qi",
14700 + .cra_blocksize = DES_BLOCK_SIZE,
14702 + .setkey = aead_setkey,
14703 + .setauthsize = aead_setauthsize,
14704 + .encrypt = aead_encrypt,
14705 + .decrypt = aead_decrypt,
14706 + .ivsize = DES_BLOCK_SIZE,
14707 + .maxauthsize = SHA384_DIGEST_SIZE,
14710 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14711 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14712 + OP_ALG_AAI_HMAC_PRECOMP,
14718 + .cra_name = "echainiv(authenc(hmac(sha384),"
14720 + .cra_driver_name = "echainiv-authenc-"
14721 + "hmac-sha384-cbc-des-"
14723 + .cra_blocksize = DES_BLOCK_SIZE,
14725 + .setkey = aead_setkey,
14726 + .setauthsize = aead_setauthsize,
14727 + .encrypt = aead_encrypt,
14728 + .decrypt = aead_decrypt,
14729 + .ivsize = DES_BLOCK_SIZE,
14730 + .maxauthsize = SHA384_DIGEST_SIZE,
14733 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14734 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14735 + OP_ALG_AAI_HMAC_PRECOMP,
14742 + .cra_name = "authenc(hmac(sha512),cbc(des))",
14743 + .cra_driver_name = "authenc-hmac-sha512-"
14744 + "cbc-des-caam-qi",
14745 + .cra_blocksize = DES_BLOCK_SIZE,
14747 + .setkey = aead_setkey,
14748 + .setauthsize = aead_setauthsize,
14749 + .encrypt = aead_encrypt,
14750 + .decrypt = aead_decrypt,
14751 + .ivsize = DES_BLOCK_SIZE,
14752 + .maxauthsize = SHA512_DIGEST_SIZE,
14755 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14756 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14757 + OP_ALG_AAI_HMAC_PRECOMP,
14763 + .cra_name = "echainiv(authenc(hmac(sha512),"
14765 + .cra_driver_name = "echainiv-authenc-"
14766 + "hmac-sha512-cbc-des-"
14768 + .cra_blocksize = DES_BLOCK_SIZE,
14770 + .setkey = aead_setkey,
14771 + .setauthsize = aead_setauthsize,
14772 + .encrypt = aead_encrypt,
14773 + .decrypt = aead_decrypt,
14774 + .ivsize = DES_BLOCK_SIZE,
14775 + .maxauthsize = SHA512_DIGEST_SIZE,
14778 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14779 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14780 + OP_ALG_AAI_HMAC_PRECOMP,
14787 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
14788 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
14789 + .cra_blocksize = AES_BLOCK_SIZE,
14791 + .setkey = tls_setkey,
14792 + .setauthsize = tls_setauthsize,
14793 + .encrypt = tls_encrypt,
14794 + .decrypt = tls_decrypt,
14795 + .ivsize = AES_BLOCK_SIZE,
14796 + .maxauthsize = SHA1_DIGEST_SIZE,
14799 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14800 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14801 + OP_ALG_AAI_HMAC_PRECOMP,
14806 +struct caam_crypto_alg {
14807 + struct list_head entry;
14808 + struct crypto_alg crypto_alg;
14809 + struct caam_alg_entry caam;
14812 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
14814 + struct caam_drv_private *priv;
14815 + /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
14816 + static const u8 digest_size[] = {
14818 + SHA1_DIGEST_SIZE,
14819 + SHA224_DIGEST_SIZE,
14820 + SHA256_DIGEST_SIZE,
14821 + SHA384_DIGEST_SIZE,
14822 + SHA512_DIGEST_SIZE
14827 + * distribute tfms across job rings to ensure in-order
14828 + * crypto request processing per tfm
14830 + ctx->jrdev = caam_jr_alloc();
14831 + if (IS_ERR(ctx->jrdev)) {
14832 + pr_err("Job Ring Device allocation for transform failed\n");
14833 + return PTR_ERR(ctx->jrdev);
14836 + ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
14838 + if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
14839 + dev_err(ctx->jrdev, "unable to map key\n");
14840 + caam_jr_free(ctx->jrdev);
14844 + /* copy descriptor header template value */
14845 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
14846 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
14848 + if (ctx->adata.algtype) {
14849 + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
14850 + >> OP_ALG_ALGSEL_SHIFT;
14851 + if (op_id < ARRAY_SIZE(digest_size)) {
14852 + ctx->authsize = digest_size[op_id];
14854 + dev_err(ctx->jrdev,
14855 + "incorrect op_id %d; must be less than %zu\n",
14856 + op_id, ARRAY_SIZE(digest_size));
14857 + caam_jr_free(ctx->jrdev);
14861 + ctx->authsize = 0;
14864 + priv = dev_get_drvdata(ctx->jrdev->parent);
14865 + ctx->qidev = priv->qidev;
14867 + spin_lock_init(&ctx->lock);
14868 + ctx->drv_ctx[ENCRYPT] = NULL;
14869 + ctx->drv_ctx[DECRYPT] = NULL;
14870 + ctx->drv_ctx[GIVENCRYPT] = NULL;
14875 +static int caam_cra_init(struct crypto_tfm *tfm)
14877 + struct crypto_alg *alg = tfm->__crt_alg;
14878 + struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
14880 + struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
14882 + return caam_init_common(ctx, &caam_alg->caam);
14885 +static int caam_aead_init(struct crypto_aead *tfm)
14887 + struct aead_alg *alg = crypto_aead_alg(tfm);
14888 + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
14890 + struct caam_ctx *ctx = crypto_aead_ctx(tfm);
14892 + return caam_init_common(ctx, &caam_alg->caam);
14895 +static void caam_exit_common(struct caam_ctx *ctx)
14897 + caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
14898 + caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
14899 + caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
14901 + dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
14904 + caam_jr_free(ctx->jrdev);
14907 +static void caam_cra_exit(struct crypto_tfm *tfm)
14909 + caam_exit_common(crypto_tfm_ctx(tfm));
14912 +static void caam_aead_exit(struct crypto_aead *tfm)
14914 + caam_exit_common(crypto_aead_ctx(tfm));
14917 +static struct list_head alg_list;
14918 +static void __exit caam_qi_algapi_exit(void)
14920 + struct caam_crypto_alg *t_alg, *n;
14923 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
14924 + struct caam_aead_alg *t_alg = driver_aeads + i;
14926 + if (t_alg->registered)
14927 + crypto_unregister_aead(&t_alg->aead);
14930 + if (!alg_list.next)
14933 + list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
14934 + crypto_unregister_alg(&t_alg->crypto_alg);
14935 + list_del(&t_alg->entry);
14940 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
14943 + struct caam_crypto_alg *t_alg;
14944 + struct crypto_alg *alg;
14946 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
14948 + return ERR_PTR(-ENOMEM);
14950 + alg = &t_alg->crypto_alg;
14952 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
14953 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
14954 + template->driver_name);
14955 + alg->cra_module = THIS_MODULE;
14956 + alg->cra_init = caam_cra_init;
14957 + alg->cra_exit = caam_cra_exit;
14958 + alg->cra_priority = CAAM_CRA_PRIORITY;
14959 + alg->cra_blocksize = template->blocksize;
14960 + alg->cra_alignmask = 0;
14961 + alg->cra_ctxsize = sizeof(struct caam_ctx);
14962 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
14964 + switch (template->type) {
14965 + case CRYPTO_ALG_TYPE_GIVCIPHER:
14966 + alg->cra_type = &crypto_givcipher_type;
14967 + alg->cra_ablkcipher = template->template_ablkcipher;
14969 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
14970 + alg->cra_type = &crypto_ablkcipher_type;
14971 + alg->cra_ablkcipher = template->template_ablkcipher;
14975 + t_alg->caam.class1_alg_type = template->class1_alg_type;
14976 + t_alg->caam.class2_alg_type = template->class2_alg_type;
14981 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
14983 + struct aead_alg *alg = &t_alg->aead;
14985 + alg->base.cra_module = THIS_MODULE;
14986 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
14987 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
14988 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
14990 + alg->init = caam_aead_init;
14991 + alg->exit = caam_aead_exit;
14994 +static int __init caam_qi_algapi_init(void)
14996 + struct device_node *dev_node;
14997 + struct platform_device *pdev;
14998 + struct device *ctrldev;
14999 + struct caam_drv_private *priv;
15000 + int i = 0, err = 0;
15001 + u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
15002 + unsigned int md_limit = SHA512_DIGEST_SIZE;
15003 + bool registered = false;
15005 + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
15007 + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
15012 + pdev = of_find_device_by_node(dev_node);
15013 + of_node_put(dev_node);
15017 + ctrldev = &pdev->dev;
15018 + priv = dev_get_drvdata(ctrldev);
15021 + * If priv is NULL, it's probably because the caam driver wasn't
15022 + * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
15024 + if (!priv || !priv->qi_present)
15027 + if (caam_dpaa2) {
15028 + dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
15032 + INIT_LIST_HEAD(&alg_list);
15035 + * Register crypto algorithms the device supports.
15036 + * First, detect presence and attributes of DES, AES, and MD blocks.
15038 + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
15039 + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
15040 + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
15041 + aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
15042 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
15044 + /* If MD is present, limit digest size based on LP256 */
15045 + if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
15046 + md_limit = SHA256_DIGEST_SIZE;
15048 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
15049 + struct caam_crypto_alg *t_alg;
15050 + struct caam_alg_template *alg = driver_algs + i;
15051 + u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
15053 + /* Skip DES algorithms if not supported by device */
15055 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
15056 + (alg_sel == OP_ALG_ALGSEL_DES)))
15059 + /* Skip AES algorithms if not supported by device */
15060 + if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
15063 + t_alg = caam_alg_alloc(alg);
15064 + if (IS_ERR(t_alg)) {
15065 + err = PTR_ERR(t_alg);
15066 + dev_warn(priv->qidev, "%s alg allocation failed\n",
15067 + alg->driver_name);
15071 + err = crypto_register_alg(&t_alg->crypto_alg);
15073 + dev_warn(priv->qidev, "%s alg registration failed\n",
15074 + t_alg->crypto_alg.cra_driver_name);
15079 + list_add_tail(&t_alg->entry, &alg_list);
15080 + registered = true;
15083 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
15084 + struct caam_aead_alg *t_alg = driver_aeads + i;
15085 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
15086 + OP_ALG_ALGSEL_MASK;
15087 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
15088 + OP_ALG_ALGSEL_MASK;
15089 + u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
15091 + /* Skip DES algorithms if not supported by device */
15093 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
15094 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
15097 + /* Skip AES algorithms if not supported by device */
15098 + if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
15102 + * Check support for AES algorithms not available
15105 + if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
15106 + (alg_aai == OP_ALG_AAI_GCM))
15110 + * Skip algorithms requiring message digests
15111 + * if MD or MD size is not supported by device.
15113 + if (c2_alg_sel &&
15114 + (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
15117 + caam_aead_alg_init(t_alg);
15119 + err = crypto_register_aead(&t_alg->aead);
15121 + pr_warn("%s alg registration failed\n",
15122 + t_alg->aead.base.cra_driver_name);
15126 + t_alg->registered = true;
15127 + registered = true;
15131 + dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
15136 +module_init(caam_qi_algapi_init);
15137 +module_exit(caam_qi_algapi_exit);
15139 +MODULE_LICENSE("GPL");
15140 +MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
15141 +MODULE_AUTHOR("Freescale Semiconductor");
15143 +++ b/drivers/crypto/caam/caamalg_qi2.c
15146 + * Copyright 2015-2016 Freescale Semiconductor Inc.
15147 + * Copyright 2017 NXP
15149 + * Redistribution and use in source and binary forms, with or without
15150 + * modification, are permitted provided that the following conditions are met:
15151 + * * Redistributions of source code must retain the above copyright
15152 + * notice, this list of conditions and the following disclaimer.
15153 + * * Redistributions in binary form must reproduce the above copyright
15154 + * notice, this list of conditions and the following disclaimer in the
15155 + * documentation and/or other materials provided with the distribution.
15156 + * * Neither the names of the above-listed copyright holders nor the
15157 + * names of any contributors may be used to endorse or promote products
15158 + * derived from this software without specific prior written permission.
15161 + * ALTERNATIVELY, this software may be distributed under the terms of the
15162 + * GNU General Public License ("GPL") as published by the Free Software
15163 + * Foundation, either version 2 of that License or (at your option) any
15166 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15167 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15168 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15169 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
15170 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
15171 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
15172 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
15173 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
15174 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
15175 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
15176 + * POSSIBILITY OF SUCH DAMAGE.
15179 +#include "compat.h"
15181 +#include "caamalg_qi2.h"
15182 +#include "dpseci_cmd.h"
15183 +#include "desc_constr.h"
15184 +#include "error.h"
15185 +#include "sg_sw_sec4.h"
15186 +#include "sg_sw_qm2.h"
15187 +#include "key_gen.h"
15188 +#include "caamalg_desc.h"
15189 +#include "../../../drivers/staging/fsl-mc/include/mc.h"
15190 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
15191 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
15193 +#define CAAM_CRA_PRIORITY 2000
15195 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
15196 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
15197 + SHA512_DIGEST_SIZE * 2)
15199 +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
15200 +bool caam_little_end;
15201 +EXPORT_SYMBOL(caam_little_end);
15203 +EXPORT_SYMBOL(caam_imx);
15207 + * This is a a cache of buffers, from which the users of CAAM QI driver
15208 + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
15209 + * NOTE: A more elegant solution would be to have some headroom in the frames
15210 + * being processed. This can be added by the dpaa2-eth driver. This would
15211 + * pose a problem for userspace application processing which cannot
15212 + * know of this limitation. So for now, this will work.
15213 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here
15215 +static struct kmem_cache *qi_cache;
15217 +struct caam_alg_entry {
15218 + struct device *dev;
15219 + int class1_alg_type;
15220 + int class2_alg_type;
15225 +struct caam_aead_alg {
15226 + struct aead_alg aead;
15227 + struct caam_alg_entry caam;
15232 + * caam_ctx - per-session context
15233 + * @flc: Flow Contexts array
15234 + * @key: virtual address of the key(s): [authentication key], encryption key
15235 + * @key_dma: I/O virtual address of the key
15236 + * @dev: dpseci device
15237 + * @adata: authentication algorithm details
15238 + * @cdata: encryption algorithm details
15239 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
15242 + struct caam_flc flc[NUM_OP];
15243 + u8 key[CAAM_MAX_KEY_SIZE];
15244 + dma_addr_t key_dma;
15245 + struct device *dev;
15246 + struct alginfo adata;
15247 + struct alginfo cdata;
15248 + unsigned int authsize;
15251 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
15252 + dma_addr_t iova_addr)
15254 + phys_addr_t phys_addr;
15256 + phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
15259 + return phys_to_virt(phys_addr);
15263 + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
15265 + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
15266 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
15267 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
15268 + * hosting 16 SG entries.
15270 + * @flags - flags that would be used for the equivalent kmalloc(..) call
15272 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
15274 +static inline void *qi_cache_zalloc(gfp_t flags)
15276 + return kmem_cache_zalloc(qi_cache, flags);
15280 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
15282 + * @obj - buffer previously allocated by qi_cache_zalloc
15284 + * No checking is done; the call is a passthrough to
15285 + * kmem_cache_free(...)
15287 +static inline void qi_cache_free(void *obj)
15289 + kmem_cache_free(qi_cache, obj);
15292 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
15294 + switch (crypto_tfm_alg_type(areq->tfm)) {
15295 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
15296 + case CRYPTO_ALG_TYPE_GIVCIPHER:
15297 + return ablkcipher_request_ctx(ablkcipher_request_cast(areq));
15298 + case CRYPTO_ALG_TYPE_AEAD:
15299 + return aead_request_ctx(container_of(areq, struct aead_request,
15302 + return ERR_PTR(-EINVAL);
15306 +static void caam_unmap(struct device *dev, struct scatterlist *src,
15307 + struct scatterlist *dst, int src_nents,
15308 + int dst_nents, dma_addr_t iv_dma, int ivsize,
15309 + enum optype op_type, dma_addr_t qm_sg_dma,
15312 + if (dst != src) {
15314 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
15315 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
15317 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
15321 + dma_unmap_single(dev, iv_dma, ivsize,
15322 + op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
15326 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
15329 +static int aead_set_sh_desc(struct crypto_aead *aead)
15331 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15332 + typeof(*alg), aead);
15333 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15334 + unsigned int ivsize = crypto_aead_ivsize(aead);
15335 + struct device *dev = ctx->dev;
15336 + struct caam_flc *flc;
15338 + u32 ctx1_iv_off = 0;
15339 + u32 *nonce = NULL;
15340 + unsigned int data_len[2];
15342 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
15343 + OP_ALG_AAI_CTR_MOD128);
15344 + const bool is_rfc3686 = alg->caam.rfc3686;
15346 + if (!ctx->cdata.keylen || !ctx->authsize)
15350 + * AES-CTR needs to load IV in CONTEXT1 reg
15351 + * at an offset of 128bits (16bytes)
15352 + * CONTEXT1[255:128] = IV
15355 + ctx1_iv_off = 16;
15358 + * RFC3686 specific:
15359 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
15361 + if (is_rfc3686) {
15362 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
15363 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
15364 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
15367 + data_len[0] = ctx->adata.keylen_pad;
15368 + data_len[1] = ctx->cdata.keylen;
15370 + /* aead_encrypt shared descriptor */
15371 + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
15372 + DESC_QI_AEAD_ENC_LEN) +
15373 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15374 + DESC_JOB_IO_LEN, data_len, &inl_mask,
15375 + ARRAY_SIZE(data_len)) < 0)
15378 + if (inl_mask & 1)
15379 + ctx->adata.key_virt = ctx->key;
15381 + ctx->adata.key_dma = ctx->key_dma;
15383 + if (inl_mask & 2)
15384 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15386 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15388 + ctx->adata.key_inline = !!(inl_mask & 1);
15389 + ctx->cdata.key_inline = !!(inl_mask & 2);
15391 + flc = &ctx->flc[ENCRYPT];
15392 + desc = flc->sh_desc;
15394 + if (alg->caam.geniv)
15395 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
15396 + ivsize, ctx->authsize, is_rfc3686,
15397 + nonce, ctx1_iv_off, true);
15399 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
15400 + ivsize, ctx->authsize, is_rfc3686, nonce,
15401 + ctx1_iv_off, true);
15403 + flc->flc[1] = desc_len(desc); /* SDL */
15404 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15405 + desc_bytes(desc), DMA_TO_DEVICE);
15406 + if (dma_mapping_error(dev, flc->flc_dma)) {
15407 + dev_err(dev, "unable to map shared descriptor\n");
15411 + /* aead_decrypt shared descriptor */
15412 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
15413 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15414 + DESC_JOB_IO_LEN, data_len, &inl_mask,
15415 + ARRAY_SIZE(data_len)) < 0)
15418 + if (inl_mask & 1)
15419 + ctx->adata.key_virt = ctx->key;
15421 + ctx->adata.key_dma = ctx->key_dma;
15423 + if (inl_mask & 2)
15424 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15426 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15428 + ctx->adata.key_inline = !!(inl_mask & 1);
15429 + ctx->cdata.key_inline = !!(inl_mask & 2);
15431 + flc = &ctx->flc[DECRYPT];
15432 + desc = flc->sh_desc;
15434 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
15435 + ivsize, ctx->authsize, alg->caam.geniv,
15436 + is_rfc3686, nonce, ctx1_iv_off, true);
15438 + flc->flc[1] = desc_len(desc); /* SDL */
15439 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15440 + desc_bytes(desc), DMA_TO_DEVICE);
15441 + if (dma_mapping_error(dev, flc->flc_dma)) {
15442 + dev_err(dev, "unable to map shared descriptor\n");
15449 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
15451 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
15453 + ctx->authsize = authsize;
15454 + aead_set_sh_desc(authenc);
15459 +struct split_key_sh_result {
15460 + struct completion completion;
15462 + struct device *dev;
15465 +static void split_key_sh_done(void *cbk_ctx, u32 err)
15467 + struct split_key_sh_result *res = cbk_ctx;
15470 + dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
15474 + caam_qi2_strstatus(res->dev, err);
15477 + complete(&res->completion);
15480 +static int gen_split_key_sh(struct device *dev, u8 *key_out,
15481 + struct alginfo * const adata, const u8 *key_in,
15484 + struct caam_request *req_ctx;
15486 + struct split_key_sh_result result;
15487 + dma_addr_t dma_addr_in, dma_addr_out;
15488 + struct caam_flc *flc;
15489 + struct dpaa2_fl_entry *in_fle, *out_fle;
15490 + int ret = -ENOMEM;
15492 + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
15496 + in_fle = &req_ctx->fd_flt[1];
15497 + out_fle = &req_ctx->fd_flt[0];
15499 + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
15503 + dma_addr_in = dma_map_single(dev, (void *)key_in, keylen,
15505 + if (dma_mapping_error(dev, dma_addr_in)) {
15506 + dev_err(dev, "unable to map key input memory\n");
15507 + goto err_dma_addr_in;
15510 + dma_addr_out = dma_map_single(dev, key_out, adata->keylen_pad,
15511 + DMA_FROM_DEVICE);
15512 + if (dma_mapping_error(dev, dma_addr_out)) {
15513 + dev_err(dev, "unable to map key output memory\n");
15514 + goto err_dma_addr_out;
15517 + desc = flc->sh_desc;
15519 + init_sh_desc(desc, 0);
15520 + append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
15522 + /* Sets MDHA up into an HMAC-INIT */
15523 + append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
15524 + OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
15528 + * do a FIFO_LOAD of zero, this will trigger the internal key expansion
15529 + * into both pads inside MDHA
15531 + append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
15532 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
15535 + * FIFO_STORE with the explicit split-key content store
15536 + * (0x26 output type)
15538 + append_fifo_store(desc, dma_addr_out, adata->keylen,
15539 + LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
15541 + flc->flc[1] = desc_len(desc); /* SDL */
15542 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
15543 + desc_bytes(desc), DMA_TO_DEVICE);
15544 + if (dma_mapping_error(dev, flc->flc_dma)) {
15545 + dev_err(dev, "unable to map shared descriptor\n");
15546 + goto err_flc_dma;
15549 + dpaa2_fl_set_final(in_fle, true);
15550 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
15551 + dpaa2_fl_set_addr(in_fle, dma_addr_in);
15552 + dpaa2_fl_set_len(in_fle, keylen);
15553 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15554 + dpaa2_fl_set_addr(out_fle, dma_addr_out);
15555 + dpaa2_fl_set_len(out_fle, adata->keylen_pad);
15558 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15559 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
15560 + print_hex_dump(KERN_ERR, "desc@" __stringify(__LINE__)": ",
15561 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
15565 + init_completion(&result.completion);
15566 + result.dev = dev;
15568 + req_ctx->flc = flc;
15569 + req_ctx->cbk = split_key_sh_done;
15570 + req_ctx->ctx = &result;
15572 + ret = dpaa2_caam_enqueue(dev, req_ctx);
15573 + if (ret == -EINPROGRESS) {
15574 + /* in progress */
15575 + wait_for_completion(&result.completion);
15576 + ret = result.err;
15578 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15579 + DUMP_PREFIX_ADDRESS, 16, 4, key_out,
15580 + adata->keylen_pad, 1);
15584 + dma_unmap_single(dev, flc->flc_dma, sizeof(flc->flc) + desc_bytes(desc),
15587 + dma_unmap_single(dev, dma_addr_out, adata->keylen_pad, DMA_FROM_DEVICE);
15589 + dma_unmap_single(dev, dma_addr_in, keylen, DMA_TO_DEVICE);
15597 +static int gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
15600 + return gen_split_key_sh(ctx->dev, ctx->key, &ctx->adata, key_in,
15604 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
15605 + unsigned int keylen)
15607 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15608 + struct device *dev = ctx->dev;
15609 + struct crypto_authenc_keys keys;
15612 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
15616 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
15617 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
15618 + keys.authkeylen);
15619 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
15620 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
15623 + ctx->adata.keylen = split_key_len(ctx->adata.algtype &
15624 + OP_ALG_ALGSEL_MASK);
15625 + ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
15626 + OP_ALG_ALGSEL_MASK);
15629 + dev_err(dev, "split keylen %d split keylen padded %d\n",
15630 + ctx->adata.keylen, ctx->adata.keylen_pad);
15631 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15632 + DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, keylen, 1);
15635 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
15638 + ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
15642 + /* append encryption key after the auth split key */
15643 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
15645 + ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
15646 + keys.enckeylen, DMA_TO_DEVICE);
15647 + if (dma_mapping_error(dev, ctx->key_dma)) {
15648 + dev_err(dev, "unable to map key i/o memory\n");
15652 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15653 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
15654 + ctx->adata.keylen_pad + keys.enckeylen, 1);
15657 + ctx->cdata.keylen = keys.enckeylen;
15659 + ret = aead_set_sh_desc(aead);
15661 + dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
15662 + keys.enckeylen, DMA_TO_DEVICE);
15666 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
15670 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
15673 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
15674 + struct caam_request *req_ctx = aead_request_ctx(req);
15675 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15676 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15677 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15678 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15679 + typeof(*alg), aead);
15680 + struct device *dev = ctx->dev;
15681 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
15682 + GFP_KERNEL : GFP_ATOMIC;
15683 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15684 + struct aead_edesc *edesc;
15685 + dma_addr_t qm_sg_dma, iv_dma = 0;
15687 + unsigned int authsize = ctx->authsize;
15688 + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
15689 + int in_len, out_len;
15690 + struct dpaa2_sg_entry *sg_table;
15691 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15693 + /* allocate space for base edesc and link tables */
15694 + edesc = qi_cache_zalloc(GFP_DMA | flags);
15695 + if (unlikely(!edesc)) {
15696 + dev_err(dev, "could not allocate extended descriptor\n");
15697 + return ERR_PTR(-ENOMEM);
15700 + if (unlikely(req->dst != req->src)) {
15701 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15703 + if (unlikely(src_nents < 0)) {
15704 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15705 + req->assoclen + req->cryptlen);
15706 + qi_cache_free(edesc);
15707 + return ERR_PTR(src_nents);
15710 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
15712 + (encrypt ? authsize :
15714 + if (unlikely(dst_nents < 0)) {
15715 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15716 + req->assoclen + req->cryptlen +
15717 + (encrypt ? authsize : (-authsize)));
15718 + qi_cache_free(edesc);
15719 + return ERR_PTR(dst_nents);
15723 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15725 + if (unlikely(!mapped_src_nents)) {
15726 + dev_err(dev, "unable to map source\n");
15727 + qi_cache_free(edesc);
15728 + return ERR_PTR(-ENOMEM);
15731 + mapped_src_nents = 0;
15734 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
15735 + DMA_FROM_DEVICE);
15736 + if (unlikely(!mapped_dst_nents)) {
15737 + dev_err(dev, "unable to map destination\n");
15738 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
15739 + qi_cache_free(edesc);
15740 + return ERR_PTR(-ENOMEM);
15743 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15745 + (encrypt ? authsize : 0));
15746 + if (unlikely(src_nents < 0)) {
15747 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15748 + req->assoclen + req->cryptlen +
15749 + (encrypt ? authsize : 0));
15750 + qi_cache_free(edesc);
15751 + return ERR_PTR(src_nents);
15754 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15755 + DMA_BIDIRECTIONAL);
15756 + if (unlikely(!mapped_src_nents)) {
15757 + dev_err(dev, "unable to map source\n");
15758 + qi_cache_free(edesc);
15759 + return ERR_PTR(-ENOMEM);
15763 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
15764 + ivsize = crypto_aead_ivsize(aead);
15765 + iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
15766 + if (dma_mapping_error(dev, iv_dma)) {
15767 + dev_err(dev, "unable to map IV\n");
15768 + caam_unmap(dev, req->src, req->dst, src_nents,
15769 + dst_nents, 0, 0, op_type, 0, 0);
15770 + qi_cache_free(edesc);
15771 + return ERR_PTR(-ENOMEM);
15776 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
15777 + * Input is not contiguous.
15779 + qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
15780 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
15781 + if (unlikely(qm_sg_nents > CAAM_QI_MAX_AEAD_SG)) {
15782 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
15783 + qm_sg_nents, CAAM_QI_MAX_AEAD_SG);
15784 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15785 + iv_dma, ivsize, op_type, 0, 0);
15786 + qi_cache_free(edesc);
15787 + return ERR_PTR(-ENOMEM);
15789 + sg_table = &edesc->sgt[0];
15790 + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
15792 + edesc->src_nents = src_nents;
15793 + edesc->dst_nents = dst_nents;
15794 + edesc->iv_dma = iv_dma;
15796 + edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4,
15798 + if (dma_mapping_error(dev, edesc->assoclen_dma)) {
15799 + dev_err(dev, "unable to map assoclen\n");
15800 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15801 + iv_dma, ivsize, op_type, 0, 0);
15802 + qi_cache_free(edesc);
15803 + return ERR_PTR(-ENOMEM);
15806 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
15809 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
15812 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
15813 + qm_sg_index += mapped_src_nents;
15815 + if (mapped_dst_nents > 1)
15816 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
15819 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
15820 + if (dma_mapping_error(dev, qm_sg_dma)) {
15821 + dev_err(dev, "unable to map S/G table\n");
15822 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
15823 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15824 + iv_dma, ivsize, op_type, 0, 0);
15825 + qi_cache_free(edesc);
15826 + return ERR_PTR(-ENOMEM);
15829 + edesc->qm_sg_dma = qm_sg_dma;
15830 + edesc->qm_sg_bytes = qm_sg_bytes;
15832 + out_len = req->assoclen + req->cryptlen +
15833 + (encrypt ? ctx->authsize : (-ctx->authsize));
15834 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
15836 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
15837 + dpaa2_fl_set_final(in_fle, true);
15838 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
15839 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
15840 + dpaa2_fl_set_len(in_fle, in_len);
15842 + if (req->dst == req->src) {
15843 + if (mapped_src_nents == 1) {
15844 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15845 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
15847 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15848 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
15849 + (1 + !!ivsize) * sizeof(*sg_table));
15851 + } else if (mapped_dst_nents == 1) {
15852 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15853 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
15855 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15856 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
15857 + sizeof(*sg_table));
15860 + dpaa2_fl_set_len(out_fle, out_len);
15865 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
15868 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
15869 + unsigned int blocksize = crypto_aead_blocksize(tls);
15870 + unsigned int padsize, authsize;
15871 + struct caam_request *req_ctx = aead_request_ctx(req);
15872 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15873 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15874 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
15875 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
15876 + typeof(*alg), aead);
15877 + struct device *dev = ctx->dev;
15878 + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
15879 + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
15880 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15881 + struct tls_edesc *edesc;
15882 + dma_addr_t qm_sg_dma, iv_dma = 0;
15884 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
15885 + int in_len, out_len;
15886 + struct dpaa2_sg_entry *sg_table;
15887 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15888 + struct scatterlist *dst;
15891 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
15893 + authsize = ctx->authsize + padsize;
15895 + authsize = ctx->authsize;
15898 + /* allocate space for base edesc and link tables */
15899 + edesc = qi_cache_zalloc(GFP_DMA | flags);
15900 + if (unlikely(!edesc)) {
15901 + dev_err(dev, "could not allocate extended descriptor\n");
15902 + return ERR_PTR(-ENOMEM);
15905 + if (likely(req->src == req->dst)) {
15906 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15908 + (encrypt ? authsize : 0));
15909 + if (unlikely(src_nents < 0)) {
15910 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15911 + req->assoclen + req->cryptlen +
15912 + (encrypt ? authsize : 0));
15913 + qi_cache_free(edesc);
15914 + return ERR_PTR(src_nents);
15917 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15918 + DMA_BIDIRECTIONAL);
15919 + if (unlikely(!mapped_src_nents)) {
15920 + dev_err(dev, "unable to map source\n");
15921 + qi_cache_free(edesc);
15922 + return ERR_PTR(-ENOMEM);
15926 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15928 + if (unlikely(src_nents < 0)) {
15929 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15930 + req->assoclen + req->cryptlen);
15931 + qi_cache_free(edesc);
15932 + return ERR_PTR(src_nents);
15935 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
15936 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
15937 + (encrypt ? authsize : 0));
15938 + if (unlikely(dst_nents < 0)) {
15939 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15941 + (encrypt ? authsize : 0));
15942 + qi_cache_free(edesc);
15943 + return ERR_PTR(dst_nents);
15947 + mapped_src_nents = dma_map_sg(dev, req->src,
15948 + src_nents, DMA_TO_DEVICE);
15949 + if (unlikely(!mapped_src_nents)) {
15950 + dev_err(dev, "unable to map source\n");
15951 + qi_cache_free(edesc);
15952 + return ERR_PTR(-ENOMEM);
15955 + mapped_src_nents = 0;
15958 + mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
15959 + DMA_FROM_DEVICE);
15960 + if (unlikely(!mapped_dst_nents)) {
15961 + dev_err(dev, "unable to map destination\n");
15962 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
15963 + qi_cache_free(edesc);
15964 + return ERR_PTR(-ENOMEM);
15968 + ivsize = crypto_aead_ivsize(tls);
15969 + iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
15970 + if (dma_mapping_error(dev, iv_dma)) {
15971 + dev_err(dev, "unable to map IV\n");
15972 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0,
15974 + qi_cache_free(edesc);
15975 + return ERR_PTR(-ENOMEM);
15979 + * Create S/G table: IV, src, dst.
15980 + * Input is not contiguous.
15982 + qm_sg_ents = 1 + mapped_src_nents +
15983 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
15984 + sg_table = &edesc->sgt[0];
15985 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
15987 + edesc->src_nents = src_nents;
15988 + edesc->dst_nents = dst_nents;
15989 + edesc->dst = dst;
15990 + edesc->iv_dma = iv_dma;
15992 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
15995 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
15996 + qm_sg_index += mapped_src_nents;
15998 + if (mapped_dst_nents > 1)
15999 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
16002 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
16003 + if (dma_mapping_error(dev, qm_sg_dma)) {
16004 + dev_err(dev, "unable to map S/G table\n");
16005 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
16006 + ivsize, op_type, 0, 0);
16007 + qi_cache_free(edesc);
16008 + return ERR_PTR(-ENOMEM);
16011 + edesc->qm_sg_dma = qm_sg_dma;
16012 + edesc->qm_sg_bytes = qm_sg_bytes;
16014 + out_len = req->cryptlen + (encrypt ? authsize : 0);
16015 + in_len = ivsize + req->assoclen + req->cryptlen;
16017 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16018 + dpaa2_fl_set_final(in_fle, true);
16019 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16020 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
16021 + dpaa2_fl_set_len(in_fle, in_len);
16023 + if (req->dst == req->src) {
16024 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16025 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
16026 + (sg_nents_for_len(req->src, req->assoclen) +
16027 + 1) * sizeof(*sg_table));
16028 + } else if (mapped_dst_nents == 1) {
16029 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16030 + dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
16032 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16033 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
16034 + sizeof(*sg_table));
16037 + dpaa2_fl_set_len(out_fle, out_len);
16042 +static int tls_set_sh_desc(struct crypto_aead *tls)
16044 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16045 + unsigned int ivsize = crypto_aead_ivsize(tls);
16046 + unsigned int blocksize = crypto_aead_blocksize(tls);
16047 + struct device *dev = ctx->dev;
16048 + struct caam_flc *flc;
16050 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
16051 + unsigned int data_len[2];
16054 + if (!ctx->cdata.keylen || !ctx->authsize)
16058 + * TLS 1.0 encrypt shared descriptor
16059 + * Job Descriptor and Shared Descriptor
16060 + * must fit into the 64-word Descriptor h/w Buffer
16062 + data_len[0] = ctx->adata.keylen_pad;
16063 + data_len[1] = ctx->cdata.keylen;
16065 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
16066 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
16069 + if (inl_mask & 1)
16070 + ctx->adata.key_virt = ctx->key;
16072 + ctx->adata.key_dma = ctx->key_dma;
16074 + if (inl_mask & 2)
16075 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
16077 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16079 + ctx->adata.key_inline = !!(inl_mask & 1);
16080 + ctx->cdata.key_inline = !!(inl_mask & 2);
16082 + flc = &ctx->flc[ENCRYPT];
16083 + desc = flc->sh_desc;
16085 + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
16086 + assoclen, ivsize, ctx->authsize, blocksize);
16088 + flc->flc[1] = desc_len(desc);
16089 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16090 + desc_bytes(desc), DMA_TO_DEVICE);
16092 + if (dma_mapping_error(dev, flc->flc_dma)) {
16093 + dev_err(dev, "unable to map shared descriptor\n");
16098 + * TLS 1.0 decrypt shared descriptor
16099 + * Keys do not fit inline, regardless of algorithms used
16101 + ctx->adata.key_dma = ctx->key_dma;
16102 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16104 + flc = &ctx->flc[DECRYPT];
16105 + desc = flc->sh_desc;
16107 + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
16108 + ctx->authsize, blocksize);
16110 + flc->flc[1] = desc_len(desc); /* SDL */
16111 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16112 + desc_bytes(desc), DMA_TO_DEVICE);
16113 + if (dma_mapping_error(dev, flc->flc_dma)) {
16114 + dev_err(dev, "unable to map shared descriptor\n");
16121 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
16122 + unsigned int keylen)
16124 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16125 + struct device *dev = ctx->dev;
16126 + struct crypto_authenc_keys keys;
16129 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
16133 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
16134 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
16135 + keys.authkeylen);
16136 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16137 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16140 + ctx->adata.keylen = split_key_len(ctx->adata.algtype &
16141 + OP_ALG_ALGSEL_MASK);
16142 + ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
16143 + OP_ALG_ALGSEL_MASK);
16146 + dev_err(dev, "split keylen %d split keylen padded %d\n",
16147 + ctx->adata.keylen, ctx->adata.keylen_pad);
16148 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16149 + DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey,
16150 + keys.authkeylen + keys.enckeylen, 1);
16153 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
16156 + ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
16160 + /* append encryption key after the auth split key */
16161 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
16163 + ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
16164 + keys.enckeylen, DMA_TO_DEVICE);
16165 + if (dma_mapping_error(dev, ctx->key_dma)) {
16166 + dev_err(dev, "unable to map key i/o memory\n");
16170 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16171 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
16172 + ctx->adata.keylen_pad + keys.enckeylen, 1);
16175 + ctx->cdata.keylen = keys.enckeylen;
16177 + ret = tls_set_sh_desc(tls);
16179 + dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
16180 + keys.enckeylen, DMA_TO_DEVICE);
16184 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
16188 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
16190 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16192 + ctx->authsize = authsize;
16193 + tls_set_sh_desc(tls);
16198 +static int gcm_set_sh_desc(struct crypto_aead *aead)
16200 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16201 + struct device *dev = ctx->dev;
16202 + unsigned int ivsize = crypto_aead_ivsize(aead);
16203 + struct caam_flc *flc;
16205 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16206 + ctx->cdata.keylen;
16208 + if (!ctx->cdata.keylen || !ctx->authsize)
16212 + * AES GCM encrypt shared descriptor
16213 + * Job Descriptor and Shared Descriptor
16214 + * must fit into the 64-word Descriptor h/w Buffer
16216 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
16217 + ctx->cdata.key_inline = true;
16218 + ctx->cdata.key_virt = ctx->key;
16220 + ctx->cdata.key_inline = false;
16221 + ctx->cdata.key_dma = ctx->key_dma;
16224 + flc = &ctx->flc[ENCRYPT];
16225 + desc = flc->sh_desc;
16226 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16228 + flc->flc[1] = desc_len(desc); /* SDL */
16229 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16230 + desc_bytes(desc), DMA_TO_DEVICE);
16231 + if (dma_mapping_error(dev, flc->flc_dma)) {
16232 + dev_err(dev, "unable to map shared descriptor\n");
16237 + * Job Descriptor and Shared Descriptors
16238 + * must all fit into the 64-word Descriptor h/w Buffer
16240 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
16241 + ctx->cdata.key_inline = true;
16242 + ctx->cdata.key_virt = ctx->key;
16244 + ctx->cdata.key_inline = false;
16245 + ctx->cdata.key_dma = ctx->key_dma;
16248 + flc = &ctx->flc[DECRYPT];
16249 + desc = flc->sh_desc;
16250 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16252 + flc->flc[1] = desc_len(desc); /* SDL */
16253 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16254 + desc_bytes(desc), DMA_TO_DEVICE);
16255 + if (dma_mapping_error(dev, flc->flc_dma)) {
16256 + dev_err(dev, "unable to map shared descriptor\n");
16263 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
16265 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16267 + ctx->authsize = authsize;
16268 + gcm_set_sh_desc(authenc);
16273 +static int gcm_setkey(struct crypto_aead *aead,
16274 + const u8 *key, unsigned int keylen)
16276 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16277 + struct device *dev = ctx->dev;
16281 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16282 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16285 + memcpy(ctx->key, key, keylen);
16286 + ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16287 + if (dma_mapping_error(dev, ctx->key_dma)) {
16288 + dev_err(dev, "unable to map key i/o memory\n");
16291 + ctx->cdata.keylen = keylen;
16293 + ret = gcm_set_sh_desc(aead);
16295 + dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16301 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
16303 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16304 + struct device *dev = ctx->dev;
16305 + unsigned int ivsize = crypto_aead_ivsize(aead);
16306 + struct caam_flc *flc;
16308 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16309 + ctx->cdata.keylen;
16311 + if (!ctx->cdata.keylen || !ctx->authsize)
16314 + ctx->cdata.key_virt = ctx->key;
16317 + * RFC4106 encrypt shared descriptor
16318 + * Job Descriptor and Shared Descriptor
16319 + * must fit into the 64-word Descriptor h/w Buffer
16321 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
16322 + ctx->cdata.key_inline = true;
16324 + ctx->cdata.key_inline = false;
16325 + ctx->cdata.key_dma = ctx->key_dma;
16328 + flc = &ctx->flc[ENCRYPT];
16329 + desc = flc->sh_desc;
16330 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16333 + flc->flc[1] = desc_len(desc); /* SDL */
16334 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16335 + desc_bytes(desc), DMA_TO_DEVICE);
16336 + if (dma_mapping_error(dev, flc->flc_dma)) {
16337 + dev_err(dev, "unable to map shared descriptor\n");
16342 + * Job Descriptor and Shared Descriptors
16343 + * must all fit into the 64-word Descriptor h/w Buffer
16345 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
16346 + ctx->cdata.key_inline = true;
16348 + ctx->cdata.key_inline = false;
16349 + ctx->cdata.key_dma = ctx->key_dma;
16352 + flc = &ctx->flc[DECRYPT];
16353 + desc = flc->sh_desc;
16354 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16357 + flc->flc[1] = desc_len(desc); /* SDL */
16358 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16359 + desc_bytes(desc), DMA_TO_DEVICE);
16360 + if (dma_mapping_error(dev, flc->flc_dma)) {
16361 + dev_err(dev, "unable to map shared descriptor\n");
16368 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
16369 + unsigned int authsize)
16371 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16373 + ctx->authsize = authsize;
16374 + rfc4106_set_sh_desc(authenc);
16379 +static int rfc4106_setkey(struct crypto_aead *aead,
16380 + const u8 *key, unsigned int keylen)
16382 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16383 + struct device *dev = ctx->dev;
16390 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16391 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16394 + memcpy(ctx->key, key, keylen);
16396 + * The last four bytes of the key material are used as the salt value
16397 + * in the nonce. Update the AES key length.
16399 + ctx->cdata.keylen = keylen - 4;
16400 + ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
16402 + if (dma_mapping_error(dev, ctx->key_dma)) {
16403 + dev_err(dev, "unable to map key i/o memory\n");
16407 + ret = rfc4106_set_sh_desc(aead);
16409 + dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16415 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
16417 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16418 + struct device *dev = ctx->dev;
16419 + unsigned int ivsize = crypto_aead_ivsize(aead);
16420 + struct caam_flc *flc;
16422 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16423 + ctx->cdata.keylen;
16425 + if (!ctx->cdata.keylen || !ctx->authsize)
16428 + ctx->cdata.key_virt = ctx->key;
16431 + * RFC4543 encrypt shared descriptor
16432 + * Job Descriptor and Shared Descriptor
16433 + * must fit into the 64-word Descriptor h/w Buffer
16435 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
16436 + ctx->cdata.key_inline = true;
16438 + ctx->cdata.key_inline = false;
16439 + ctx->cdata.key_dma = ctx->key_dma;
16442 + flc = &ctx->flc[ENCRYPT];
16443 + desc = flc->sh_desc;
16444 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16447 + flc->flc[1] = desc_len(desc); /* SDL */
16448 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16449 + desc_bytes(desc), DMA_TO_DEVICE);
16450 + if (dma_mapping_error(dev, flc->flc_dma)) {
16451 + dev_err(dev, "unable to map shared descriptor\n");
16456 + * Job Descriptor and Shared Descriptors
16457 + * must all fit into the 64-word Descriptor h/w Buffer
16459 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
16460 + ctx->cdata.key_inline = true;
16462 + ctx->cdata.key_inline = false;
16463 + ctx->cdata.key_dma = ctx->key_dma;
16466 + flc = &ctx->flc[DECRYPT];
16467 + desc = flc->sh_desc;
16468 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16471 + flc->flc[1] = desc_len(desc); /* SDL */
16472 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16473 + desc_bytes(desc), DMA_TO_DEVICE);
16474 + if (dma_mapping_error(dev, flc->flc_dma)) {
16475 + dev_err(dev, "unable to map shared descriptor\n");
16482 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
16483 + unsigned int authsize)
16485 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16487 + ctx->authsize = authsize;
16488 + rfc4543_set_sh_desc(authenc);
16493 +static int rfc4543_setkey(struct crypto_aead *aead,
16494 + const u8 *key, unsigned int keylen)
16496 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16497 + struct device *dev = ctx->dev;
16504 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16505 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16508 + memcpy(ctx->key, key, keylen);
16510 + * The last four bytes of the key material are used as the salt value
16511 + * in the nonce. Update the AES key length.
16513 + ctx->cdata.keylen = keylen - 4;
16514 + ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
16516 + if (dma_mapping_error(dev, ctx->key_dma)) {
16517 + dev_err(dev, "unable to map key i/o memory\n");
16521 + ret = rfc4543_set_sh_desc(aead);
16523 + dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
16529 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16530 + const u8 *key, unsigned int keylen)
16532 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16533 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
16534 + const char *alg_name = crypto_tfm_alg_name(tfm);
16535 + struct device *dev = ctx->dev;
16536 + struct caam_flc *flc;
16537 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16539 + u32 ctx1_iv_off = 0;
16540 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
16541 + OP_ALG_AAI_CTR_MOD128);
16542 + const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
16544 + memcpy(ctx->key, key, keylen);
16546 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16547 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16550 + * AES-CTR needs to load IV in CONTEXT1 reg
16551 + * at an offset of 128bits (16bytes)
16552 + * CONTEXT1[255:128] = IV
16555 + ctx1_iv_off = 16;
16558 + * RFC3686 specific:
16559 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
16560 + * | *key = {KEY, NONCE}
16562 + if (is_rfc3686) {
16563 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
16564 + keylen -= CTR_RFC3686_NONCE_SIZE;
16567 + ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16568 + if (dma_mapping_error(dev, ctx->key_dma)) {
16569 + dev_err(dev, "unable to map key i/o memory\n");
16572 + ctx->cdata.keylen = keylen;
16573 + ctx->cdata.key_virt = ctx->key;
16574 + ctx->cdata.key_inline = true;
16576 + /* ablkcipher_encrypt shared descriptor */
16577 + flc = &ctx->flc[ENCRYPT];
16578 + desc = flc->sh_desc;
16580 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
16581 + is_rfc3686, ctx1_iv_off);
16583 + flc->flc[1] = desc_len(desc); /* SDL */
16584 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16585 + desc_bytes(desc), DMA_TO_DEVICE);
16586 + if (dma_mapping_error(dev, flc->flc_dma)) {
16587 + dev_err(dev, "unable to map shared descriptor\n");
16591 + /* ablkcipher_decrypt shared descriptor */
16592 + flc = &ctx->flc[DECRYPT];
16593 + desc = flc->sh_desc;
16595 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
16596 + is_rfc3686, ctx1_iv_off);
16598 + flc->flc[1] = desc_len(desc); /* SDL */
16599 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16600 + desc_bytes(desc), DMA_TO_DEVICE);
16601 + if (dma_mapping_error(dev, flc->flc_dma)) {
16602 + dev_err(dev, "unable to map shared descriptor\n");
16606 + /* ablkcipher_givencrypt shared descriptor */
16607 + flc = &ctx->flc[GIVENCRYPT];
16608 + desc = flc->sh_desc;
16610 + cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
16611 + ivsize, is_rfc3686, ctx1_iv_off);
16613 + flc->flc[1] = desc_len(desc); /* SDL */
16614 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16615 + desc_bytes(desc), DMA_TO_DEVICE);
16616 + if (dma_mapping_error(dev, flc->flc_dma)) {
16617 + dev_err(dev, "unable to map shared descriptor\n");
16624 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16625 + const u8 *key, unsigned int keylen)
16627 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16628 + struct device *dev = ctx->dev;
16629 + struct caam_flc *flc;
16632 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
16633 + dev_err(dev, "key size mismatch\n");
16634 + crypto_ablkcipher_set_flags(ablkcipher,
16635 + CRYPTO_TFM_RES_BAD_KEY_LEN);
16639 + memcpy(ctx->key, key, keylen);
16640 + ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
16641 + if (dma_mapping_error(dev, ctx->key_dma)) {
16642 + dev_err(dev, "unable to map key i/o memory\n");
16645 + ctx->cdata.keylen = keylen;
16646 + ctx->cdata.key_virt = ctx->key;
16647 + ctx->cdata.key_inline = true;
16649 + /* xts_ablkcipher_encrypt shared descriptor */
16650 + flc = &ctx->flc[ENCRYPT];
16651 + desc = flc->sh_desc;
16652 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
16654 + flc->flc[1] = desc_len(desc); /* SDL */
16655 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16656 + desc_bytes(desc), DMA_TO_DEVICE);
16657 + if (dma_mapping_error(dev, flc->flc_dma)) {
16658 + dev_err(dev, "unable to map shared descriptor\n");
16662 + /* xts_ablkcipher_decrypt shared descriptor */
16663 + flc = &ctx->flc[DECRYPT];
16664 + desc = flc->sh_desc;
16666 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
16668 + flc->flc[1] = desc_len(desc); /* SDL */
16669 + flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
16670 + desc_bytes(desc), DMA_TO_DEVICE);
16671 + if (dma_mapping_error(dev, flc->flc_dma)) {
16672 + dev_err(dev, "unable to map shared descriptor\n");
16679 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
16680 + *req, bool encrypt)
16682 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16683 + struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16684 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16685 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16686 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16687 + struct device *dev = ctx->dev;
16688 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16689 + GFP_KERNEL : GFP_ATOMIC;
16690 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
16691 + struct ablkcipher_edesc *edesc;
16692 + dma_addr_t iv_dma;
16694 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16695 + int dst_sg_idx, qm_sg_ents;
16696 + struct dpaa2_sg_entry *sg_table;
16697 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
16699 + src_nents = sg_nents_for_len(req->src, req->nbytes);
16700 + if (unlikely(src_nents < 0)) {
16701 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16703 + return ERR_PTR(src_nents);
16706 + if (unlikely(req->dst != req->src)) {
16707 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16708 + if (unlikely(dst_nents < 0)) {
16709 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16711 + return ERR_PTR(dst_nents);
16714 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16716 + if (unlikely(!mapped_src_nents)) {
16717 + dev_err(dev, "unable to map source\n");
16718 + return ERR_PTR(-ENOMEM);
16721 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16722 + DMA_FROM_DEVICE);
16723 + if (unlikely(!mapped_dst_nents)) {
16724 + dev_err(dev, "unable to map destination\n");
16725 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16726 + return ERR_PTR(-ENOMEM);
16729 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16730 + DMA_BIDIRECTIONAL);
16731 + if (unlikely(!mapped_src_nents)) {
16732 + dev_err(dev, "unable to map source\n");
16733 + return ERR_PTR(-ENOMEM);
16737 + iv_dma = dma_map_single(dev, req->info, ivsize, DMA_TO_DEVICE);
16738 + if (dma_mapping_error(dev, iv_dma)) {
16739 + dev_err(dev, "unable to map IV\n");
16740 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16742 + return ERR_PTR(-ENOMEM);
16745 + if (mapped_src_nents == 1 &&
16746 + iv_dma + ivsize == sg_dma_address(req->src)) {
16747 + in_contig = true;
16750 + in_contig = false;
16751 + qm_sg_ents = 1 + mapped_src_nents;
16753 + dst_sg_idx = qm_sg_ents;
16755 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
16756 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16757 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16758 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16759 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16760 + iv_dma, ivsize, op_type, 0, 0);
16761 + return ERR_PTR(-ENOMEM);
16764 + /* allocate space for base edesc and link tables */
16765 + edesc = qi_cache_zalloc(GFP_DMA | flags);
16766 + if (unlikely(!edesc)) {
16767 + dev_err(dev, "could not allocate extended descriptor\n");
16768 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16769 + iv_dma, ivsize, op_type, 0, 0);
16770 + return ERR_PTR(-ENOMEM);
16773 + edesc->src_nents = src_nents;
16774 + edesc->dst_nents = dst_nents;
16775 + edesc->iv_dma = iv_dma;
16776 + sg_table = &edesc->sgt[0];
16777 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16779 + if (!in_contig) {
16780 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16781 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
16784 + if (mapped_dst_nents > 1)
16785 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16788 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16790 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16791 + dev_err(dev, "unable to map S/G table\n");
16792 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16793 + iv_dma, ivsize, op_type, 0, 0);
16794 + qi_cache_free(edesc);
16795 + return ERR_PTR(-ENOMEM);
16798 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16799 + dpaa2_fl_set_final(in_fle, true);
16800 + dpaa2_fl_set_len(in_fle, req->nbytes + ivsize);
16801 + dpaa2_fl_set_len(out_fle, req->nbytes);
16803 + if (!in_contig) {
16804 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16805 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16807 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16808 + dpaa2_fl_set_addr(in_fle, iv_dma);
16811 + if (req->src == req->dst) {
16812 + if (!in_contig) {
16813 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16814 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
16815 + sizeof(*sg_table));
16817 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16818 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
16820 + } else if (mapped_dst_nents > 1) {
16821 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16822 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16823 + sizeof(*sg_table));
16825 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16826 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16832 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
16833 + struct skcipher_givcrypt_request *greq)
16835 + struct ablkcipher_request *req = &greq->creq;
16836 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16837 + struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16838 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16839 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16840 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16841 + struct device *dev = ctx->dev;
16842 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16843 + GFP_KERNEL : GFP_ATOMIC;
16844 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
16845 + struct ablkcipher_edesc *edesc;
16846 + dma_addr_t iv_dma;
16848 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16849 + struct dpaa2_sg_entry *sg_table;
16850 + int dst_sg_idx, qm_sg_ents;
16852 + src_nents = sg_nents_for_len(req->src, req->nbytes);
16853 + if (unlikely(src_nents < 0)) {
16854 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16856 + return ERR_PTR(src_nents);
16859 + if (unlikely(req->dst != req->src)) {
16860 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16861 + if (unlikely(dst_nents < 0)) {
16862 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16864 + return ERR_PTR(dst_nents);
16867 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16869 + if (unlikely(!mapped_src_nents)) {
16870 + dev_err(dev, "unable to map source\n");
16871 + return ERR_PTR(-ENOMEM);
16874 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16875 + DMA_FROM_DEVICE);
16876 + if (unlikely(!mapped_dst_nents)) {
16877 + dev_err(dev, "unable to map destination\n");
16878 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16879 + return ERR_PTR(-ENOMEM);
16882 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16883 + DMA_BIDIRECTIONAL);
16884 + if (unlikely(!mapped_src_nents)) {
16885 + dev_err(dev, "unable to map source\n");
16886 + return ERR_PTR(-ENOMEM);
16889 + dst_nents = src_nents;
16890 + mapped_dst_nents = src_nents;
16893 + iv_dma = dma_map_single(dev, greq->giv, ivsize, DMA_FROM_DEVICE);
16894 + if (dma_mapping_error(dev, iv_dma)) {
16895 + dev_err(dev, "unable to map IV\n");
16896 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16898 + return ERR_PTR(-ENOMEM);
16901 + qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
16902 + dst_sg_idx = qm_sg_ents;
16903 + if (mapped_dst_nents == 1 &&
16904 + iv_dma + ivsize == sg_dma_address(req->dst)) {
16905 + out_contig = true;
16907 + out_contig = false;
16908 + qm_sg_ents += 1 + mapped_dst_nents;
16911 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16912 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16913 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16914 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16915 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16916 + return ERR_PTR(-ENOMEM);
16919 + /* allocate space for base edesc and link tables */
16920 + edesc = qi_cache_zalloc(GFP_DMA | flags);
16922 + dev_err(dev, "could not allocate extended descriptor\n");
16923 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16924 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16925 + return ERR_PTR(-ENOMEM);
16928 + edesc->src_nents = src_nents;
16929 + edesc->dst_nents = dst_nents;
16930 + edesc->iv_dma = iv_dma;
16931 + sg_table = &edesc->sgt[0];
16932 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16934 + if (mapped_src_nents > 1)
16935 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
16937 + if (!out_contig) {
16938 + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
16939 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16940 + dst_sg_idx + 1, 0);
16943 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16945 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16946 + dev_err(dev, "unable to map S/G table\n");
16947 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16948 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16949 + qi_cache_free(edesc);
16950 + return ERR_PTR(-ENOMEM);
16953 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16954 + dpaa2_fl_set_final(in_fle, true);
16955 + dpaa2_fl_set_len(in_fle, req->nbytes);
16956 + dpaa2_fl_set_len(out_fle, ivsize + req->nbytes);
16958 + if (mapped_src_nents > 1) {
16959 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16960 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16962 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16963 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
16966 + if (!out_contig) {
16967 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16968 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16969 + sizeof(*sg_table));
16971 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16972 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16978 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
16979 + struct aead_request *req)
16981 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
16982 + int ivsize = crypto_aead_ivsize(aead);
16983 + struct caam_request *caam_req = aead_request_ctx(req);
16985 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
16986 + edesc->iv_dma, ivsize, caam_req->op_type,
16987 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
16988 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
16991 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
16992 + struct aead_request *req)
16994 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
16995 + int ivsize = crypto_aead_ivsize(tls);
16996 + struct caam_request *caam_req = aead_request_ctx(req);
16998 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
16999 + edesc->dst_nents, edesc->iv_dma, ivsize, caam_req->op_type,
17000 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
17003 +static void ablkcipher_unmap(struct device *dev,
17004 + struct ablkcipher_edesc *edesc,
17005 + struct ablkcipher_request *req)
17007 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17008 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17009 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17011 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
17012 + edesc->iv_dma, ivsize, caam_req->op_type,
17013 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
17016 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
17018 + struct crypto_async_request *areq = cbk_ctx;
17019 + struct aead_request *req = container_of(areq, struct aead_request,
17021 + struct caam_request *req_ctx = to_caam_req(areq);
17022 + struct aead_edesc *edesc = req_ctx->edesc;
17023 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17024 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17028 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17031 + if (unlikely(status)) {
17032 + caam_qi2_strstatus(ctx->dev, status);
17036 + aead_unmap(ctx->dev, edesc, req);
17037 + qi_cache_free(edesc);
17038 + aead_request_complete(req, ecode);
17041 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
17043 + struct crypto_async_request *areq = cbk_ctx;
17044 + struct aead_request *req = container_of(areq, struct aead_request,
17046 + struct caam_request *req_ctx = to_caam_req(areq);
17047 + struct aead_edesc *edesc = req_ctx->edesc;
17048 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17049 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17053 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17056 + if (unlikely(status)) {
17057 + caam_qi2_strstatus(ctx->dev, status);
17059 + * verify hw auth check passed else return -EBADMSG
17061 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17062 + JRSTA_CCBERR_ERRID_ICVCHK)
17063 + ecode = -EBADMSG;
17068 + aead_unmap(ctx->dev, edesc, req);
17069 + qi_cache_free(edesc);
17070 + aead_request_complete(req, ecode);
17073 +static int aead_encrypt(struct aead_request *req)
17075 + struct aead_edesc *edesc;
17076 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17077 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17078 + struct caam_request *caam_req = aead_request_ctx(req);
17081 + /* allocate extended descriptor */
17082 + edesc = aead_edesc_alloc(req, true);
17083 + if (IS_ERR(edesc))
17084 + return PTR_ERR(edesc);
17086 + caam_req->flc = &ctx->flc[ENCRYPT];
17087 + caam_req->op_type = ENCRYPT;
17088 + caam_req->cbk = aead_encrypt_done;
17089 + caam_req->ctx = &req->base;
17090 + caam_req->edesc = edesc;
17091 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17092 + if (ret != -EINPROGRESS &&
17093 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17094 + aead_unmap(ctx->dev, edesc, req);
17095 + qi_cache_free(edesc);
17101 +static int aead_decrypt(struct aead_request *req)
17103 + struct aead_edesc *edesc;
17104 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17105 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17106 + struct caam_request *caam_req = aead_request_ctx(req);
17109 + /* allocate extended descriptor */
17110 + edesc = aead_edesc_alloc(req, false);
17111 + if (IS_ERR(edesc))
17112 + return PTR_ERR(edesc);
17114 + caam_req->flc = &ctx->flc[DECRYPT];
17115 + caam_req->op_type = DECRYPT;
17116 + caam_req->cbk = aead_decrypt_done;
17117 + caam_req->ctx = &req->base;
17118 + caam_req->edesc = edesc;
17119 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17120 + if (ret != -EINPROGRESS &&
17121 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17122 + aead_unmap(ctx->dev, edesc, req);
17123 + qi_cache_free(edesc);
17129 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
17131 + struct crypto_async_request *areq = cbk_ctx;
17132 + struct aead_request *req = container_of(areq, struct aead_request,
17134 + struct caam_request *req_ctx = to_caam_req(areq);
17135 + struct tls_edesc *edesc = req_ctx->edesc;
17136 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17137 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17141 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17144 + if (unlikely(status)) {
17145 + caam_qi2_strstatus(ctx->dev, status);
17149 + tls_unmap(ctx->dev, edesc, req);
17150 + qi_cache_free(edesc);
17151 + aead_request_complete(req, ecode);
17154 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
17156 + struct crypto_async_request *areq = cbk_ctx;
17157 + struct aead_request *req = container_of(areq, struct aead_request,
17159 + struct caam_request *req_ctx = to_caam_req(areq);
17160 + struct tls_edesc *edesc = req_ctx->edesc;
17161 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17162 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17166 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17169 + if (unlikely(status)) {
17170 + caam_qi2_strstatus(ctx->dev, status);
17172 + * verify hw auth check passed else return -EBADMSG
17174 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17175 + JRSTA_CCBERR_ERRID_ICVCHK)
17176 + ecode = -EBADMSG;
17181 + tls_unmap(ctx->dev, edesc, req);
17182 + qi_cache_free(edesc);
17183 + aead_request_complete(req, ecode);
17186 +static int tls_encrypt(struct aead_request *req)
17188 + struct tls_edesc *edesc;
17189 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17190 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17191 + struct caam_request *caam_req = aead_request_ctx(req);
17194 + /* allocate extended descriptor */
17195 + edesc = tls_edesc_alloc(req, true);
17196 + if (IS_ERR(edesc))
17197 + return PTR_ERR(edesc);
17199 + caam_req->flc = &ctx->flc[ENCRYPT];
17200 + caam_req->op_type = ENCRYPT;
17201 + caam_req->cbk = tls_encrypt_done;
17202 + caam_req->ctx = &req->base;
17203 + caam_req->edesc = edesc;
17204 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17205 + if (ret != -EINPROGRESS &&
17206 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17207 + tls_unmap(ctx->dev, edesc, req);
17208 + qi_cache_free(edesc);
17214 +static int tls_decrypt(struct aead_request *req)
17216 + struct tls_edesc *edesc;
17217 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17218 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17219 + struct caam_request *caam_req = aead_request_ctx(req);
17222 + /* allocate extended descriptor */
17223 + edesc = tls_edesc_alloc(req, false);
17224 + if (IS_ERR(edesc))
17225 + return PTR_ERR(edesc);
17227 + caam_req->flc = &ctx->flc[DECRYPT];
17228 + caam_req->op_type = DECRYPT;
17229 + caam_req->cbk = tls_decrypt_done;
17230 + caam_req->ctx = &req->base;
17231 + caam_req->edesc = edesc;
17232 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17233 + if (ret != -EINPROGRESS &&
17234 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17235 + tls_unmap(ctx->dev, edesc, req);
17236 + qi_cache_free(edesc);
17242 +static int ipsec_gcm_encrypt(struct aead_request *req)
17244 + if (req->assoclen < 8)
17247 + return aead_encrypt(req);
17250 +static int ipsec_gcm_decrypt(struct aead_request *req)
17252 + if (req->assoclen < 8)
17255 + return aead_decrypt(req);
17258 +static void ablkcipher_done(void *cbk_ctx, u32 status)
17260 + struct crypto_async_request *areq = cbk_ctx;
17261 + struct ablkcipher_request *req = ablkcipher_request_cast(areq);
17262 + struct caam_request *req_ctx = to_caam_req(areq);
17263 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17264 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17265 + struct ablkcipher_edesc *edesc = req_ctx->edesc;
17267 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17270 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17273 + if (unlikely(status)) {
17274 + caam_qi2_strstatus(ctx->dev, status);
17279 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
17280 + DUMP_PREFIX_ADDRESS, 16, 4, req->info,
17281 + edesc->src_nents > 1 ? 100 : ivsize, 1);
17282 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
17283 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
17284 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
17287 + ablkcipher_unmap(ctx->dev, edesc, req);
17288 + qi_cache_free(edesc);
17291 + * The crypto API expects us to set the IV (req->info) to the last
17292 + * ciphertext block. This is used e.g. by the CTS mode.
17294 + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
17297 + ablkcipher_request_complete(req, ecode);
17300 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
17302 + struct ablkcipher_edesc *edesc;
17303 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17304 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17305 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17308 + /* allocate extended descriptor */
17309 + edesc = ablkcipher_edesc_alloc(req, true);
17310 + if (IS_ERR(edesc))
17311 + return PTR_ERR(edesc);
17313 + caam_req->flc = &ctx->flc[ENCRYPT];
17314 + caam_req->op_type = ENCRYPT;
17315 + caam_req->cbk = ablkcipher_done;
17316 + caam_req->ctx = &req->base;
17317 + caam_req->edesc = edesc;
17318 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17319 + if (ret != -EINPROGRESS &&
17320 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17321 + ablkcipher_unmap(ctx->dev, edesc, req);
17322 + qi_cache_free(edesc);
17328 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *greq)
17330 + struct ablkcipher_request *req = &greq->creq;
17331 + struct ablkcipher_edesc *edesc;
17332 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17333 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17334 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17337 + /* allocate extended descriptor */
17338 + edesc = ablkcipher_giv_edesc_alloc(greq);
17339 + if (IS_ERR(edesc))
17340 + return PTR_ERR(edesc);
17342 + caam_req->flc = &ctx->flc[GIVENCRYPT];
17343 + caam_req->op_type = GIVENCRYPT;
17344 + caam_req->cbk = ablkcipher_done;
17345 + caam_req->ctx = &req->base;
17346 + caam_req->edesc = edesc;
17347 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17348 + if (ret != -EINPROGRESS &&
17349 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17350 + ablkcipher_unmap(ctx->dev, edesc, req);
17351 + qi_cache_free(edesc);
17357 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
17359 + struct ablkcipher_edesc *edesc;
17360 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17361 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17362 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17365 + /* allocate extended descriptor */
17366 + edesc = ablkcipher_edesc_alloc(req, false);
17367 + if (IS_ERR(edesc))
17368 + return PTR_ERR(edesc);
17370 + caam_req->flc = &ctx->flc[DECRYPT];
17371 + caam_req->op_type = DECRYPT;
17372 + caam_req->cbk = ablkcipher_done;
17373 + caam_req->ctx = &req->base;
17374 + caam_req->edesc = edesc;
17375 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17376 + if (ret != -EINPROGRESS &&
17377 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17378 + ablkcipher_unmap(ctx->dev, edesc, req);
17379 + qi_cache_free(edesc);
17385 +struct caam_crypto_alg {
17386 + struct list_head entry;
17387 + struct crypto_alg crypto_alg;
17388 + struct caam_alg_entry caam;
17391 +static int caam_cra_init(struct crypto_tfm *tfm)
17393 + struct crypto_alg *alg = tfm->__crt_alg;
17394 + struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
17396 + struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
17398 + /* copy descriptor header template value */
17399 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
17400 + caam_alg->caam.class1_alg_type;
17401 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG |
17402 + caam_alg->caam.class2_alg_type;
17404 + ctx->dev = caam_alg->caam.dev;
17409 +static int caam_cra_init_ablkcipher(struct crypto_tfm *tfm)
17411 + struct ablkcipher_tfm *ablkcipher_tfm =
17412 + crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
17414 + ablkcipher_tfm->reqsize = sizeof(struct caam_request);
17415 + return caam_cra_init(tfm);
17418 +static int caam_cra_init_aead(struct crypto_aead *tfm)
17420 + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
17421 + return caam_cra_init(crypto_aead_tfm(tfm));
17424 +static void caam_exit_common(struct caam_ctx *ctx)
17428 + for (i = 0; i < NUM_OP; i++) {
17429 + if (!ctx->flc[i].flc_dma)
17431 + dma_unmap_single(ctx->dev, ctx->flc[i].flc_dma,
17432 + sizeof(ctx->flc[i].flc) +
17433 + desc_bytes(ctx->flc[i].sh_desc),
17437 + if (ctx->key_dma)
17438 + dma_unmap_single(ctx->dev, ctx->key_dma,
17439 + ctx->cdata.keylen + ctx->adata.keylen_pad,
17443 +static void caam_cra_exit(struct crypto_tfm *tfm)
17445 + caam_exit_common(crypto_tfm_ctx(tfm));
17448 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
17450 + caam_exit_common(crypto_aead_ctx(tfm));
17453 +#define template_ablkcipher template_u.ablkcipher
17454 +struct caam_alg_template {
17455 + char name[CRYPTO_MAX_ALG_NAME];
17456 + char driver_name[CRYPTO_MAX_ALG_NAME];
17457 + unsigned int blocksize;
17460 + struct ablkcipher_alg ablkcipher;
17462 + u32 class1_alg_type;
17463 + u32 class2_alg_type;
17466 +static struct caam_alg_template driver_algs[] = {
17467 + /* ablkcipher descriptor */
17469 + .name = "cbc(aes)",
17470 + .driver_name = "cbc-aes-caam-qi2",
17471 + .blocksize = AES_BLOCK_SIZE,
17472 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17473 + .template_ablkcipher = {
17474 + .setkey = ablkcipher_setkey,
17475 + .encrypt = ablkcipher_encrypt,
17476 + .decrypt = ablkcipher_decrypt,
17477 + .givencrypt = ablkcipher_givencrypt,
17478 + .geniv = "<built-in>",
17479 + .min_keysize = AES_MIN_KEY_SIZE,
17480 + .max_keysize = AES_MAX_KEY_SIZE,
17481 + .ivsize = AES_BLOCK_SIZE,
17483 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17486 + .name = "cbc(des3_ede)",
17487 + .driver_name = "cbc-3des-caam-qi2",
17488 + .blocksize = DES3_EDE_BLOCK_SIZE,
17489 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17490 + .template_ablkcipher = {
17491 + .setkey = ablkcipher_setkey,
17492 + .encrypt = ablkcipher_encrypt,
17493 + .decrypt = ablkcipher_decrypt,
17494 + .givencrypt = ablkcipher_givencrypt,
17495 + .geniv = "<built-in>",
17496 + .min_keysize = DES3_EDE_KEY_SIZE,
17497 + .max_keysize = DES3_EDE_KEY_SIZE,
17498 + .ivsize = DES3_EDE_BLOCK_SIZE,
17500 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17503 + .name = "cbc(des)",
17504 + .driver_name = "cbc-des-caam-qi2",
17505 + .blocksize = DES_BLOCK_SIZE,
17506 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17507 + .template_ablkcipher = {
17508 + .setkey = ablkcipher_setkey,
17509 + .encrypt = ablkcipher_encrypt,
17510 + .decrypt = ablkcipher_decrypt,
17511 + .givencrypt = ablkcipher_givencrypt,
17512 + .geniv = "<built-in>",
17513 + .min_keysize = DES_KEY_SIZE,
17514 + .max_keysize = DES_KEY_SIZE,
17515 + .ivsize = DES_BLOCK_SIZE,
17517 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
17520 + .name = "ctr(aes)",
17521 + .driver_name = "ctr-aes-caam-qi2",
17523 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17524 + .template_ablkcipher = {
17525 + .setkey = ablkcipher_setkey,
17526 + .encrypt = ablkcipher_encrypt,
17527 + .decrypt = ablkcipher_decrypt,
17528 + .geniv = "chainiv",
17529 + .min_keysize = AES_MIN_KEY_SIZE,
17530 + .max_keysize = AES_MAX_KEY_SIZE,
17531 + .ivsize = AES_BLOCK_SIZE,
17533 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17536 + .name = "rfc3686(ctr(aes))",
17537 + .driver_name = "rfc3686-ctr-aes-caam-qi2",
17539 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17540 + .template_ablkcipher = {
17541 + .setkey = ablkcipher_setkey,
17542 + .encrypt = ablkcipher_encrypt,
17543 + .decrypt = ablkcipher_decrypt,
17544 + .givencrypt = ablkcipher_givencrypt,
17545 + .geniv = "<built-in>",
17546 + .min_keysize = AES_MIN_KEY_SIZE +
17547 + CTR_RFC3686_NONCE_SIZE,
17548 + .max_keysize = AES_MAX_KEY_SIZE +
17549 + CTR_RFC3686_NONCE_SIZE,
17550 + .ivsize = CTR_RFC3686_IV_SIZE,
17552 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17555 + .name = "xts(aes)",
17556 + .driver_name = "xts-aes-caam-qi2",
17557 + .blocksize = AES_BLOCK_SIZE,
17558 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17559 + .template_ablkcipher = {
17560 + .setkey = xts_ablkcipher_setkey,
17561 + .encrypt = ablkcipher_encrypt,
17562 + .decrypt = ablkcipher_decrypt,
17563 + .geniv = "eseqiv",
17564 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
17565 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
17566 + .ivsize = AES_BLOCK_SIZE,
17568 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
17572 +static struct caam_aead_alg driver_aeads[] = {
17576 + .cra_name = "rfc4106(gcm(aes))",
17577 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
17578 + .cra_blocksize = 1,
17580 + .setkey = rfc4106_setkey,
17581 + .setauthsize = rfc4106_setauthsize,
17582 + .encrypt = ipsec_gcm_encrypt,
17583 + .decrypt = ipsec_gcm_decrypt,
17585 + .maxauthsize = AES_BLOCK_SIZE,
17588 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17594 + .cra_name = "rfc4543(gcm(aes))",
17595 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
17596 + .cra_blocksize = 1,
17598 + .setkey = rfc4543_setkey,
17599 + .setauthsize = rfc4543_setauthsize,
17600 + .encrypt = ipsec_gcm_encrypt,
17601 + .decrypt = ipsec_gcm_decrypt,
17603 + .maxauthsize = AES_BLOCK_SIZE,
17606 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17609 + /* Galois Counter Mode */
17613 + .cra_name = "gcm(aes)",
17614 + .cra_driver_name = "gcm-aes-caam-qi2",
17615 + .cra_blocksize = 1,
17617 + .setkey = gcm_setkey,
17618 + .setauthsize = gcm_setauthsize,
17619 + .encrypt = aead_encrypt,
17620 + .decrypt = aead_decrypt,
17622 + .maxauthsize = AES_BLOCK_SIZE,
17625 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17628 + /* single-pass ipsec_esp descriptor */
17632 + .cra_name = "authenc(hmac(md5),cbc(aes))",
17633 + .cra_driver_name = "authenc-hmac-md5-"
17634 + "cbc-aes-caam-qi2",
17635 + .cra_blocksize = AES_BLOCK_SIZE,
17637 + .setkey = aead_setkey,
17638 + .setauthsize = aead_setauthsize,
17639 + .encrypt = aead_encrypt,
17640 + .decrypt = aead_decrypt,
17641 + .ivsize = AES_BLOCK_SIZE,
17642 + .maxauthsize = MD5_DIGEST_SIZE,
17645 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17646 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17647 + OP_ALG_AAI_HMAC_PRECOMP,
17653 + .cra_name = "echainiv(authenc(hmac(md5),"
17655 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
17656 + "cbc-aes-caam-qi2",
17657 + .cra_blocksize = AES_BLOCK_SIZE,
17659 + .setkey = aead_setkey,
17660 + .setauthsize = aead_setauthsize,
17661 + .encrypt = aead_encrypt,
17662 + .decrypt = aead_decrypt,
17663 + .ivsize = AES_BLOCK_SIZE,
17664 + .maxauthsize = MD5_DIGEST_SIZE,
17667 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17668 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17669 + OP_ALG_AAI_HMAC_PRECOMP,
17676 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
17677 + .cra_driver_name = "authenc-hmac-sha1-"
17678 + "cbc-aes-caam-qi2",
17679 + .cra_blocksize = AES_BLOCK_SIZE,
17681 + .setkey = aead_setkey,
17682 + .setauthsize = aead_setauthsize,
17683 + .encrypt = aead_encrypt,
17684 + .decrypt = aead_decrypt,
17685 + .ivsize = AES_BLOCK_SIZE,
17686 + .maxauthsize = SHA1_DIGEST_SIZE,
17689 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17690 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17691 + OP_ALG_AAI_HMAC_PRECOMP,
17697 + .cra_name = "echainiv(authenc(hmac(sha1),"
17699 + .cra_driver_name = "echainiv-authenc-"
17700 + "hmac-sha1-cbc-aes-caam-qi2",
17701 + .cra_blocksize = AES_BLOCK_SIZE,
17703 + .setkey = aead_setkey,
17704 + .setauthsize = aead_setauthsize,
17705 + .encrypt = aead_encrypt,
17706 + .decrypt = aead_decrypt,
17707 + .ivsize = AES_BLOCK_SIZE,
17708 + .maxauthsize = SHA1_DIGEST_SIZE,
17711 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17712 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17713 + OP_ALG_AAI_HMAC_PRECOMP,
17720 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
17721 + .cra_driver_name = "authenc-hmac-sha224-"
17722 + "cbc-aes-caam-qi2",
17723 + .cra_blocksize = AES_BLOCK_SIZE,
17725 + .setkey = aead_setkey,
17726 + .setauthsize = aead_setauthsize,
17727 + .encrypt = aead_encrypt,
17728 + .decrypt = aead_decrypt,
17729 + .ivsize = AES_BLOCK_SIZE,
17730 + .maxauthsize = SHA224_DIGEST_SIZE,
17733 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17734 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17735 + OP_ALG_AAI_HMAC_PRECOMP,
17741 + .cra_name = "echainiv(authenc(hmac(sha224),"
17743 + .cra_driver_name = "echainiv-authenc-"
17744 + "hmac-sha224-cbc-aes-caam-qi2",
17745 + .cra_blocksize = AES_BLOCK_SIZE,
17747 + .setkey = aead_setkey,
17748 + .setauthsize = aead_setauthsize,
17749 + .encrypt = aead_encrypt,
17750 + .decrypt = aead_decrypt,
17751 + .ivsize = AES_BLOCK_SIZE,
17752 + .maxauthsize = SHA224_DIGEST_SIZE,
17755 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17756 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17757 + OP_ALG_AAI_HMAC_PRECOMP,
17764 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
17765 + .cra_driver_name = "authenc-hmac-sha256-"
17766 + "cbc-aes-caam-qi2",
17767 + .cra_blocksize = AES_BLOCK_SIZE,
17769 + .setkey = aead_setkey,
17770 + .setauthsize = aead_setauthsize,
17771 + .encrypt = aead_encrypt,
17772 + .decrypt = aead_decrypt,
17773 + .ivsize = AES_BLOCK_SIZE,
17774 + .maxauthsize = SHA256_DIGEST_SIZE,
17777 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17778 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17779 + OP_ALG_AAI_HMAC_PRECOMP,
17785 + .cra_name = "echainiv(authenc(hmac(sha256),"
17787 + .cra_driver_name = "echainiv-authenc-"
17788 + "hmac-sha256-cbc-aes-"
17790 + .cra_blocksize = AES_BLOCK_SIZE,
17792 + .setkey = aead_setkey,
17793 + .setauthsize = aead_setauthsize,
17794 + .encrypt = aead_encrypt,
17795 + .decrypt = aead_decrypt,
17796 + .ivsize = AES_BLOCK_SIZE,
17797 + .maxauthsize = SHA256_DIGEST_SIZE,
17800 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17801 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17802 + OP_ALG_AAI_HMAC_PRECOMP,
17809 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
17810 + .cra_driver_name = "authenc-hmac-sha384-"
17811 + "cbc-aes-caam-qi2",
17812 + .cra_blocksize = AES_BLOCK_SIZE,
17814 + .setkey = aead_setkey,
17815 + .setauthsize = aead_setauthsize,
17816 + .encrypt = aead_encrypt,
17817 + .decrypt = aead_decrypt,
17818 + .ivsize = AES_BLOCK_SIZE,
17819 + .maxauthsize = SHA384_DIGEST_SIZE,
17822 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17823 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17824 + OP_ALG_AAI_HMAC_PRECOMP,
17830 + .cra_name = "echainiv(authenc(hmac(sha384),"
17832 + .cra_driver_name = "echainiv-authenc-"
17833 + "hmac-sha384-cbc-aes-"
17835 + .cra_blocksize = AES_BLOCK_SIZE,
17837 + .setkey = aead_setkey,
17838 + .setauthsize = aead_setauthsize,
17839 + .encrypt = aead_encrypt,
17840 + .decrypt = aead_decrypt,
17841 + .ivsize = AES_BLOCK_SIZE,
17842 + .maxauthsize = SHA384_DIGEST_SIZE,
17845 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17846 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17847 + OP_ALG_AAI_HMAC_PRECOMP,
17854 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
17855 + .cra_driver_name = "authenc-hmac-sha512-"
17856 + "cbc-aes-caam-qi2",
17857 + .cra_blocksize = AES_BLOCK_SIZE,
17859 + .setkey = aead_setkey,
17860 + .setauthsize = aead_setauthsize,
17861 + .encrypt = aead_encrypt,
17862 + .decrypt = aead_decrypt,
17863 + .ivsize = AES_BLOCK_SIZE,
17864 + .maxauthsize = SHA512_DIGEST_SIZE,
17867 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17868 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17869 + OP_ALG_AAI_HMAC_PRECOMP,
17875 + .cra_name = "echainiv(authenc(hmac(sha512),"
17877 + .cra_driver_name = "echainiv-authenc-"
17878 + "hmac-sha512-cbc-aes-"
17880 + .cra_blocksize = AES_BLOCK_SIZE,
17882 + .setkey = aead_setkey,
17883 + .setauthsize = aead_setauthsize,
17884 + .encrypt = aead_encrypt,
17885 + .decrypt = aead_decrypt,
17886 + .ivsize = AES_BLOCK_SIZE,
17887 + .maxauthsize = SHA512_DIGEST_SIZE,
17890 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17891 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17892 + OP_ALG_AAI_HMAC_PRECOMP,
17899 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
17900 + .cra_driver_name = "authenc-hmac-md5-"
17901 + "cbc-des3_ede-caam-qi2",
17902 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17904 + .setkey = aead_setkey,
17905 + .setauthsize = aead_setauthsize,
17906 + .encrypt = aead_encrypt,
17907 + .decrypt = aead_decrypt,
17908 + .ivsize = DES3_EDE_BLOCK_SIZE,
17909 + .maxauthsize = MD5_DIGEST_SIZE,
17912 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17913 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17914 + OP_ALG_AAI_HMAC_PRECOMP,
17920 + .cra_name = "echainiv(authenc(hmac(md5),"
17921 + "cbc(des3_ede)))",
17922 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
17923 + "cbc-des3_ede-caam-qi2",
17924 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17926 + .setkey = aead_setkey,
17927 + .setauthsize = aead_setauthsize,
17928 + .encrypt = aead_encrypt,
17929 + .decrypt = aead_decrypt,
17930 + .ivsize = DES3_EDE_BLOCK_SIZE,
17931 + .maxauthsize = MD5_DIGEST_SIZE,
17934 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17935 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17936 + OP_ALG_AAI_HMAC_PRECOMP,
17943 + .cra_name = "authenc(hmac(sha1),"
17944 + "cbc(des3_ede))",
17945 + .cra_driver_name = "authenc-hmac-sha1-"
17946 + "cbc-des3_ede-caam-qi2",
17947 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17949 + .setkey = aead_setkey,
17950 + .setauthsize = aead_setauthsize,
17951 + .encrypt = aead_encrypt,
17952 + .decrypt = aead_decrypt,
17953 + .ivsize = DES3_EDE_BLOCK_SIZE,
17954 + .maxauthsize = SHA1_DIGEST_SIZE,
17957 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17958 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17959 + OP_ALG_AAI_HMAC_PRECOMP,
17965 + .cra_name = "echainiv(authenc(hmac(sha1),"
17966 + "cbc(des3_ede)))",
17967 + .cra_driver_name = "echainiv-authenc-"
17969 + "cbc-des3_ede-caam-qi2",
17970 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17972 + .setkey = aead_setkey,
17973 + .setauthsize = aead_setauthsize,
17974 + .encrypt = aead_encrypt,
17975 + .decrypt = aead_decrypt,
17976 + .ivsize = DES3_EDE_BLOCK_SIZE,
17977 + .maxauthsize = SHA1_DIGEST_SIZE,
17980 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17981 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17982 + OP_ALG_AAI_HMAC_PRECOMP,
17989 + .cra_name = "authenc(hmac(sha224),"
17990 + "cbc(des3_ede))",
17991 + .cra_driver_name = "authenc-hmac-sha224-"
17992 + "cbc-des3_ede-caam-qi2",
17993 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17995 + .setkey = aead_setkey,
17996 + .setauthsize = aead_setauthsize,
17997 + .encrypt = aead_encrypt,
17998 + .decrypt = aead_decrypt,
17999 + .ivsize = DES3_EDE_BLOCK_SIZE,
18000 + .maxauthsize = SHA224_DIGEST_SIZE,
18003 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18004 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18005 + OP_ALG_AAI_HMAC_PRECOMP,
18011 + .cra_name = "echainiv(authenc(hmac(sha224),"
18012 + "cbc(des3_ede)))",
18013 + .cra_driver_name = "echainiv-authenc-"
18015 + "cbc-des3_ede-caam-qi2",
18016 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18018 + .setkey = aead_setkey,
18019 + .setauthsize = aead_setauthsize,
18020 + .encrypt = aead_encrypt,
18021 + .decrypt = aead_decrypt,
18022 + .ivsize = DES3_EDE_BLOCK_SIZE,
18023 + .maxauthsize = SHA224_DIGEST_SIZE,
18026 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18027 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18028 + OP_ALG_AAI_HMAC_PRECOMP,
18035 + .cra_name = "authenc(hmac(sha256),"
18036 + "cbc(des3_ede))",
18037 + .cra_driver_name = "authenc-hmac-sha256-"
18038 + "cbc-des3_ede-caam-qi2",
18039 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18041 + .setkey = aead_setkey,
18042 + .setauthsize = aead_setauthsize,
18043 + .encrypt = aead_encrypt,
18044 + .decrypt = aead_decrypt,
18045 + .ivsize = DES3_EDE_BLOCK_SIZE,
18046 + .maxauthsize = SHA256_DIGEST_SIZE,
18049 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18050 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18051 + OP_ALG_AAI_HMAC_PRECOMP,
18057 + .cra_name = "echainiv(authenc(hmac(sha256),"
18058 + "cbc(des3_ede)))",
18059 + .cra_driver_name = "echainiv-authenc-"
18061 + "cbc-des3_ede-caam-qi2",
18062 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18064 + .setkey = aead_setkey,
18065 + .setauthsize = aead_setauthsize,
18066 + .encrypt = aead_encrypt,
18067 + .decrypt = aead_decrypt,
18068 + .ivsize = DES3_EDE_BLOCK_SIZE,
18069 + .maxauthsize = SHA256_DIGEST_SIZE,
18072 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18073 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18074 + OP_ALG_AAI_HMAC_PRECOMP,
18081 + .cra_name = "authenc(hmac(sha384),"
18082 + "cbc(des3_ede))",
18083 + .cra_driver_name = "authenc-hmac-sha384-"
18084 + "cbc-des3_ede-caam-qi2",
18085 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18087 + .setkey = aead_setkey,
18088 + .setauthsize = aead_setauthsize,
18089 + .encrypt = aead_encrypt,
18090 + .decrypt = aead_decrypt,
18091 + .ivsize = DES3_EDE_BLOCK_SIZE,
18092 + .maxauthsize = SHA384_DIGEST_SIZE,
18095 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18096 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18097 + OP_ALG_AAI_HMAC_PRECOMP,
18103 + .cra_name = "echainiv(authenc(hmac(sha384),"
18104 + "cbc(des3_ede)))",
18105 + .cra_driver_name = "echainiv-authenc-"
18107 + "cbc-des3_ede-caam-qi2",
18108 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18110 + .setkey = aead_setkey,
18111 + .setauthsize = aead_setauthsize,
18112 + .encrypt = aead_encrypt,
18113 + .decrypt = aead_decrypt,
18114 + .ivsize = DES3_EDE_BLOCK_SIZE,
18115 + .maxauthsize = SHA384_DIGEST_SIZE,
18118 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18119 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18120 + OP_ALG_AAI_HMAC_PRECOMP,
18127 + .cra_name = "authenc(hmac(sha512),"
18128 + "cbc(des3_ede))",
18129 + .cra_driver_name = "authenc-hmac-sha512-"
18130 + "cbc-des3_ede-caam-qi2",
18131 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18133 + .setkey = aead_setkey,
18134 + .setauthsize = aead_setauthsize,
18135 + .encrypt = aead_encrypt,
18136 + .decrypt = aead_decrypt,
18137 + .ivsize = DES3_EDE_BLOCK_SIZE,
18138 + .maxauthsize = SHA512_DIGEST_SIZE,
18141 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18142 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18143 + OP_ALG_AAI_HMAC_PRECOMP,
18149 + .cra_name = "echainiv(authenc(hmac(sha512),"
18150 + "cbc(des3_ede)))",
18151 + .cra_driver_name = "echainiv-authenc-"
18153 + "cbc-des3_ede-caam-qi2",
18154 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18156 + .setkey = aead_setkey,
18157 + .setauthsize = aead_setauthsize,
18158 + .encrypt = aead_encrypt,
18159 + .decrypt = aead_decrypt,
18160 + .ivsize = DES3_EDE_BLOCK_SIZE,
18161 + .maxauthsize = SHA512_DIGEST_SIZE,
18164 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18165 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18166 + OP_ALG_AAI_HMAC_PRECOMP,
18173 + .cra_name = "authenc(hmac(md5),cbc(des))",
18174 + .cra_driver_name = "authenc-hmac-md5-"
18175 + "cbc-des-caam-qi2",
18176 + .cra_blocksize = DES_BLOCK_SIZE,
18178 + .setkey = aead_setkey,
18179 + .setauthsize = aead_setauthsize,
18180 + .encrypt = aead_encrypt,
18181 + .decrypt = aead_decrypt,
18182 + .ivsize = DES_BLOCK_SIZE,
18183 + .maxauthsize = MD5_DIGEST_SIZE,
18186 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18187 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18188 + OP_ALG_AAI_HMAC_PRECOMP,
18194 + .cra_name = "echainiv(authenc(hmac(md5),"
18196 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
18197 + "cbc-des-caam-qi2",
18198 + .cra_blocksize = DES_BLOCK_SIZE,
18200 + .setkey = aead_setkey,
18201 + .setauthsize = aead_setauthsize,
18202 + .encrypt = aead_encrypt,
18203 + .decrypt = aead_decrypt,
18204 + .ivsize = DES_BLOCK_SIZE,
18205 + .maxauthsize = MD5_DIGEST_SIZE,
18208 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18209 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18210 + OP_ALG_AAI_HMAC_PRECOMP,
18217 + .cra_name = "authenc(hmac(sha1),cbc(des))",
18218 + .cra_driver_name = "authenc-hmac-sha1-"
18219 + "cbc-des-caam-qi2",
18220 + .cra_blocksize = DES_BLOCK_SIZE,
18222 + .setkey = aead_setkey,
18223 + .setauthsize = aead_setauthsize,
18224 + .encrypt = aead_encrypt,
18225 + .decrypt = aead_decrypt,
18226 + .ivsize = DES_BLOCK_SIZE,
18227 + .maxauthsize = SHA1_DIGEST_SIZE,
18230 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18231 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18232 + OP_ALG_AAI_HMAC_PRECOMP,
18238 + .cra_name = "echainiv(authenc(hmac(sha1),"
18240 + .cra_driver_name = "echainiv-authenc-"
18241 + "hmac-sha1-cbc-des-caam-qi2",
18242 + .cra_blocksize = DES_BLOCK_SIZE,
18244 + .setkey = aead_setkey,
18245 + .setauthsize = aead_setauthsize,
18246 + .encrypt = aead_encrypt,
18247 + .decrypt = aead_decrypt,
18248 + .ivsize = DES_BLOCK_SIZE,
18249 + .maxauthsize = SHA1_DIGEST_SIZE,
18252 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18253 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18254 + OP_ALG_AAI_HMAC_PRECOMP,
18261 + .cra_name = "authenc(hmac(sha224),cbc(des))",
18262 + .cra_driver_name = "authenc-hmac-sha224-"
18263 + "cbc-des-caam-qi2",
18264 + .cra_blocksize = DES_BLOCK_SIZE,
18266 + .setkey = aead_setkey,
18267 + .setauthsize = aead_setauthsize,
18268 + .encrypt = aead_encrypt,
18269 + .decrypt = aead_decrypt,
18270 + .ivsize = DES_BLOCK_SIZE,
18271 + .maxauthsize = SHA224_DIGEST_SIZE,
18274 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18275 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18276 + OP_ALG_AAI_HMAC_PRECOMP,
18282 + .cra_name = "echainiv(authenc(hmac(sha224),"
18284 + .cra_driver_name = "echainiv-authenc-"
18285 + "hmac-sha224-cbc-des-"
18287 + .cra_blocksize = DES_BLOCK_SIZE,
18289 + .setkey = aead_setkey,
18290 + .setauthsize = aead_setauthsize,
18291 + .encrypt = aead_encrypt,
18292 + .decrypt = aead_decrypt,
18293 + .ivsize = DES_BLOCK_SIZE,
18294 + .maxauthsize = SHA224_DIGEST_SIZE,
18297 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18298 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18299 + OP_ALG_AAI_HMAC_PRECOMP,
18306 + .cra_name = "authenc(hmac(sha256),cbc(des))",
18307 + .cra_driver_name = "authenc-hmac-sha256-"
18308 + "cbc-des-caam-qi2",
18309 + .cra_blocksize = DES_BLOCK_SIZE,
18311 + .setkey = aead_setkey,
18312 + .setauthsize = aead_setauthsize,
18313 + .encrypt = aead_encrypt,
18314 + .decrypt = aead_decrypt,
18315 + .ivsize = DES_BLOCK_SIZE,
18316 + .maxauthsize = SHA256_DIGEST_SIZE,
18319 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18320 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18321 + OP_ALG_AAI_HMAC_PRECOMP,
18327 + .cra_name = "echainiv(authenc(hmac(sha256),"
18329 + .cra_driver_name = "echainiv-authenc-"
18330 + "hmac-sha256-cbc-desi-"
18332 + .cra_blocksize = DES_BLOCK_SIZE,
18334 + .setkey = aead_setkey,
18335 + .setauthsize = aead_setauthsize,
18336 + .encrypt = aead_encrypt,
18337 + .decrypt = aead_decrypt,
18338 + .ivsize = DES_BLOCK_SIZE,
18339 + .maxauthsize = SHA256_DIGEST_SIZE,
18342 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18343 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18344 + OP_ALG_AAI_HMAC_PRECOMP,
18351 + .cra_name = "authenc(hmac(sha384),cbc(des))",
18352 + .cra_driver_name = "authenc-hmac-sha384-"
18353 + "cbc-des-caam-qi2",
18354 + .cra_blocksize = DES_BLOCK_SIZE,
18356 + .setkey = aead_setkey,
18357 + .setauthsize = aead_setauthsize,
18358 + .encrypt = aead_encrypt,
18359 + .decrypt = aead_decrypt,
18360 + .ivsize = DES_BLOCK_SIZE,
18361 + .maxauthsize = SHA384_DIGEST_SIZE,
18364 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18365 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18366 + OP_ALG_AAI_HMAC_PRECOMP,
18372 + .cra_name = "echainiv(authenc(hmac(sha384),"
18374 + .cra_driver_name = "echainiv-authenc-"
18375 + "hmac-sha384-cbc-des-"
18377 + .cra_blocksize = DES_BLOCK_SIZE,
18379 + .setkey = aead_setkey,
18380 + .setauthsize = aead_setauthsize,
18381 + .encrypt = aead_encrypt,
18382 + .decrypt = aead_decrypt,
18383 + .ivsize = DES_BLOCK_SIZE,
18384 + .maxauthsize = SHA384_DIGEST_SIZE,
18387 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18388 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18389 + OP_ALG_AAI_HMAC_PRECOMP,
18396 + .cra_name = "authenc(hmac(sha512),cbc(des))",
18397 + .cra_driver_name = "authenc-hmac-sha512-"
18398 + "cbc-des-caam-qi2",
18399 + .cra_blocksize = DES_BLOCK_SIZE,
18401 + .setkey = aead_setkey,
18402 + .setauthsize = aead_setauthsize,
18403 + .encrypt = aead_encrypt,
18404 + .decrypt = aead_decrypt,
18405 + .ivsize = DES_BLOCK_SIZE,
18406 + .maxauthsize = SHA512_DIGEST_SIZE,
18409 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18410 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18411 + OP_ALG_AAI_HMAC_PRECOMP,
18417 + .cra_name = "echainiv(authenc(hmac(sha512),"
18419 + .cra_driver_name = "echainiv-authenc-"
18420 + "hmac-sha512-cbc-des-"
18422 + .cra_blocksize = DES_BLOCK_SIZE,
18424 + .setkey = aead_setkey,
18425 + .setauthsize = aead_setauthsize,
18426 + .encrypt = aead_encrypt,
18427 + .decrypt = aead_decrypt,
18428 + .ivsize = DES_BLOCK_SIZE,
18429 + .maxauthsize = SHA512_DIGEST_SIZE,
18432 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18433 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18434 + OP_ALG_AAI_HMAC_PRECOMP,
18441 + .cra_name = "authenc(hmac(md5),"
18442 + "rfc3686(ctr(aes)))",
18443 + .cra_driver_name = "authenc-hmac-md5-"
18444 + "rfc3686-ctr-aes-caam-qi2",
18445 + .cra_blocksize = 1,
18447 + .setkey = aead_setkey,
18448 + .setauthsize = aead_setauthsize,
18449 + .encrypt = aead_encrypt,
18450 + .decrypt = aead_decrypt,
18451 + .ivsize = CTR_RFC3686_IV_SIZE,
18452 + .maxauthsize = MD5_DIGEST_SIZE,
18455 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18456 + OP_ALG_AAI_CTR_MOD128,
18457 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18458 + OP_ALG_AAI_HMAC_PRECOMP,
18465 + .cra_name = "seqiv(authenc("
18466 + "hmac(md5),rfc3686(ctr(aes))))",
18467 + .cra_driver_name = "seqiv-authenc-hmac-md5-"
18468 + "rfc3686-ctr-aes-caam-qi2",
18469 + .cra_blocksize = 1,
18471 + .setkey = aead_setkey,
18472 + .setauthsize = aead_setauthsize,
18473 + .encrypt = aead_encrypt,
18474 + .decrypt = aead_decrypt,
18475 + .ivsize = CTR_RFC3686_IV_SIZE,
18476 + .maxauthsize = MD5_DIGEST_SIZE,
18479 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18480 + OP_ALG_AAI_CTR_MOD128,
18481 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18482 + OP_ALG_AAI_HMAC_PRECOMP,
18490 + .cra_name = "authenc(hmac(sha1),"
18491 + "rfc3686(ctr(aes)))",
18492 + .cra_driver_name = "authenc-hmac-sha1-"
18493 + "rfc3686-ctr-aes-caam-qi2",
18494 + .cra_blocksize = 1,
18496 + .setkey = aead_setkey,
18497 + .setauthsize = aead_setauthsize,
18498 + .encrypt = aead_encrypt,
18499 + .decrypt = aead_decrypt,
18500 + .ivsize = CTR_RFC3686_IV_SIZE,
18501 + .maxauthsize = SHA1_DIGEST_SIZE,
18504 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18505 + OP_ALG_AAI_CTR_MOD128,
18506 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18507 + OP_ALG_AAI_HMAC_PRECOMP,
18514 + .cra_name = "seqiv(authenc("
18515 + "hmac(sha1),rfc3686(ctr(aes))))",
18516 + .cra_driver_name = "seqiv-authenc-hmac-sha1-"
18517 + "rfc3686-ctr-aes-caam-qi2",
18518 + .cra_blocksize = 1,
18520 + .setkey = aead_setkey,
18521 + .setauthsize = aead_setauthsize,
18522 + .encrypt = aead_encrypt,
18523 + .decrypt = aead_decrypt,
18524 + .ivsize = CTR_RFC3686_IV_SIZE,
18525 + .maxauthsize = SHA1_DIGEST_SIZE,
18528 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18529 + OP_ALG_AAI_CTR_MOD128,
18530 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18531 + OP_ALG_AAI_HMAC_PRECOMP,
18539 + .cra_name = "authenc(hmac(sha224),"
18540 + "rfc3686(ctr(aes)))",
18541 + .cra_driver_name = "authenc-hmac-sha224-"
18542 + "rfc3686-ctr-aes-caam-qi2",
18543 + .cra_blocksize = 1,
18545 + .setkey = aead_setkey,
18546 + .setauthsize = aead_setauthsize,
18547 + .encrypt = aead_encrypt,
18548 + .decrypt = aead_decrypt,
18549 + .ivsize = CTR_RFC3686_IV_SIZE,
18550 + .maxauthsize = SHA224_DIGEST_SIZE,
18553 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18554 + OP_ALG_AAI_CTR_MOD128,
18555 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18556 + OP_ALG_AAI_HMAC_PRECOMP,
18563 + .cra_name = "seqiv(authenc("
18564 + "hmac(sha224),rfc3686(ctr(aes))))",
18565 + .cra_driver_name = "seqiv-authenc-hmac-sha224-"
18566 + "rfc3686-ctr-aes-caam-qi2",
18567 + .cra_blocksize = 1,
18569 + .setkey = aead_setkey,
18570 + .setauthsize = aead_setauthsize,
18571 + .encrypt = aead_encrypt,
18572 + .decrypt = aead_decrypt,
18573 + .ivsize = CTR_RFC3686_IV_SIZE,
18574 + .maxauthsize = SHA224_DIGEST_SIZE,
18577 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18578 + OP_ALG_AAI_CTR_MOD128,
18579 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18580 + OP_ALG_AAI_HMAC_PRECOMP,
18588 + .cra_name = "authenc(hmac(sha256),"
18589 + "rfc3686(ctr(aes)))",
18590 + .cra_driver_name = "authenc-hmac-sha256-"
18591 + "rfc3686-ctr-aes-caam-qi2",
18592 + .cra_blocksize = 1,
18594 + .setkey = aead_setkey,
18595 + .setauthsize = aead_setauthsize,
18596 + .encrypt = aead_encrypt,
18597 + .decrypt = aead_decrypt,
18598 + .ivsize = CTR_RFC3686_IV_SIZE,
18599 + .maxauthsize = SHA256_DIGEST_SIZE,
18602 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18603 + OP_ALG_AAI_CTR_MOD128,
18604 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18605 + OP_ALG_AAI_HMAC_PRECOMP,
18612 + .cra_name = "seqiv(authenc(hmac(sha256),"
18613 + "rfc3686(ctr(aes))))",
18614 + .cra_driver_name = "seqiv-authenc-hmac-sha256-"
18615 + "rfc3686-ctr-aes-caam-qi2",
18616 + .cra_blocksize = 1,
18618 + .setkey = aead_setkey,
18619 + .setauthsize = aead_setauthsize,
18620 + .encrypt = aead_encrypt,
18621 + .decrypt = aead_decrypt,
18622 + .ivsize = CTR_RFC3686_IV_SIZE,
18623 + .maxauthsize = SHA256_DIGEST_SIZE,
18626 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18627 + OP_ALG_AAI_CTR_MOD128,
18628 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18629 + OP_ALG_AAI_HMAC_PRECOMP,
18637 + .cra_name = "authenc(hmac(sha384),"
18638 + "rfc3686(ctr(aes)))",
18639 + .cra_driver_name = "authenc-hmac-sha384-"
18640 + "rfc3686-ctr-aes-caam-qi2",
18641 + .cra_blocksize = 1,
18643 + .setkey = aead_setkey,
18644 + .setauthsize = aead_setauthsize,
18645 + .encrypt = aead_encrypt,
18646 + .decrypt = aead_decrypt,
18647 + .ivsize = CTR_RFC3686_IV_SIZE,
18648 + .maxauthsize = SHA384_DIGEST_SIZE,
18651 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18652 + OP_ALG_AAI_CTR_MOD128,
18653 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18654 + OP_ALG_AAI_HMAC_PRECOMP,
18661 + .cra_name = "seqiv(authenc(hmac(sha384),"
18662 + "rfc3686(ctr(aes))))",
18663 + .cra_driver_name = "seqiv-authenc-hmac-sha384-"
18664 + "rfc3686-ctr-aes-caam-qi2",
18665 + .cra_blocksize = 1,
18667 + .setkey = aead_setkey,
18668 + .setauthsize = aead_setauthsize,
18669 + .encrypt = aead_encrypt,
18670 + .decrypt = aead_decrypt,
18671 + .ivsize = CTR_RFC3686_IV_SIZE,
18672 + .maxauthsize = SHA384_DIGEST_SIZE,
18675 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18676 + OP_ALG_AAI_CTR_MOD128,
18677 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18678 + OP_ALG_AAI_HMAC_PRECOMP,
18686 + .cra_name = "authenc(hmac(sha512),"
18687 + "rfc3686(ctr(aes)))",
18688 + .cra_driver_name = "authenc-hmac-sha512-"
18689 + "rfc3686-ctr-aes-caam-qi2",
18690 + .cra_blocksize = 1,
18692 + .setkey = aead_setkey,
18693 + .setauthsize = aead_setauthsize,
18694 + .encrypt = aead_encrypt,
18695 + .decrypt = aead_decrypt,
18696 + .ivsize = CTR_RFC3686_IV_SIZE,
18697 + .maxauthsize = SHA512_DIGEST_SIZE,
18700 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18701 + OP_ALG_AAI_CTR_MOD128,
18702 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18703 + OP_ALG_AAI_HMAC_PRECOMP,
18710 + .cra_name = "seqiv(authenc(hmac(sha512),"
18711 + "rfc3686(ctr(aes))))",
18712 + .cra_driver_name = "seqiv-authenc-hmac-sha512-"
18713 + "rfc3686-ctr-aes-caam-qi2",
18714 + .cra_blocksize = 1,
18716 + .setkey = aead_setkey,
18717 + .setauthsize = aead_setauthsize,
18718 + .encrypt = aead_encrypt,
18719 + .decrypt = aead_decrypt,
18720 + .ivsize = CTR_RFC3686_IV_SIZE,
18721 + .maxauthsize = SHA512_DIGEST_SIZE,
18724 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18725 + OP_ALG_AAI_CTR_MOD128,
18726 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18727 + OP_ALG_AAI_HMAC_PRECOMP,
18735 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
18736 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
18737 + .cra_blocksize = AES_BLOCK_SIZE,
18739 + .setkey = tls_setkey,
18740 + .setauthsize = tls_setauthsize,
18741 + .encrypt = tls_encrypt,
18742 + .decrypt = tls_decrypt,
18743 + .ivsize = AES_BLOCK_SIZE,
18744 + .maxauthsize = SHA1_DIGEST_SIZE,
18747 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18748 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18749 + OP_ALG_AAI_HMAC_PRECOMP,
18754 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
18757 + struct caam_crypto_alg *t_alg;
18758 + struct crypto_alg *alg;
18760 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
18762 + return ERR_PTR(-ENOMEM);
18764 + alg = &t_alg->crypto_alg;
18766 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
18767 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
18768 + template->driver_name);
18769 + alg->cra_module = THIS_MODULE;
18770 + alg->cra_exit = caam_cra_exit;
18771 + alg->cra_priority = CAAM_CRA_PRIORITY;
18772 + alg->cra_blocksize = template->blocksize;
18773 + alg->cra_alignmask = 0;
18774 + alg->cra_ctxsize = sizeof(struct caam_ctx);
18775 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
18777 + switch (template->type) {
18778 + case CRYPTO_ALG_TYPE_GIVCIPHER:
18779 + alg->cra_init = caam_cra_init_ablkcipher;
18780 + alg->cra_type = &crypto_givcipher_type;
18781 + alg->cra_ablkcipher = template->template_ablkcipher;
18783 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
18784 + alg->cra_init = caam_cra_init_ablkcipher;
18785 + alg->cra_type = &crypto_ablkcipher_type;
18786 + alg->cra_ablkcipher = template->template_ablkcipher;
18790 + t_alg->caam.class1_alg_type = template->class1_alg_type;
18791 + t_alg->caam.class2_alg_type = template->class2_alg_type;
18796 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
18798 + struct aead_alg *alg = &t_alg->aead;
18800 + alg->base.cra_module = THIS_MODULE;
18801 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
18802 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
18803 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
18805 + alg->init = caam_cra_init_aead;
18806 + alg->exit = caam_cra_exit_aead;
18809 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
18811 + struct dpaa2_caam_priv_per_cpu *ppriv;
18813 + ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
18814 + napi_schedule_irqoff(&ppriv->napi);
18817 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
18819 + struct device *dev = priv->dev;
18820 + struct dpaa2_io_notification_ctx *nctx;
18821 + struct dpaa2_caam_priv_per_cpu *ppriv;
18822 + int err, i = 0, cpu;
18824 + for_each_online_cpu(cpu) {
18825 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18826 + ppriv->priv = priv;
18827 + nctx = &ppriv->nctx;
18828 + nctx->is_cdan = 0;
18829 + nctx->id = ppriv->rsp_fqid;
18830 + nctx->desired_cpu = cpu;
18831 + nctx->cb = dpaa2_caam_fqdan_cb;
18833 + /* Register notification callbacks */
18834 + err = dpaa2_io_service_register(NULL, nctx);
18835 + if (unlikely(err)) {
18836 + dev_err(dev, "notification register failed\n");
18841 + ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
18843 + if (unlikely(!ppriv->store)) {
18844 + dev_err(dev, "dpaa2_io_store_create() failed\n");
18848 + if (++i == priv->num_pairs)
18855 + for_each_online_cpu(cpu) {
18856 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18857 + if (!ppriv->nctx.cb)
18859 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
18862 + for_each_online_cpu(cpu) {
18863 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18864 + if (!ppriv->store)
18866 + dpaa2_io_store_destroy(ppriv->store);
18872 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
18874 + struct dpaa2_caam_priv_per_cpu *ppriv;
18877 + for_each_online_cpu(cpu) {
18878 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18879 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
18880 + dpaa2_io_store_destroy(ppriv->store);
18882 + if (++i == priv->num_pairs)
18887 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
18889 + struct dpseci_rx_queue_cfg rx_queue_cfg;
18890 + struct device *dev = priv->dev;
18891 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
18892 + struct dpaa2_caam_priv_per_cpu *ppriv;
18893 + int err = 0, i = 0, cpu;
18895 + /* Configure Rx queues */
18896 + for_each_online_cpu(cpu) {
18897 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
18899 + rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
18900 + DPSECI_QUEUE_OPT_USER_CTX;
18901 + rx_queue_cfg.order_preservation_en = 0;
18902 + rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
18903 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
18905 + * Rx priority (WQ) doesn't really matter, since we use
18906 + * pull mode, i.e. volatile dequeues from specific FQs
18908 + rx_queue_cfg.dest_cfg.priority = 0;
18909 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
18911 + err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
18914 + dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
18919 + if (++i == priv->num_pairs)
18926 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
18928 + struct device *dev = priv->dev;
18930 + if (!priv->cscn_mem)
18933 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
18934 + kfree(priv->cscn_mem);
18937 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
18939 + struct device *dev = priv->dev;
18940 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
18942 + dpaa2_dpseci_congestion_free(priv);
18943 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
18946 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
18947 + const struct dpaa2_fd *fd)
18949 + struct caam_request *req;
18952 + if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
18953 + dev_err(priv->dev, "Only Frame List FD format is supported!\n");
18957 + fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
18958 + if (unlikely(fd_err))
18959 + dev_err(priv->dev, "FD error: %08x\n", fd_err);
18962 + * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
18963 + * in FD[ERR] or FD[FRC].
18965 + req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
18966 + dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
18967 + DMA_BIDIRECTIONAL);
18968 + req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
18971 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
18975 + /* Retry while portal is busy */
18977 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
18979 + } while (err == -EBUSY);
18981 + if (unlikely(err))
18982 + dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
18987 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
18989 + struct dpaa2_dq *dq;
18990 + int cleaned = 0, is_last;
18993 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
18994 + if (unlikely(!dq)) {
18995 + if (unlikely(!is_last)) {
18996 + dev_dbg(ppriv->priv->dev,
18997 + "FQ %d returned no valid frames\n",
18998 + ppriv->rsp_fqid);
19000 + * MUST retry until we get some sort of
19001 + * valid response token (be it "empty dequeue"
19002 + * or a valid frame).
19010 + dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
19012 + } while (!is_last);
19017 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
19019 + struct dpaa2_caam_priv_per_cpu *ppriv;
19020 + struct dpaa2_caam_priv *priv;
19021 + int err, cleaned = 0, store_cleaned;
19023 + ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
19024 + priv = ppriv->priv;
19026 + if (unlikely(dpaa2_caam_pull_fq(ppriv)))
19030 + store_cleaned = dpaa2_caam_store_consume(ppriv);
19031 + cleaned += store_cleaned;
19033 + if (store_cleaned == 0 ||
19034 + cleaned > budget - DPAA2_CAAM_STORE_SIZE)
19037 + /* Try to dequeue some more */
19038 + err = dpaa2_caam_pull_fq(ppriv);
19039 + if (unlikely(err))
19043 + if (cleaned < budget) {
19044 + napi_complete_done(napi, cleaned);
19045 + err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
19046 + if (unlikely(err))
19047 + dev_err(priv->dev, "Notification rearm failed: %d\n",
19054 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
19057 + struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
19058 + struct device *dev = priv->dev;
19062 + * Congestion group feature supported starting with DPSECI API v5.1
19063 + * and only when object has been created with this capability.
19065 + if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
19066 + !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
19069 + priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
19070 + GFP_KERNEL | GFP_DMA);
19071 + if (!priv->cscn_mem)
19074 + priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
19075 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
19076 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
19077 + if (dma_mapping_error(dev, priv->cscn_dma)) {
19078 + dev_err(dev, "Error mapping CSCN memory area\n");
19080 + goto err_dma_map;
19083 + cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
19084 + cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
19085 + cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
19086 + cong_notif_cfg.message_ctx = (u64)priv;
19087 + cong_notif_cfg.message_iova = priv->cscn_dma;
19088 + cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
19089 + DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
19090 + DPSECI_CGN_MODE_COHERENT_WRITE;
19092 + err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
19093 + &cong_notif_cfg);
19095 + dev_err(dev, "dpseci_set_congestion_notification failed\n");
19096 + goto err_set_cong;
19102 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
19104 + kfree(priv->cscn_mem);
19109 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
19111 + struct device *dev = &ls_dev->dev;
19112 + struct dpaa2_caam_priv *priv;
19113 + struct dpaa2_caam_priv_per_cpu *ppriv;
19117 + priv = dev_get_drvdata(dev);
19120 + priv->dpsec_id = ls_dev->obj_desc.id;
19122 + /* Get a handle for the DPSECI this interface is associate with */
19123 + err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
19125 + dev_err(dev, "dpsec_open() failed: %d\n", err);
19129 + dev_info(dev, "Opened dpseci object successfully\n");
19131 + err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
19132 + &priv->minor_ver);
19134 + dev_err(dev, "dpseci_get_api_version() failed\n");
19135 + goto err_get_vers;
19138 + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
19139 + &priv->dpseci_attr);
19141 + dev_err(dev, "dpseci_get_attributes() failed\n");
19142 + goto err_get_vers;
19145 + err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
19146 + &priv->sec_attr);
19148 + dev_err(dev, "dpseci_get_sec_attr() failed\n");
19149 + goto err_get_vers;
19152 + err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
19154 + dev_err(dev, "setup_congestion() failed\n");
19155 + goto err_get_vers;
19158 + priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
19159 + priv->dpseci_attr.num_tx_queues);
19160 + if (priv->num_pairs > num_online_cpus()) {
19161 + dev_warn(dev, "%d queues won't be used\n",
19162 + priv->num_pairs - num_online_cpus());
19163 + priv->num_pairs = num_online_cpus();
19166 + for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
19167 + err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
19168 + &priv->rx_queue_attr[i]);
19170 + dev_err(dev, "dpseci_get_rx_queue() failed\n");
19171 + goto err_get_rx_queue;
19175 + for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
19176 + err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
19177 + &priv->tx_queue_attr[i]);
19179 + dev_err(dev, "dpseci_get_tx_queue() failed\n");
19180 + goto err_get_rx_queue;
19185 + for_each_online_cpu(cpu) {
19186 + dev_info(dev, "prio %d: rx queue %d, tx queue %d\n", i,
19187 + priv->rx_queue_attr[i].fqid,
19188 + priv->tx_queue_attr[i].fqid);
19190 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
19191 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
19192 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
19195 + ppriv->net_dev.dev = *dev;
19196 + INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
19197 + netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
19198 + DPAA2_CAAM_NAPI_WEIGHT);
19199 + if (++i == priv->num_pairs)
19206 + dpaa2_dpseci_congestion_free(priv);
19208 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
19213 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
19215 + struct device *dev = priv->dev;
19216 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
19217 + struct dpaa2_caam_priv_per_cpu *ppriv;
19220 + for (i = 0; i < priv->num_pairs; i++) {
19221 + ppriv = per_cpu_ptr(priv->ppriv, i);
19222 + napi_enable(&ppriv->napi);
19225 + err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
19227 + dev_err(dev, "dpseci_enable() failed\n");
19231 + dev_info(dev, "DPSECI version %d.%d\n",
19233 + priv->minor_ver);
19238 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
19240 + struct device *dev = priv->dev;
19241 + struct dpaa2_caam_priv_per_cpu *ppriv;
19242 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
19243 + int i, err = 0, enabled;
19245 + err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
19247 + dev_err(dev, "dpseci_disable() failed\n");
19251 + err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
19253 + dev_err(dev, "dpseci_is_enabled() failed\n");
19257 + dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
19259 + for (i = 0; i < priv->num_pairs; i++) {
19260 + ppriv = per_cpu_ptr(priv->ppriv, i);
19261 + napi_disable(&ppriv->napi);
19262 + netif_napi_del(&ppriv->napi);
19268 +static struct list_head alg_list;
19270 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
19272 + struct device *dev;
19273 + struct dpaa2_caam_priv *priv;
19275 + bool registered = false;
19278 + * There is no way to get CAAM endianness - there is no direct register
19279 + * space access and MC f/w does not provide this attribute.
19280 + * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
19283 + caam_little_end = true;
19285 + caam_imx = false;
19287 + dev = &dpseci_dev->dev;
19289 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
19293 + dev_set_drvdata(dev, priv);
19295 + priv->domain = iommu_get_domain_for_dev(dev);
19297 + qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
19298 + 0, SLAB_CACHE_DMA, NULL);
19300 + dev_err(dev, "Can't allocate SEC cache\n");
19302 + goto err_qicache;
19305 + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
19307 + dev_err(dev, "dma_set_mask_and_coherent() failed\n");
19308 + goto err_dma_mask;
19311 + /* Obtain a MC portal */
19312 + err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
19314 + dev_err(dev, "MC portal allocation failed\n");
19315 + goto err_dma_mask;
19318 + priv->ppriv = alloc_percpu(*priv->ppriv);
19319 + if (!priv->ppriv) {
19320 + dev_err(dev, "alloc_percpu() failed\n");
19321 + goto err_alloc_ppriv;
19324 + /* DPSECI initialization */
19325 + err = dpaa2_dpseci_setup(dpseci_dev);
19327 + dev_err(dev, "dpaa2_dpseci_setup() failed\n");
19328 + goto err_dpseci_setup;
19332 + err = dpaa2_dpseci_dpio_setup(priv);
19334 + dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
19335 + goto err_dpio_setup;
19338 + /* DPSECI binding to DPIO */
19339 + err = dpaa2_dpseci_bind(priv);
19341 + dev_err(dev, "dpaa2_dpseci_bind() failed\n");
19345 + /* DPSECI enable */
19346 + err = dpaa2_dpseci_enable(priv);
19348 + dev_err(dev, "dpaa2_dpseci_enable() failed");
19352 + /* register crypto algorithms the device supports */
19353 + INIT_LIST_HEAD(&alg_list);
19354 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
19355 + struct caam_crypto_alg *t_alg;
19356 + struct caam_alg_template *alg = driver_algs + i;
19357 + u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
19359 + /* Skip DES algorithms if not supported by device */
19360 + if (!priv->sec_attr.des_acc_num &&
19361 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
19362 + (alg_sel == OP_ALG_ALGSEL_DES)))
19365 + /* Skip AES algorithms if not supported by device */
19366 + if (!priv->sec_attr.aes_acc_num &&
19367 + (alg_sel == OP_ALG_ALGSEL_AES))
19370 + t_alg = caam_alg_alloc(alg);
19371 + if (IS_ERR(t_alg)) {
19372 + err = PTR_ERR(t_alg);
19373 + dev_warn(dev, "%s alg allocation failed: %d\n",
19374 + alg->driver_name, err);
19377 + t_alg->caam.dev = dev;
19379 + err = crypto_register_alg(&t_alg->crypto_alg);
19381 + dev_warn(dev, "%s alg registration failed: %d\n",
19382 + t_alg->crypto_alg.cra_driver_name, err);
19387 + list_add_tail(&t_alg->entry, &alg_list);
19388 + registered = true;
19391 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
19392 + struct caam_aead_alg *t_alg = driver_aeads + i;
19393 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
19394 + OP_ALG_ALGSEL_MASK;
19395 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
19396 + OP_ALG_ALGSEL_MASK;
19398 + /* Skip DES algorithms if not supported by device */
19399 + if (!priv->sec_attr.des_acc_num &&
19400 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
19401 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
19404 + /* Skip AES algorithms if not supported by device */
19405 + if (!priv->sec_attr.aes_acc_num &&
19406 + (c1_alg_sel == OP_ALG_ALGSEL_AES))
19410 + * Skip algorithms requiring message digests
19411 + * if MD not supported by device.
19413 + if (!priv->sec_attr.md_acc_num && c2_alg_sel)
19416 + t_alg->caam.dev = dev;
19417 + caam_aead_alg_init(t_alg);
19419 + err = crypto_register_aead(&t_alg->aead);
19421 + dev_warn(dev, "%s alg registration failed: %d\n",
19422 + t_alg->aead.base.cra_driver_name, err);
19426 + t_alg->registered = true;
19427 + registered = true;
19430 + dev_info(dev, "algorithms registered in /proc/crypto\n");
19435 + dpaa2_dpseci_dpio_free(priv);
19437 + dpaa2_dpseci_free(priv);
19439 + free_percpu(priv->ppriv);
19441 + fsl_mc_portal_free(priv->mc_io);
19443 + kmem_cache_destroy(qi_cache);
19445 + dev_set_drvdata(dev, NULL);
19450 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
19452 + struct device *dev;
19453 + struct dpaa2_caam_priv *priv;
19456 + dev = &ls_dev->dev;
19457 + priv = dev_get_drvdata(dev);
19459 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
19460 + struct caam_aead_alg *t_alg = driver_aeads + i;
19462 + if (t_alg->registered)
19463 + crypto_unregister_aead(&t_alg->aead);
19466 + if (alg_list.next) {
19467 + struct caam_crypto_alg *t_alg, *n;
19469 + list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
19470 + crypto_unregister_alg(&t_alg->crypto_alg);
19471 + list_del(&t_alg->entry);
19476 + dpaa2_dpseci_disable(priv);
19477 + dpaa2_dpseci_dpio_free(priv);
19478 + dpaa2_dpseci_free(priv);
19479 + free_percpu(priv->ppriv);
19480 + fsl_mc_portal_free(priv->mc_io);
19481 + dev_set_drvdata(dev, NULL);
19482 + kmem_cache_destroy(qi_cache);
19487 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
19489 + struct dpaa2_fd fd;
19490 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
19491 + int err = 0, i, id;
19494 + return PTR_ERR(req);
19496 + if (priv->cscn_mem) {
19497 + dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
19499 + DMA_FROM_DEVICE);
19500 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
19501 + dev_dbg_ratelimited(dev, "Dropping request\n");
19506 + dpaa2_fl_set_flc(&req->fd_flt[1], req->flc->flc_dma);
19508 + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
19509 + DMA_BIDIRECTIONAL);
19510 + if (dma_mapping_error(dev, req->fd_flt_dma)) {
19511 + dev_err(dev, "DMA mapping error for QI enqueue request\n");
19515 + memset(&fd, 0, sizeof(fd));
19516 + dpaa2_fd_set_format(&fd, dpaa2_fd_list);
19517 + dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
19518 + dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
19519 + dpaa2_fd_set_flc(&fd, req->flc->flc_dma);
19522 + * There is no guarantee that preemption is disabled here,
19523 + * thus take action.
19525 + preempt_disable();
19526 + id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
19527 + for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
19528 + err = dpaa2_io_service_enqueue_fq(NULL,
19529 + priv->tx_queue_attr[id].fqid,
19531 + if (err != -EBUSY)
19534 + preempt_enable();
19536 + if (unlikely(err < 0)) {
19537 + dev_err(dev, "Error enqueuing frame: %d\n", err);
19541 + return -EINPROGRESS;
19544 + dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
19545 + DMA_BIDIRECTIONAL);
19548 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
19550 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
19552 + .vendor = FSL_MC_VENDOR_FREESCALE,
19553 + .obj_type = "dpseci",
19555 + { .vendor = 0x0 }
19558 +static struct fsl_mc_driver dpaa2_caam_driver = {
19560 + .name = KBUILD_MODNAME,
19561 + .owner = THIS_MODULE,
19563 + .probe = dpaa2_caam_probe,
19564 + .remove = dpaa2_caam_remove,
19565 + .match_id_table = dpaa2_caam_match_id_table
19568 +MODULE_LICENSE("Dual BSD/GPL");
19569 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
19570 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
19572 +module_fsl_mc_driver(dpaa2_caam_driver);
19574 +++ b/drivers/crypto/caam/caamalg_qi2.h
19577 + * Copyright 2015-2016 Freescale Semiconductor Inc.
19578 + * Copyright 2017 NXP
19580 + * Redistribution and use in source and binary forms, with or without
19581 + * modification, are permitted provided that the following conditions are met:
19582 + * * Redistributions of source code must retain the above copyright
19583 + * notice, this list of conditions and the following disclaimer.
19584 + * * Redistributions in binary form must reproduce the above copyright
19585 + * notice, this list of conditions and the following disclaimer in the
19586 + * documentation and/or other materials provided with the distribution.
19587 + * * Neither the names of the above-listed copyright holders nor the
19588 + * names of any contributors may be used to endorse or promote products
19589 + * derived from this software without specific prior written permission.
19592 + * ALTERNATIVELY, this software may be distributed under the terms of the
19593 + * GNU General Public License ("GPL") as published by the Free Software
19594 + * Foundation, either version 2 of that License or (at your option) any
19597 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19598 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19599 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19600 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
19601 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
19602 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
19603 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
19604 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
19605 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
19606 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
19607 + * POSSIBILITY OF SUCH DAMAGE.
19610 +#ifndef _CAAMALG_QI2_H_
19611 +#define _CAAMALG_QI2_H_
19613 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
19614 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
19615 +#include <linux/threads.h>
19616 +#include "dpseci.h"
19617 +#include "desc_constr.h"
19619 +#define DPAA2_CAAM_STORE_SIZE 16
19620 +/* NAPI weight *must* be a multiple of the store size. */
19621 +#define DPAA2_CAAM_NAPI_WEIGHT 64
19623 +/* The congestion entrance threshold was chosen so that on LS2088
19624 + * we support the maximum throughput for the available memory
19626 +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
19627 +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
19630 + * dpaa2_caam_priv - driver private data
19631 + * @dpseci_id: DPSECI object unique ID
19632 + * @major_ver: DPSECI major version
19633 + * @minor_ver: DPSECI minor version
19634 + * @dpseci_attr: DPSECI attributes
19635 + * @sec_attr: SEC engine attributes
19636 + * @rx_queue_attr: array of Rx queue attributes
19637 + * @tx_queue_attr: array of Tx queue attributes
19638 + * @cscn_mem: pointer to memory region containing the
19639 + * dpaa2_cscn struct; it's size is larger than
19640 + * sizeof(struct dpaa2_cscn) to accommodate alignment
19641 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
19642 + * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
19643 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
19644 + * @dev: device associated with the DPSECI object
19645 + * @mc_io: pointer to MC portal's I/O object
19646 + * @domain: IOMMU domain
19647 + * @ppriv: per CPU pointers to privata data
19649 +struct dpaa2_caam_priv {
19655 + struct dpseci_attr dpseci_attr;
19656 + struct dpseci_sec_attr sec_attr;
19657 + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_PRIO_NUM];
19658 + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_PRIO_NUM];
19663 + void *cscn_mem_aligned;
19664 + dma_addr_t cscn_dma;
19666 + struct device *dev;
19667 + struct fsl_mc_io *mc_io;
19668 + struct iommu_domain *domain;
19670 + struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
19674 + * dpaa2_caam_priv_per_cpu - per CPU private data
19675 + * @napi: napi structure
19676 + * @net_dev: netdev used by napi
19677 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
19678 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
19679 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
19680 + * @nctx: notification context of response FQ
19681 + * @store: where dequeued frames are stored
19682 + * @priv: backpointer to dpaa2_caam_priv
19684 +struct dpaa2_caam_priv_per_cpu {
19685 + struct napi_struct napi;
19686 + struct net_device net_dev;
19690 + struct dpaa2_io_notification_ctx nctx;
19691 + struct dpaa2_io_store *store;
19692 + struct dpaa2_caam_priv *priv;
19696 + * The CAAM QI hardware constructs a job descriptor which points
19697 + * to shared descriptor (as pointed by context_a of FQ to CAAM).
19698 + * When the job descriptor is executed by deco, the whole job
19699 + * descriptor together with shared descriptor gets loaded in
19700 + * deco buffer which is 64 words long (each 32-bit).
19702 + * The job descriptor constructed by QI hardware has layout:
19704 + * HEADER (1 word)
19705 + * Shdesc ptr (1 or 2 words)
19706 + * SEQ_OUT_PTR (1 word)
19707 + * Out ptr (1 or 2 words)
19708 + * Out length (1 word)
19709 + * SEQ_IN_PTR (1 word)
19710 + * In ptr (1 or 2 words)
19711 + * In length (1 word)
19713 + * The shdesc ptr is used to fetch shared descriptor contents
19714 + * into deco buffer.
19716 + * Apart from shdesc contents, the total number of words that
19717 + * get loaded in deco buffer are '8' or '11'. The remaining words
19718 + * in deco buffer can be used for storing shared descriptor.
19720 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
19722 +/* Length of a single buffer in the QI driver memory cache */
19723 +#define CAAM_QI_MEMCACHE_SIZE 512
19726 + * aead_edesc - s/w-extended aead descriptor
19727 + * @src_nents: number of segments in input scatterlist
19728 + * @dst_nents: number of segments in output scatterlist
19729 + * @iv_dma: dma address of iv for checking continuity and link table
19730 + * @qm_sg_bytes: length of dma mapped h/w link table
19731 + * @qm_sg_dma: bus physical mapped address of h/w link table
19732 + * @assoclen_dma: bus physical mapped address of req->assoclen
19733 + * @sgt: the h/w link table
19735 +struct aead_edesc {
19738 + dma_addr_t iv_dma;
19740 + dma_addr_t qm_sg_dma;
19741 + dma_addr_t assoclen_dma;
19742 +#define CAAM_QI_MAX_AEAD_SG \
19743 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
19744 + sizeof(struct dpaa2_sg_entry))
19745 + struct dpaa2_sg_entry sgt[0];
19749 + * tls_edesc - s/w-extended tls descriptor
19750 + * @src_nents: number of segments in input scatterlist
19751 + * @dst_nents: number of segments in output scatterlist
19752 + * @iv_dma: dma address of iv for checking continuity and link table
19753 + * @qm_sg_bytes: length of dma mapped h/w link table
19754 + * @qm_sg_dma: bus physical mapped address of h/w link table
19755 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
19756 + * @dst: pointer to output scatterlist, usefull for unmapping
19757 + * @sgt: the h/w link table
19759 +struct tls_edesc {
19762 + dma_addr_t iv_dma;
19764 + dma_addr_t qm_sg_dma;
19765 + struct scatterlist tmp[2];
19766 + struct scatterlist *dst;
19767 + struct dpaa2_sg_entry sgt[0];
19771 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
19772 + * @src_nents: number of segments in input scatterlist
19773 + * @dst_nents: number of segments in output scatterlist
19774 + * @iv_dma: dma address of iv for checking continuity and link table
19775 + * @qm_sg_bytes: length of dma mapped qm_sg space
19776 + * @qm_sg_dma: I/O virtual address of h/w link table
19777 + * @sgt: the h/w link table
19779 +struct ablkcipher_edesc {
19782 + dma_addr_t iv_dma;
19784 + dma_addr_t qm_sg_dma;
19785 +#define CAAM_QI_MAX_ABLKCIPHER_SG \
19786 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
19787 + sizeof(struct dpaa2_sg_entry))
19788 + struct dpaa2_sg_entry sgt[0];
19792 + * caam_flc - Flow Context (FLC)
19793 + * @flc: Flow Context options
19794 + * @sh_desc: Shared Descriptor
19795 + * @flc_dma: DMA address of the Flow Context
19799 + u32 sh_desc[MAX_SDLEN];
19800 + dma_addr_t flc_dma;
19801 +} ____cacheline_aligned;
19811 + * caam_request - the request structure the driver application should fill while
19812 + * submitting a job to driver.
19813 + * @fd_flt: Frame list table defining input and output
19814 + * fd_flt[0] - FLE pointing to output buffer
19815 + * fd_flt[1] - FLE pointing to input buffer
19816 + * @fd_flt_dma: DMA address for the frame list table
19817 + * @flc: Flow Context
19818 + * @op_type: operation type
19819 + * @cbk: Callback function to invoke when job is completed
19820 + * @ctx: arbitrary context attached to the request by the application
19821 + * @edesc: extended descriptor; points to one of {ablkcipher,aead}_edesc
19823 +struct caam_request {
19824 + struct dpaa2_fl_entry fd_flt[2];
19825 + dma_addr_t fd_flt_dma;
19826 + struct caam_flc *flc;
19827 + enum optype op_type;
19828 + void (*cbk)(void *ctx, u32 err);
19834 + * dpaa2_caam_enqueue() - enqueue a crypto request
19835 + * @dev: device associated with the DPSECI object
19836 + * @req: pointer to caam_request
19838 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
19840 +#endif /* _CAAMALG_QI2_H_ */
19841 --- a/drivers/crypto/caam/caamhash.c
19842 +++ b/drivers/crypto/caam/caamhash.c
19844 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
19846 /* length of descriptors text */
19847 -#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
19848 +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
19849 #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
19850 #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
19851 #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
19852 @@ -103,20 +103,14 @@ struct caam_hash_ctx {
19853 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19854 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19855 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19856 - u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
19857 dma_addr_t sh_desc_update_dma ____cacheline_aligned;
19858 dma_addr_t sh_desc_update_first_dma;
19859 dma_addr_t sh_desc_fin_dma;
19860 dma_addr_t sh_desc_digest_dma;
19861 - dma_addr_t sh_desc_finup_dma;
19862 struct device *jrdev;
19865 u8 key[CAAM_MAX_HASH_KEY_SIZE];
19866 - dma_addr_t key_dma;
19868 - unsigned int split_key_len;
19869 - unsigned int split_key_pad_len;
19870 + struct alginfo adata;
19874 @@ -143,6 +137,31 @@ struct caam_export_state {
19875 int (*finup)(struct ahash_request *req);
19878 +static inline void switch_buf(struct caam_hash_state *state)
19880 + state->current_buf ^= 1;
19883 +static inline u8 *current_buf(struct caam_hash_state *state)
19885 + return state->current_buf ? state->buf_1 : state->buf_0;
19888 +static inline u8 *alt_buf(struct caam_hash_state *state)
19890 + return state->current_buf ? state->buf_0 : state->buf_1;
19893 +static inline int *current_buflen(struct caam_hash_state *state)
19895 + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
19898 +static inline int *alt_buflen(struct caam_hash_state *state)
19900 + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
19903 /* Common job descriptor seq in/out ptr routines */
19905 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
19906 @@ -175,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr
19910 -/* Map current buffer in state and put it in link table */
19911 -static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
19912 - struct sec4_sg_entry *sec4_sg,
19913 - u8 *buf, int buflen)
19914 +/* Map current buffer in state (if length > 0) and put it in link table */
19915 +static inline int buf_map_to_sec4_sg(struct device *jrdev,
19916 + struct sec4_sg_entry *sec4_sg,
19917 + struct caam_hash_state *state)
19919 - dma_addr_t buf_dma;
19920 + int buflen = *current_buflen(state);
19922 - buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
19923 - dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
19929 + state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
19931 + if (dma_mapping_error(jrdev, state->buf_dma)) {
19932 + dev_err(jrdev, "unable to map buf\n");
19933 + state->buf_dma = 0;
19938 - * Only put buffer in link table if it contains data, which is possible,
19939 - * since a buffer has previously been used, and needs to be unmapped,
19941 -static inline dma_addr_t
19942 -try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
19943 - u8 *buf, dma_addr_t buf_dma, int buflen,
19946 - if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
19947 - dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
19949 - buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
19952 + dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
19958 /* Map state->caam_ctx, and add it to link table */
19959 @@ -224,89 +234,54 @@ static inline int ctx_map_to_sec4_sg(u32
19963 -/* Common shared descriptor commands */
19964 -static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
19966 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
19967 - ctx->split_key_len, CLASS_2 |
19968 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
19971 -/* Append key if it has been set */
19972 -static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
19974 - u32 *key_jump_cmd;
19976 - init_sh_desc(desc, HDR_SHARE_SERIAL);
19978 - if (ctx->split_key_len) {
19979 - /* Skip if already shared */
19980 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
19983 - append_key_ahash(desc, ctx);
19985 - set_jump_tgt_here(desc, key_jump_cmd);
19988 - /* Propagate errors from shared to job descriptor */
19989 - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
19993 - * For ahash read data from seqin following state->caam_ctx,
19994 - * and write resulting class2 context to seqout, which may be state->caam_ctx
19996 + * For ahash update, final and finup (import_ctx = true)
19997 + * import context, read and write to seqout
19998 + * For ahash update_first and digest (import_ctx = false)
19999 + * read and write to seqout
20001 -static inline void ahash_append_load_str(u32 *desc, int digestsize)
20002 +static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
20003 + struct caam_hash_ctx *ctx, bool import_ctx)
20005 - /* Calculate remaining bytes to read */
20006 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
20007 + u32 op = ctx->adata.algtype;
20008 + u32 *skip_key_load;
20010 - /* Read remaining bytes */
20011 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
20012 - FIFOLD_TYPE_MSG | KEY_VLF);
20013 + init_sh_desc(desc, HDR_SHARE_SERIAL);
20015 - /* Store class2 context bytes */
20016 - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
20017 - LDST_SRCDST_BYTE_CONTEXT);
20019 + /* Append key if it has been set; ahash update excluded */
20020 + if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
20021 + /* Skip key loading if already shared */
20022 + skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
20026 - * For ahash update, final and finup, import context, read and write to seqout
20028 -static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
20030 - struct caam_hash_ctx *ctx)
20032 - init_sh_desc_key_ahash(desc, ctx);
20034 - /* Import context from software */
20035 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
20036 - LDST_CLASS_2_CCB | ctx->ctx_len);
20037 + append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
20038 + ctx->adata.keylen, CLASS_2 |
20039 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
20041 - /* Class 2 operation */
20042 - append_operation(desc, op | state | OP_ALG_ENCRYPT);
20043 + set_jump_tgt_here(desc, skip_key_load);
20046 - * Load from buf and/or src and write to req->result or state->context
20048 - ahash_append_load_str(desc, digestsize);
20050 + op |= OP_ALG_AAI_HMAC_PRECOMP;
20053 -/* For ahash firsts and digest, read and write to seqout */
20054 -static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
20055 - int digestsize, struct caam_hash_ctx *ctx)
20057 - init_sh_desc_key_ahash(desc, ctx);
20058 + /* If needed, import context from software */
20060 + append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
20061 + LDST_SRCDST_BYTE_CONTEXT);
20063 /* Class 2 operation */
20064 append_operation(desc, op | state | OP_ALG_ENCRYPT);
20067 * Load from buf and/or src and write to req->result or state->context
20068 + * Calculate remaining bytes to read
20070 - ahash_append_load_str(desc, digestsize);
20071 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
20072 + /* Read remaining bytes */
20073 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
20074 + FIFOLD_TYPE_MSG | KEY_VLF);
20075 + /* Store class2 context bytes */
20076 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
20077 + LDST_SRCDST_BYTE_CONTEXT);
20080 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
20081 @@ -314,34 +289,13 @@ static int ahash_set_sh_desc(struct cryp
20082 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20083 int digestsize = crypto_ahash_digestsize(ahash);
20084 struct device *jrdev = ctx->jrdev;
20085 - u32 have_key = 0;
20088 - if (ctx->split_key_len)
20089 - have_key = OP_ALG_AAI_HMAC_PRECOMP;
20091 /* ahash_update shared descriptor */
20092 desc = ctx->sh_desc_update;
20094 - init_sh_desc(desc, HDR_SHARE_SERIAL);
20096 - /* Import context from software */
20097 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
20098 - LDST_CLASS_2_CCB | ctx->ctx_len);
20100 - /* Class 2 operation */
20101 - append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
20104 - /* Load data and write to result or context */
20105 - ahash_append_load_str(desc, ctx->ctx_len);
20107 - ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20109 - if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
20110 - dev_err(jrdev, "unable to map shared descriptor\n");
20113 + ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
20114 + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
20115 + desc_bytes(desc), DMA_TO_DEVICE);
20117 print_hex_dump(KERN_ERR,
20118 "ahash update shdesc@"__stringify(__LINE__)": ",
20119 @@ -350,17 +304,9 @@ static int ahash_set_sh_desc(struct cryp
20121 /* ahash_update_first shared descriptor */
20122 desc = ctx->sh_desc_update_first;
20124 - ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
20125 - ctx->ctx_len, ctx);
20127 - ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
20128 - desc_bytes(desc),
20130 - if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
20131 - dev_err(jrdev, "unable to map shared descriptor\n");
20134 + ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
20135 + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
20136 + desc_bytes(desc), DMA_TO_DEVICE);
20138 print_hex_dump(KERN_ERR,
20139 "ahash update first shdesc@"__stringify(__LINE__)": ",
20140 @@ -369,53 +315,20 @@ static int ahash_set_sh_desc(struct cryp
20142 /* ahash_final shared descriptor */
20143 desc = ctx->sh_desc_fin;
20145 - ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
20146 - OP_ALG_AS_FINALIZE, digestsize, ctx);
20148 - ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20150 - if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
20151 - dev_err(jrdev, "unable to map shared descriptor\n");
20154 + ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
20155 + dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
20156 + desc_bytes(desc), DMA_TO_DEVICE);
20158 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
20159 DUMP_PREFIX_ADDRESS, 16, 4, desc,
20160 desc_bytes(desc), 1);
20163 - /* ahash_finup shared descriptor */
20164 - desc = ctx->sh_desc_finup;
20166 - ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
20167 - OP_ALG_AS_FINALIZE, digestsize, ctx);
20169 - ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
20171 - if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
20172 - dev_err(jrdev, "unable to map shared descriptor\n");
20176 - print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
20177 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
20178 - desc_bytes(desc), 1);
20181 /* ahash_digest shared descriptor */
20182 desc = ctx->sh_desc_digest;
20184 - ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
20185 - digestsize, ctx);
20187 - ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
20188 - desc_bytes(desc),
20190 - if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
20191 - dev_err(jrdev, "unable to map shared descriptor\n");
20194 + ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
20195 + dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
20196 + desc_bytes(desc), DMA_TO_DEVICE);
20198 print_hex_dump(KERN_ERR,
20199 "ahash digest shdesc@"__stringify(__LINE__)": ",
20200 @@ -426,14 +339,6 @@ static int ahash_set_sh_desc(struct cryp
20204 -static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20207 - return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
20208 - ctx->split_key_pad_len, key_in, keylen,
20212 /* Digest hash size if it is too large */
20213 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
20214 u32 *keylen, u8 *key_out, u32 digestsize)
20215 @@ -469,7 +374,7 @@ static int hash_digest_key(struct caam_h
20218 /* Job descriptor to perform unkeyed hash on key_in */
20219 - append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
20220 + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
20221 OP_ALG_AS_INITFINAL);
20222 append_seq_in_ptr(desc, src_dma, *keylen, 0);
20223 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
20224 @@ -513,10 +418,7 @@ static int hash_digest_key(struct caam_h
20225 static int ahash_setkey(struct crypto_ahash *ahash,
20226 const u8 *key, unsigned int keylen)
20228 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
20229 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
20230 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20231 - struct device *jrdev = ctx->jrdev;
20232 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
20233 int digestsize = crypto_ahash_digestsize(ahash);
20235 @@ -539,43 +441,19 @@ static int ahash_setkey(struct crypto_ah
20239 - /* Pick class 2 key length from algorithm submask */
20240 - ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
20241 - OP_ALG_ALGSEL_SHIFT] * 2;
20242 - ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
20245 - printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
20246 - ctx->split_key_len, ctx->split_key_pad_len);
20247 - print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
20248 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
20251 - ret = gen_split_hash_key(ctx, key, keylen);
20252 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
20253 + CAAM_MAX_HASH_KEY_SIZE);
20257 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
20259 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
20260 - dev_err(jrdev, "unable to map key i/o memory\n");
20262 - goto error_free_key;
20265 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
20266 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
20267 - ctx->split_key_pad_len, 1);
20268 + ctx->adata.keylen_pad, 1);
20271 - ret = ahash_set_sh_desc(ahash);
20273 - dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
20279 + return ahash_set_sh_desc(ahash);
20282 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
20283 @@ -604,6 +482,8 @@ static inline void ahash_unmap(struct de
20284 struct ahash_edesc *edesc,
20285 struct ahash_request *req, int dst_len)
20287 + struct caam_hash_state *state = ahash_request_ctx(req);
20289 if (edesc->src_nents)
20290 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
20291 if (edesc->dst_dma)
20292 @@ -612,6 +492,12 @@ static inline void ahash_unmap(struct de
20293 if (edesc->sec4_sg_bytes)
20294 dma_unmap_single(dev, edesc->sec4_sg_dma,
20295 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
20297 + if (state->buf_dma) {
20298 + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
20300 + state->buf_dma = 0;
20304 static inline void ahash_unmap_ctx(struct device *dev,
20305 @@ -643,8 +529,7 @@ static void ahash_done(struct device *jr
20306 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20309 - edesc = (struct ahash_edesc *)((char *)desc -
20310 - offsetof(struct ahash_edesc, hw_desc));
20311 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20313 caam_jr_strstatus(jrdev, err);
20315 @@ -671,19 +556,19 @@ static void ahash_done_bi(struct device
20316 struct ahash_edesc *edesc;
20317 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20318 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20320 struct caam_hash_state *state = ahash_request_ctx(req);
20322 int digestsize = crypto_ahash_digestsize(ahash);
20324 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20327 - edesc = (struct ahash_edesc *)((char *)desc -
20328 - offsetof(struct ahash_edesc, hw_desc));
20329 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20331 caam_jr_strstatus(jrdev, err);
20333 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
20334 + switch_buf(state);
20338 @@ -713,8 +598,7 @@ static void ahash_done_ctx_src(struct de
20339 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20342 - edesc = (struct ahash_edesc *)((char *)desc -
20343 - offsetof(struct ahash_edesc, hw_desc));
20344 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20346 caam_jr_strstatus(jrdev, err);
20348 @@ -741,19 +625,19 @@ static void ahash_done_ctx_dst(struct de
20349 struct ahash_edesc *edesc;
20350 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20351 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20353 struct caam_hash_state *state = ahash_request_ctx(req);
20355 int digestsize = crypto_ahash_digestsize(ahash);
20357 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
20360 - edesc = (struct ahash_edesc *)((char *)desc -
20361 - offsetof(struct ahash_edesc, hw_desc));
20362 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
20364 caam_jr_strstatus(jrdev, err);
20366 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
20367 + switch_buf(state);
20371 @@ -835,13 +719,12 @@ static int ahash_update_ctx(struct ahash
20372 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20373 struct caam_hash_state *state = ahash_request_ctx(req);
20374 struct device *jrdev = ctx->jrdev;
20375 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20376 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20377 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20378 - int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
20379 - u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
20380 - int *next_buflen = state->current_buf ? &state->buflen_0 :
20381 - &state->buflen_1, last_buflen;
20382 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20383 + GFP_KERNEL : GFP_ATOMIC;
20384 + u8 *buf = current_buf(state);
20385 + int *buflen = current_buflen(state);
20386 + u8 *next_buf = alt_buf(state);
20387 + int *next_buflen = alt_buflen(state), last_buflen;
20388 int in_len = *buflen + req->nbytes, to_hash;
20390 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
20391 @@ -895,10 +778,9 @@ static int ahash_update_ctx(struct ahash
20395 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
20396 - edesc->sec4_sg + 1,
20397 - buf, state->buf_dma,
20398 - *buflen, last_buflen);
20399 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20403 if (mapped_nents) {
20404 sg_to_sec4_sg_last(req->src, mapped_nents,
20405 @@ -909,12 +791,10 @@ static int ahash_update_ctx(struct ahash
20409 - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
20410 - cpu_to_caam32(SEC4_SG_LEN_FIN);
20411 + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
20415 - state->current_buf = !state->current_buf;
20417 desc = edesc->hw_desc;
20419 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20420 @@ -969,12 +849,9 @@ static int ahash_final_ctx(struct ahash_
20421 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20422 struct caam_hash_state *state = ahash_request_ctx(req);
20423 struct device *jrdev = ctx->jrdev;
20424 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20425 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20426 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20427 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20428 - int last_buflen = state->current_buf ? state->buflen_0 :
20430 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20431 + GFP_KERNEL : GFP_ATOMIC;
20432 + int buflen = *current_buflen(state);
20434 int sec4_sg_bytes, sec4_sg_src_index;
20435 int digestsize = crypto_ahash_digestsize(ahash);
20436 @@ -1001,11 +878,11 @@ static int ahash_final_ctx(struct ahash_
20440 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
20441 - buf, state->buf_dma, buflen,
20443 - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
20444 - cpu_to_caam32(SEC4_SG_LEN_FIN);
20445 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20449 + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
20451 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20452 sec4_sg_bytes, DMA_TO_DEVICE);
20453 @@ -1048,12 +925,9 @@ static int ahash_finup_ctx(struct ahash_
20454 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20455 struct caam_hash_state *state = ahash_request_ctx(req);
20456 struct device *jrdev = ctx->jrdev;
20457 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20458 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20459 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20460 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20461 - int last_buflen = state->current_buf ? state->buflen_0 :
20463 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20464 + GFP_KERNEL : GFP_ATOMIC;
20465 + int buflen = *current_buflen(state);
20467 int sec4_sg_src_index;
20468 int src_nents, mapped_nents;
20469 @@ -1082,7 +956,7 @@ static int ahash_finup_ctx(struct ahash_
20471 /* allocate space for base edesc and hw desc commands, link tables */
20472 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
20473 - ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
20474 + ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
20477 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
20478 @@ -1098,9 +972,9 @@ static int ahash_finup_ctx(struct ahash_
20482 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
20483 - buf, state->buf_dma, buflen,
20485 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
20489 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
20490 sec4_sg_src_index, ctx->ctx_len + buflen,
20491 @@ -1136,15 +1010,18 @@ static int ahash_digest(struct ahash_req
20493 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20494 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20495 + struct caam_hash_state *state = ahash_request_ctx(req);
20496 struct device *jrdev = ctx->jrdev;
20497 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20498 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20499 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20500 + GFP_KERNEL : GFP_ATOMIC;
20502 int digestsize = crypto_ahash_digestsize(ahash);
20503 int src_nents, mapped_nents;
20504 struct ahash_edesc *edesc;
20507 + state->buf_dma = 0;
20509 src_nents = sg_nents_for_len(req->src, req->nbytes);
20510 if (src_nents < 0) {
20511 dev_err(jrdev, "Invalid number of src SG.\n");
20512 @@ -1215,10 +1092,10 @@ static int ahash_final_no_ctx(struct aha
20513 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20514 struct caam_hash_state *state = ahash_request_ctx(req);
20515 struct device *jrdev = ctx->jrdev;
20516 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20517 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20518 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20519 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20520 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20521 + GFP_KERNEL : GFP_ATOMIC;
20522 + u8 *buf = current_buf(state);
20523 + int buflen = *current_buflen(state);
20525 int digestsize = crypto_ahash_digestsize(ahash);
20526 struct ahash_edesc *edesc;
20527 @@ -1276,13 +1153,12 @@ static int ahash_update_no_ctx(struct ah
20528 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20529 struct caam_hash_state *state = ahash_request_ctx(req);
20530 struct device *jrdev = ctx->jrdev;
20531 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20532 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20533 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20534 - int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
20535 - u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
20536 - int *next_buflen = state->current_buf ? &state->buflen_0 :
20537 - &state->buflen_1;
20538 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20539 + GFP_KERNEL : GFP_ATOMIC;
20540 + u8 *buf = current_buf(state);
20541 + int *buflen = current_buflen(state);
20542 + u8 *next_buf = alt_buf(state);
20543 + int *next_buflen = alt_buflen(state);
20544 int in_len = *buflen + req->nbytes, to_hash;
20545 int sec4_sg_bytes, src_nents, mapped_nents;
20546 struct ahash_edesc *edesc;
20547 @@ -1331,8 +1207,10 @@ static int ahash_update_no_ctx(struct ah
20548 edesc->sec4_sg_bytes = sec4_sg_bytes;
20549 edesc->dst_dma = 0;
20551 - state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
20553 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
20557 sg_to_sec4_sg_last(req->src, mapped_nents,
20558 edesc->sec4_sg + 1, 0);
20560 @@ -1342,8 +1220,6 @@ static int ahash_update_no_ctx(struct ah
20564 - state->current_buf = !state->current_buf;
20566 desc = edesc->hw_desc;
20568 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
20569 @@ -1403,12 +1279,9 @@ static int ahash_finup_no_ctx(struct aha
20570 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20571 struct caam_hash_state *state = ahash_request_ctx(req);
20572 struct device *jrdev = ctx->jrdev;
20573 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20574 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20575 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
20576 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
20577 - int last_buflen = state->current_buf ? state->buflen_0 :
20579 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20580 + GFP_KERNEL : GFP_ATOMIC;
20581 + int buflen = *current_buflen(state);
20583 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
20584 int digestsize = crypto_ahash_digestsize(ahash);
20585 @@ -1450,9 +1323,9 @@ static int ahash_finup_no_ctx(struct aha
20586 edesc->src_nents = src_nents;
20587 edesc->sec4_sg_bytes = sec4_sg_bytes;
20589 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
20590 - state->buf_dma, buflen,
20592 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
20596 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
20598 @@ -1496,11 +1369,10 @@ static int ahash_update_first(struct aha
20599 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20600 struct caam_hash_state *state = ahash_request_ctx(req);
20601 struct device *jrdev = ctx->jrdev;
20602 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20603 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20604 - u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
20605 - int *next_buflen = state->current_buf ?
20606 - &state->buflen_1 : &state->buflen_0;
20607 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20608 + GFP_KERNEL : GFP_ATOMIC;
20609 + u8 *next_buf = alt_buf(state);
20610 + int *next_buflen = alt_buflen(state);
20613 int src_nents, mapped_nents;
20614 @@ -1582,6 +1454,7 @@ static int ahash_update_first(struct aha
20615 state->final = ahash_final_no_ctx;
20616 scatterwalk_map_and_copy(next_buf, req->src, 0,
20618 + switch_buf(state);
20621 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
20622 @@ -1688,7 +1561,6 @@ struct caam_hash_template {
20623 unsigned int blocksize;
20624 struct ahash_alg template_ahash;
20629 /* ahash descriptors */
20630 @@ -1714,7 +1586,6 @@ static struct caam_hash_template driver_
20633 .alg_type = OP_ALG_ALGSEL_SHA1,
20634 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
20637 .driver_name = "sha224-caam",
20638 @@ -1736,7 +1607,6 @@ static struct caam_hash_template driver_
20641 .alg_type = OP_ALG_ALGSEL_SHA224,
20642 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
20645 .driver_name = "sha256-caam",
20646 @@ -1758,7 +1628,6 @@ static struct caam_hash_template driver_
20649 .alg_type = OP_ALG_ALGSEL_SHA256,
20650 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
20653 .driver_name = "sha384-caam",
20654 @@ -1780,7 +1649,6 @@ static struct caam_hash_template driver_
20657 .alg_type = OP_ALG_ALGSEL_SHA384,
20658 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
20661 .driver_name = "sha512-caam",
20662 @@ -1802,7 +1670,6 @@ static struct caam_hash_template driver_
20665 .alg_type = OP_ALG_ALGSEL_SHA512,
20666 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
20669 .driver_name = "md5-caam",
20670 @@ -1824,14 +1691,12 @@ static struct caam_hash_template driver_
20673 .alg_type = OP_ALG_ALGSEL_MD5,
20674 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
20678 struct caam_hash_alg {
20679 struct list_head entry;
20682 struct ahash_alg ahash_alg;
20685 @@ -1853,6 +1718,7 @@ static int caam_hash_cra_init(struct cry
20686 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
20688 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
20689 + dma_addr_t dma_addr;
20692 * Get a Job ring from Job Ring driver to ensure in-order
20693 @@ -1863,11 +1729,31 @@ static int caam_hash_cra_init(struct cry
20694 pr_err("Job Ring Device allocation for transform failed\n");
20695 return PTR_ERR(ctx->jrdev);
20698 + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
20699 + offsetof(struct caam_hash_ctx,
20700 + sh_desc_update_dma),
20701 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
20702 + if (dma_mapping_error(ctx->jrdev, dma_addr)) {
20703 + dev_err(ctx->jrdev, "unable to map shared descriptors\n");
20704 + caam_jr_free(ctx->jrdev);
20708 + ctx->sh_desc_update_dma = dma_addr;
20709 + ctx->sh_desc_update_first_dma = dma_addr +
20710 + offsetof(struct caam_hash_ctx,
20711 + sh_desc_update_first);
20712 + ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
20714 + ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
20717 /* copy descriptor header template value */
20718 - ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20719 - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
20720 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20722 - ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
20723 + ctx->ctx_len = runninglen[(ctx->adata.algtype &
20724 + OP_ALG_ALGSEL_SUBMASK) >>
20725 OP_ALG_ALGSEL_SHIFT];
20727 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
20728 @@ -1879,30 +1765,10 @@ static void caam_hash_cra_exit(struct cr
20730 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
20732 - if (ctx->sh_desc_update_dma &&
20733 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
20734 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
20735 - desc_bytes(ctx->sh_desc_update),
20737 - if (ctx->sh_desc_update_first_dma &&
20738 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
20739 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
20740 - desc_bytes(ctx->sh_desc_update_first),
20742 - if (ctx->sh_desc_fin_dma &&
20743 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
20744 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
20745 - desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
20746 - if (ctx->sh_desc_digest_dma &&
20747 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
20748 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
20749 - desc_bytes(ctx->sh_desc_digest),
20751 - if (ctx->sh_desc_finup_dma &&
20752 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
20753 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
20754 - desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
20756 + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
20757 + offsetof(struct caam_hash_ctx,
20758 + sh_desc_update_dma),
20759 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
20760 caam_jr_free(ctx->jrdev);
20763 @@ -1961,7 +1827,6 @@ caam_hash_alloc(struct caam_hash_templat
20764 alg->cra_type = &crypto_ahash_type;
20766 t_alg->alg_type = template->alg_type;
20767 - t_alg->alg_op = template->alg_op;
20771 --- a/drivers/crypto/caam/caampkc.c
20772 +++ b/drivers/crypto/caam/caampkc.c
20774 #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
20775 #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
20776 sizeof(struct rsa_priv_f1_pdb))
20777 +#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
20778 + sizeof(struct rsa_priv_f2_pdb))
20779 +#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
20780 + sizeof(struct rsa_priv_f3_pdb))
20782 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
20783 struct akcipher_request *req)
20784 @@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct dev
20785 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
20788 +static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
20789 + struct akcipher_request *req)
20791 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20792 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20793 + struct caam_rsa_key *key = &ctx->key;
20794 + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
20795 + size_t p_sz = key->p_sz;
20796 + size_t q_sz = key->p_sz;
20798 + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
20799 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
20800 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
20801 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
20802 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
20805 +static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
20806 + struct akcipher_request *req)
20808 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20809 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20810 + struct caam_rsa_key *key = &ctx->key;
20811 + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
20812 + size_t p_sz = key->p_sz;
20813 + size_t q_sz = key->p_sz;
20815 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
20816 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
20817 + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
20818 + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
20819 + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
20820 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
20821 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
20824 /* RSA Job Completion handler */
20825 static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
20827 @@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct devi
20828 akcipher_request_complete(req, err);
20831 +static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
20834 + struct akcipher_request *req = context;
20835 + struct rsa_edesc *edesc;
20838 + caam_jr_strstatus(dev, err);
20840 + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
20842 + rsa_priv_f2_unmap(dev, edesc, req);
20843 + rsa_io_unmap(dev, edesc, req);
20846 + akcipher_request_complete(req, err);
20849 +static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
20852 + struct akcipher_request *req = context;
20853 + struct rsa_edesc *edesc;
20856 + caam_jr_strstatus(dev, err);
20858 + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
20860 + rsa_priv_f3_unmap(dev, edesc, req);
20861 + rsa_io_unmap(dev, edesc, req);
20864 + akcipher_request_complete(req, err);
20867 static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
20870 @@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc
20871 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20872 struct device *dev = ctx->dev;
20873 struct rsa_edesc *edesc;
20874 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
20875 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
20876 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20877 + GFP_KERNEL : GFP_ATOMIC;
20879 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
20880 int src_nents, dst_nents;
20881 @@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct ak
20885 +static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
20886 + struct rsa_edesc *edesc)
20888 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20889 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20890 + struct caam_rsa_key *key = &ctx->key;
20891 + struct device *dev = ctx->dev;
20892 + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
20893 + int sec4_sg_index = 0;
20894 + size_t p_sz = key->p_sz;
20895 + size_t q_sz = key->p_sz;
20897 + pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
20898 + if (dma_mapping_error(dev, pdb->d_dma)) {
20899 + dev_err(dev, "Unable to map RSA private exponent memory\n");
20903 + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
20904 + if (dma_mapping_error(dev, pdb->p_dma)) {
20905 + dev_err(dev, "Unable to map RSA prime factor p memory\n");
20909 + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
20910 + if (dma_mapping_error(dev, pdb->q_dma)) {
20911 + dev_err(dev, "Unable to map RSA prime factor q memory\n");
20915 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
20916 + if (dma_mapping_error(dev, pdb->tmp1_dma)) {
20917 + dev_err(dev, "Unable to map RSA tmp1 memory\n");
20921 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
20922 + if (dma_mapping_error(dev, pdb->tmp2_dma)) {
20923 + dev_err(dev, "Unable to map RSA tmp2 memory\n");
20927 + if (edesc->src_nents > 1) {
20928 + pdb->sgf |= RSA_PRIV_PDB_SGF_G;
20929 + pdb->g_dma = edesc->sec4_sg_dma;
20930 + sec4_sg_index += edesc->src_nents;
20932 + pdb->g_dma = sg_dma_address(req->src);
20935 + if (edesc->dst_nents > 1) {
20936 + pdb->sgf |= RSA_PRIV_PDB_SGF_F;
20937 + pdb->f_dma = edesc->sec4_sg_dma +
20938 + sec4_sg_index * sizeof(struct sec4_sg_entry);
20940 + pdb->f_dma = sg_dma_address(req->dst);
20943 + pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
20944 + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
20949 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
20951 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
20953 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
20955 + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
20960 +static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
20961 + struct rsa_edesc *edesc)
20963 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
20964 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
20965 + struct caam_rsa_key *key = &ctx->key;
20966 + struct device *dev = ctx->dev;
20967 + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
20968 + int sec4_sg_index = 0;
20969 + size_t p_sz = key->p_sz;
20970 + size_t q_sz = key->p_sz;
20972 + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
20973 + if (dma_mapping_error(dev, pdb->p_dma)) {
20974 + dev_err(dev, "Unable to map RSA prime factor p memory\n");
20978 + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
20979 + if (dma_mapping_error(dev, pdb->q_dma)) {
20980 + dev_err(dev, "Unable to map RSA prime factor q memory\n");
20984 + pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
20985 + if (dma_mapping_error(dev, pdb->dp_dma)) {
20986 + dev_err(dev, "Unable to map RSA exponent dp memory\n");
20990 + pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
20991 + if (dma_mapping_error(dev, pdb->dq_dma)) {
20992 + dev_err(dev, "Unable to map RSA exponent dq memory\n");
20996 + pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
20997 + if (dma_mapping_error(dev, pdb->c_dma)) {
20998 + dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
21002 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
21003 + if (dma_mapping_error(dev, pdb->tmp1_dma)) {
21004 + dev_err(dev, "Unable to map RSA tmp1 memory\n");
21008 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
21009 + if (dma_mapping_error(dev, pdb->tmp2_dma)) {
21010 + dev_err(dev, "Unable to map RSA tmp2 memory\n");
21014 + if (edesc->src_nents > 1) {
21015 + pdb->sgf |= RSA_PRIV_PDB_SGF_G;
21016 + pdb->g_dma = edesc->sec4_sg_dma;
21017 + sec4_sg_index += edesc->src_nents;
21019 + pdb->g_dma = sg_dma_address(req->src);
21022 + if (edesc->dst_nents > 1) {
21023 + pdb->sgf |= RSA_PRIV_PDB_SGF_F;
21024 + pdb->f_dma = edesc->sec4_sg_dma +
21025 + sec4_sg_index * sizeof(struct sec4_sg_entry);
21027 + pdb->f_dma = sg_dma_address(req->dst);
21030 + pdb->sgf |= key->n_sz;
21031 + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
21036 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
21038 + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
21040 + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
21042 + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
21044 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
21046 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
21051 static int caam_rsa_enc(struct akcipher_request *req)
21053 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21054 @@ -301,24 +543,14 @@ init_fail:
21058 -static int caam_rsa_dec(struct akcipher_request *req)
21059 +static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
21061 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21062 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21063 - struct caam_rsa_key *key = &ctx->key;
21064 struct device *jrdev = ctx->dev;
21065 struct rsa_edesc *edesc;
21068 - if (unlikely(!key->n || !key->d))
21071 - if (req->dst_len < key->n_sz) {
21072 - req->dst_len = key->n_sz;
21073 - dev_err(jrdev, "Output buffer length less than parameter n\n");
21074 - return -EOVERFLOW;
21077 /* Allocate extended descriptor */
21078 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
21080 @@ -344,17 +576,147 @@ init_fail:
21084 +static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
21086 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21087 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21088 + struct device *jrdev = ctx->dev;
21089 + struct rsa_edesc *edesc;
21092 + /* Allocate extended descriptor */
21093 + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
21094 + if (IS_ERR(edesc))
21095 + return PTR_ERR(edesc);
21097 + /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
21098 + ret = set_rsa_priv_f2_pdb(req, edesc);
21102 + /* Initialize Job Descriptor */
21103 + init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
21105 + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
21107 + return -EINPROGRESS;
21109 + rsa_priv_f2_unmap(jrdev, edesc, req);
21112 + rsa_io_unmap(jrdev, edesc, req);
21117 +static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
21119 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21120 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21121 + struct device *jrdev = ctx->dev;
21122 + struct rsa_edesc *edesc;
21125 + /* Allocate extended descriptor */
21126 + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
21127 + if (IS_ERR(edesc))
21128 + return PTR_ERR(edesc);
21130 + /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
21131 + ret = set_rsa_priv_f3_pdb(req, edesc);
21135 + /* Initialize Job Descriptor */
21136 + init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
21138 + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
21140 + return -EINPROGRESS;
21142 + rsa_priv_f3_unmap(jrdev, edesc, req);
21145 + rsa_io_unmap(jrdev, edesc, req);
21150 +static int caam_rsa_dec(struct akcipher_request *req)
21152 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
21153 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21154 + struct caam_rsa_key *key = &ctx->key;
21157 + if (unlikely(!key->n || !key->d))
21160 + if (req->dst_len < key->n_sz) {
21161 + req->dst_len = key->n_sz;
21162 + dev_err(ctx->dev, "Output buffer length less than parameter n\n");
21163 + return -EOVERFLOW;
21166 + if (key->priv_form == FORM3)
21167 + ret = caam_rsa_dec_priv_f3(req);
21168 + else if (key->priv_form == FORM2)
21169 + ret = caam_rsa_dec_priv_f2(req);
21171 + ret = caam_rsa_dec_priv_f1(req);
21176 static void caam_rsa_free_key(struct caam_rsa_key *key)
21183 + kzfree(key->qinv);
21184 + kzfree(key->tmp1);
21185 + kzfree(key->tmp2);
21194 + memset(key, 0, sizeof(*key));
21197 +static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
21199 + while (!**ptr && *nbytes) {
21206 + * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
21207 + * dP, dQ and qInv could decode to less than corresponding p, q length, as the
21208 + * BER-encoding requires that the minimum number of bytes be used to encode the
21209 + * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate
21212 + * @ptr : pointer to {dP, dQ, qInv} CRT member
21213 + * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
21214 + * @dstlen: length in bytes of corresponding p or q prime factor
21216 +static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
21220 + caam_rsa_drop_leading_zeros(&ptr, &nbytes);
21224 + dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
21228 + memcpy(dst + (dstlen - nbytes), ptr, nbytes);
21234 @@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(con
21238 - while (!*buf && *nbytes) {
21242 + caam_rsa_drop_leading_zeros(&buf, nbytes);
21246 val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
21248 @@ -395,7 +756,7 @@ static int caam_rsa_set_pub_key(struct c
21249 unsigned int keylen)
21251 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21252 - struct rsa_key raw_key = {0};
21253 + struct rsa_key raw_key = {NULL};
21254 struct caam_rsa_key *rsa_key = &ctx->key;
21257 @@ -437,11 +798,69 @@ err:
21261 +static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
21262 + struct rsa_key *raw_key)
21264 + struct caam_rsa_key *rsa_key = &ctx->key;
21265 + size_t p_sz = raw_key->p_sz;
21266 + size_t q_sz = raw_key->q_sz;
21268 + rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
21271 + rsa_key->p_sz = p_sz;
21273 + rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
21276 + rsa_key->q_sz = q_sz;
21278 + rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
21279 + if (!rsa_key->tmp1)
21282 + rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
21283 + if (!rsa_key->tmp2)
21286 + rsa_key->priv_form = FORM2;
21288 + rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
21289 + if (!rsa_key->dp)
21292 + rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
21293 + if (!rsa_key->dq)
21296 + rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
21298 + if (!rsa_key->qinv)
21301 + rsa_key->priv_form = FORM3;
21306 + kzfree(rsa_key->dq);
21308 + kzfree(rsa_key->dp);
21310 + kzfree(rsa_key->tmp2);
21312 + kzfree(rsa_key->tmp1);
21314 + kzfree(rsa_key->q);
21316 + kzfree(rsa_key->p);
21319 static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
21320 unsigned int keylen)
21322 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
21323 - struct rsa_key raw_key = {0};
21324 + struct rsa_key raw_key = {NULL};
21325 struct caam_rsa_key *rsa_key = &ctx->key;
21328 @@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct
21329 memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
21330 memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
21332 + caam_rsa_set_priv_key_form(ctx, &raw_key);
21337 --- a/drivers/crypto/caam/caampkc.h
21338 +++ b/drivers/crypto/caam/caampkc.h
21339 @@ -13,21 +13,75 @@
21343 + * caam_priv_key_form - CAAM RSA private key representation
21344 + * CAAM RSA private key may have either of three forms.
21346 + * 1. The first representation consists of the pair (n, d), where the
21347 + * components have the following meanings:
21348 + * n the RSA modulus
21349 + * d the RSA private exponent
21351 + * 2. The second representation consists of the triplet (p, q, d), where the
21352 + * components have the following meanings:
21353 + * p the first prime factor of the RSA modulus n
21354 + * q the second prime factor of the RSA modulus n
21355 + * d the RSA private exponent
21357 + * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
21358 + * where the components have the following meanings:
21359 + * p the first prime factor of the RSA modulus n
21360 + * q the second prime factor of the RSA modulus n
21361 + * dP the first factor's CRT exponent
21362 + * dQ the second factor's CRT exponent
21363 + * qInv the (first) CRT coefficient
21365 + * The benefit of using the third or the second key form is lower computational
21366 + * cost for the decryption and signature operations.
21368 +enum caam_priv_key_form {
21375 * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
21376 * @n : RSA modulus raw byte stream
21377 * @e : RSA public exponent raw byte stream
21378 * @d : RSA private exponent raw byte stream
21379 + * @p : RSA prime factor p of RSA modulus n
21380 + * @q : RSA prime factor q of RSA modulus n
21381 + * @dp : RSA CRT exponent of p
21382 + * @dq : RSA CRT exponent of q
21383 + * @qinv : RSA CRT coefficient
21384 + * @tmp1 : CAAM uses this temporary buffer as internal state buffer.
21385 + * It is assumed to be as long as p.
21386 + * @tmp2 : CAAM uses this temporary buffer as internal state buffer.
21387 + * It is assumed to be as long as q.
21388 * @n_sz : length in bytes of RSA modulus n
21389 * @e_sz : length in bytes of RSA public exponent
21390 * @d_sz : length in bytes of RSA private exponent
21391 + * @p_sz : length in bytes of RSA prime factor p of RSA modulus n
21392 + * @q_sz : length in bytes of RSA prime factor q of RSA modulus n
21393 + * @priv_form : CAAM RSA private key representation
21395 struct caam_rsa_key {
21411 + enum caam_priv_key_form priv_form;
21415 @@ -59,6 +113,8 @@ struct rsa_edesc {
21417 struct rsa_pub_pdb pub;
21418 struct rsa_priv_f1_pdb priv_f1;
21419 + struct rsa_priv_f2_pdb priv_f2;
21420 + struct rsa_priv_f3_pdb priv_f3;
21424 @@ -66,5 +122,7 @@ struct rsa_edesc {
21425 /* Descriptor construction primitives. */
21426 void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
21427 void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
21428 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
21429 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
21432 --- a/drivers/crypto/caam/caamrng.c
21433 +++ b/drivers/crypto/caam/caamrng.c
21436 /* length of descriptors */
21437 #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
21438 -#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
21439 +#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
21441 /* Buffer, its dma address and lock */
21443 @@ -100,8 +100,7 @@ static void rng_done(struct device *jrde
21445 struct buf_data *bd;
21447 - bd = (struct buf_data *)((char *)desc -
21448 - offsetof(struct buf_data, hw_desc));
21449 + bd = container_of(desc, struct buf_data, hw_desc[0]);
21452 caam_jr_strstatus(jrdev, err);
21453 @@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(str
21455 init_sh_desc(desc, HDR_SHARE_SERIAL);
21457 - /* Propagate errors from shared to job descriptor */
21458 - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
21460 /* Generate random bytes */
21461 append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
21463 @@ -289,11 +285,7 @@ static int caam_init_rng(struct caam_rng
21467 - err = caam_init_buf(ctx, 1);
21472 + return caam_init_buf(ctx, 1);
21475 static struct hwrng caam_rng = {
21476 @@ -351,7 +343,7 @@ static int __init caam_rng_init(void)
21477 pr_err("Job Ring Device allocation for transform failed\n");
21478 return PTR_ERR(dev);
21480 - rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
21481 + rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
21484 goto free_caam_alloc;
21485 --- a/drivers/crypto/caam/compat.h
21486 +++ b/drivers/crypto/caam/compat.h
21488 #include <linux/of_platform.h>
21489 #include <linux/dma-mapping.h>
21490 #include <linux/io.h>
21491 +#include <linux/iommu.h>
21492 #include <linux/spinlock.h>
21493 #include <linux/rtnetlink.h>
21494 #include <linux/in.h>
21495 --- a/drivers/crypto/caam/ctrl.c
21496 +++ b/drivers/crypto/caam/ctrl.c
21498 * Controller-level driver, kernel property detection, initialization
21500 * Copyright 2008-2012 Freescale Semiconductor, Inc.
21501 + * Copyright 2017 NXP
21504 #include <linux/device.h>
21505 #include <linux/of_address.h>
21506 #include <linux/of_irq.h>
21507 +#include <linux/sys_soc.h>
21509 #include "compat.h"
21511 #include "intern.h"
21513 #include "desc_constr.h"
21514 -#include "error.h"
21517 bool caam_little_end;
21518 EXPORT_SYMBOL(caam_little_end);
21520 +EXPORT_SYMBOL(caam_imx);
21522 +EXPORT_SYMBOL(caam_dpaa2);
21524 +#ifdef CONFIG_CAAM_QI
21529 * i.MX targets tend to have clock control subsystems that can
21530 * enable/disable clocking to our device.
21532 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
21533 -static inline struct clk *caam_drv_identify_clk(struct device *dev,
21536 - return devm_clk_get(dev, clk_name);
21539 static inline struct clk *caam_drv_identify_clk(struct device *dev,
21543 + return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
21548 * Descriptor to instantiate RNG State Handle 0 in normal mode and
21549 @@ -270,7 +271,7 @@ static int deinstantiate_rng(struct devi
21551 * If the corresponding bit is set, then it means the state
21552 * handle was initialized by us, and thus it needs to be
21553 - * deintialized as well
21554 + * deinitialized as well
21556 if ((1 << sh_idx) & state_handle_mask) {
21558 @@ -303,20 +304,24 @@ static int caam_remove(struct platform_d
21559 struct device *ctrldev;
21560 struct caam_drv_private *ctrlpriv;
21561 struct caam_ctrl __iomem *ctrl;
21564 ctrldev = &pdev->dev;
21565 ctrlpriv = dev_get_drvdata(ctrldev);
21566 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
21568 - /* Remove platform devices for JobRs */
21569 - for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
21570 - if (ctrlpriv->jrpdev[ring])
21571 - of_device_unregister(ctrlpriv->jrpdev[ring]);
21573 + /* Remove platform devices under the crypto node */
21574 + of_platform_depopulate(ctrldev);
21576 +#ifdef CONFIG_CAAM_QI
21577 + if (ctrlpriv->qidev)
21578 + caam_qi_shutdown(ctrlpriv->qidev);
21581 - /* De-initialize RNG state handles initialized by this driver. */
21582 - if (ctrlpriv->rng4_sh_init)
21584 + * De-initialize RNG state handles initialized by this driver.
21585 + * In case of DPAA 2.x, RNG is managed by MC firmware.
21587 + if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
21588 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
21590 /* Shut down debug views */
21591 @@ -331,8 +336,8 @@ static int caam_remove(struct platform_d
21592 clk_disable_unprepare(ctrlpriv->caam_ipg);
21593 clk_disable_unprepare(ctrlpriv->caam_mem);
21594 clk_disable_unprepare(ctrlpriv->caam_aclk);
21595 - clk_disable_unprepare(ctrlpriv->caam_emi_slow);
21597 + if (ctrlpriv->caam_emi_slow)
21598 + clk_disable_unprepare(ctrlpriv->caam_emi_slow);
21602 @@ -366,11 +371,8 @@ static void kick_trng(struct platform_de
21604 val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
21605 >> RTSDCTL_ENT_DLY_SHIFT;
21606 - if (ent_delay <= val) {
21607 - /* put RNG4 into run mode */
21608 - clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
21611 + if (ent_delay <= val)
21614 val = rd_reg32(&r4tst->rtsdctl);
21615 val = (val & ~RTSDCTL_ENT_DLY_MASK) |
21616 @@ -382,15 +384,12 @@ static void kick_trng(struct platform_de
21617 wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
21618 /* read the control register */
21619 val = rd_reg32(&r4tst->rtmctl);
21622 * select raw sampling in both entropy shifter
21623 - * and statistical checker
21624 + * and statistical checker; put RNG4 into run mode
21626 - clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
21627 - /* put RNG4 into run mode */
21628 - clrsetbits_32(&val, RTMCTL_PRGM, 0);
21629 - /* write back the control register */
21630 - wr_reg32(&r4tst->rtmctl, val);
21631 + clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
21635 @@ -411,28 +410,26 @@ int caam_get_era(void)
21637 EXPORT_SYMBOL(caam_get_era);
21639 -#ifdef CONFIG_DEBUG_FS
21640 -static int caam_debugfs_u64_get(void *data, u64 *val)
21642 - *val = caam64_to_cpu(*(u64 *)data);
21646 -static int caam_debugfs_u32_get(void *data, u64 *val)
21648 - *val = caam32_to_cpu(*(u32 *)data);
21652 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
21653 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
21655 +static const struct of_device_id caam_match[] = {
21657 + .compatible = "fsl,sec-v4.0",
21660 + .compatible = "fsl,sec4.0",
21664 +MODULE_DEVICE_TABLE(of, caam_match);
21666 /* Probe routine for CAAM top (controller) level */
21667 static int caam_probe(struct platform_device *pdev)
21669 - int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
21670 + int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
21672 + static const struct soc_device_attribute imx_soc[] = {
21673 + {.family = "Freescale i.MX"},
21676 struct device *dev;
21677 struct device_node *nprop, *np;
21678 struct caam_ctrl __iomem *ctrl;
21679 @@ -452,9 +449,10 @@ static int caam_probe(struct platform_de
21682 dev_set_drvdata(dev, ctrlpriv);
21683 - ctrlpriv->pdev = pdev;
21684 nprop = pdev->dev.of_node;
21686 + caam_imx = (bool)soc_device_match(imx_soc);
21688 /* Enable clocking */
21689 clk = caam_drv_identify_clk(&pdev->dev, "ipg");
21691 @@ -483,14 +481,16 @@ static int caam_probe(struct platform_de
21693 ctrlpriv->caam_aclk = clk;
21695 - clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
21696 - if (IS_ERR(clk)) {
21697 - ret = PTR_ERR(clk);
21698 - dev_err(&pdev->dev,
21699 - "can't identify CAAM emi_slow clk: %d\n", ret);
21701 + if (!of_machine_is_compatible("fsl,imx6ul")) {
21702 + clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
21703 + if (IS_ERR(clk)) {
21704 + ret = PTR_ERR(clk);
21705 + dev_err(&pdev->dev,
21706 + "can't identify CAAM emi_slow clk: %d\n", ret);
21709 + ctrlpriv->caam_emi_slow = clk;
21711 - ctrlpriv->caam_emi_slow = clk;
21713 ret = clk_prepare_enable(ctrlpriv->caam_ipg);
21715 @@ -511,11 +511,13 @@ static int caam_probe(struct platform_de
21716 goto disable_caam_mem;
21719 - ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
21721 - dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
21723 - goto disable_caam_aclk;
21724 + if (ctrlpriv->caam_emi_slow) {
21725 + ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
21727 + dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
21729 + goto disable_caam_aclk;
21733 /* Get configuration properties from device tree */
21734 @@ -542,13 +544,13 @@ static int caam_probe(struct platform_de
21736 BLOCK_OFFSET = PG_SIZE_64K;
21738 - ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
21739 - ctrlpriv->assure = (struct caam_assurance __force *)
21740 - ((uint8_t *)ctrl +
21741 + ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
21742 + ctrlpriv->assure = (struct caam_assurance __iomem __force *)
21743 + ((__force uint8_t *)ctrl +
21744 BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
21746 - ctrlpriv->deco = (struct caam_deco __force *)
21747 - ((uint8_t *)ctrl +
21748 + ctrlpriv->deco = (struct caam_deco __iomem __force *)
21749 + ((__force uint8_t *)ctrl +
21750 BLOCK_OFFSET * DECO_BLOCK_NUMBER
21753 @@ -557,12 +559,17 @@ static int caam_probe(struct platform_de
21756 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
21757 - * long pointers in master configuration register
21758 + * long pointers in master configuration register.
21759 + * In case of DPAA 2.x, Management Complex firmware performs
21760 + * the configuration.
21762 - clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
21763 - MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
21764 - MCFGR_WDENABLE | MCFGR_LARGE_BURST |
21765 - (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
21766 + caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
21768 + clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
21769 + MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
21770 + MCFGR_WDENABLE | MCFGR_LARGE_BURST |
21771 + (sizeof(dma_addr_t) == sizeof(u64) ?
21772 + MCFGR_LONG_PTR : 0));
21775 * Read the Compile Time paramters and SCFGR to determine
21776 @@ -590,64 +597,67 @@ static int caam_probe(struct platform_de
21777 JRSTART_JR1_START | JRSTART_JR2_START |
21778 JRSTART_JR3_START);
21780 - if (sizeof(dma_addr_t) == sizeof(u64))
21781 - if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
21782 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
21783 + if (sizeof(dma_addr_t) == sizeof(u64)) {
21785 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
21786 + else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
21787 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
21789 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
21791 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
21794 - * Detect and enable JobRs
21795 - * First, find out how many ring spec'ed, allocate references
21796 - * for all, then go probe each one.
21799 - for_each_available_child_of_node(nprop, np)
21800 - if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
21801 - of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
21803 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
21805 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
21808 + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
21809 + goto iounmap_ctrl;
21812 - ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
21813 - sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
21814 - if (ctrlpriv->jrpdev == NULL) {
21816 + ret = of_platform_populate(nprop, caam_match, NULL, dev);
21818 + dev_err(dev, "JR platform devices creation error\n");
21822 +#ifdef CONFIG_DEBUG_FS
21824 + * FIXME: needs better naming distinction, as some amalgamation of
21825 + * "caam" and nprop->full_name. The OF name isn't distinctive,
21826 + * but does separate instances
21828 + perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
21830 + ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
21831 + ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
21834 - ctrlpriv->total_jobrs = 0;
21835 for_each_available_child_of_node(nprop, np)
21836 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
21837 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
21838 - ctrlpriv->jrpdev[ring] =
21839 - of_platform_device_create(np, NULL, dev);
21840 - if (!ctrlpriv->jrpdev[ring]) {
21841 - pr_warn("JR%d Platform device creation error\n",
21845 - ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
21846 - ((uint8_t *)ctrl +
21847 + ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
21848 + ((__force uint8_t *)ctrl +
21849 (ring + JR_BLOCK_NUMBER) *
21852 ctrlpriv->total_jobrs++;
21857 - /* Check to see if QI present. If so, enable */
21858 - ctrlpriv->qi_present =
21859 - !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
21860 - CTPR_MS_QI_MASK);
21861 - if (ctrlpriv->qi_present) {
21862 - ctrlpriv->qi = (struct caam_queue_if __force *)
21863 - ((uint8_t *)ctrl +
21864 + /* Check to see if (DPAA 1.x) QI present. If so, enable */
21865 + ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
21866 + if (ctrlpriv->qi_present && !caam_dpaa2) {
21867 + ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
21868 + ((__force uint8_t *)ctrl +
21869 BLOCK_OFFSET * QI_BLOCK_NUMBER
21871 /* This is all that's required to physically enable QI */
21872 wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
21874 + /* If QMAN driver is present, init CAAM-QI backend */
21875 +#ifdef CONFIG_CAAM_QI
21876 + ret = caam_qi_init(pdev);
21878 + dev_err(dev, "caam qi i/f init failed: %d\n", ret);
21882 /* If no QI and no rings specified, quit and go home */
21883 @@ -662,8 +672,10 @@ static int caam_probe(struct platform_de
21885 * If SEC has RNG version >= 4 and RNG state handle has not been
21886 * already instantiated, do RNG instantiation
21887 + * In case of DPAA 2.x, RNG is managed by MC firmware.
21889 - if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
21890 + if (!caam_dpaa2 &&
21891 + (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
21892 ctrlpriv->rng4_sh_init =
21893 rd_reg32(&ctrl->r4tst[0].rdsta);
21895 @@ -731,77 +743,46 @@ static int caam_probe(struct platform_de
21896 /* Report "alive" for developer to see */
21897 dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
21899 - dev_info(dev, "job rings = %d, qi = %d\n",
21900 - ctrlpriv->total_jobrs, ctrlpriv->qi_present);
21901 + dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
21902 + ctrlpriv->total_jobrs, ctrlpriv->qi_present,
21903 + caam_dpaa2 ? "yes" : "no");
21905 #ifdef CONFIG_DEBUG_FS
21907 - * FIXME: needs better naming distinction, as some amalgamation of
21908 - * "caam" and nprop->full_name. The OF name isn't distinctive,
21909 - * but does separate instances
21911 - perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
21913 - ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
21914 - ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
21916 - /* Controller-level - performance monitor counters */
21918 - ctrlpriv->ctl_rq_dequeued =
21919 - debugfs_create_file("rq_dequeued",
21920 - S_IRUSR | S_IRGRP | S_IROTH,
21921 - ctrlpriv->ctl, &perfmon->req_dequeued,
21922 - &caam_fops_u64_ro);
21923 - ctrlpriv->ctl_ob_enc_req =
21924 - debugfs_create_file("ob_rq_encrypted",
21925 - S_IRUSR | S_IRGRP | S_IROTH,
21926 - ctrlpriv->ctl, &perfmon->ob_enc_req,
21927 - &caam_fops_u64_ro);
21928 - ctrlpriv->ctl_ib_dec_req =
21929 - debugfs_create_file("ib_rq_decrypted",
21930 - S_IRUSR | S_IRGRP | S_IROTH,
21931 - ctrlpriv->ctl, &perfmon->ib_dec_req,
21932 - &caam_fops_u64_ro);
21933 - ctrlpriv->ctl_ob_enc_bytes =
21934 - debugfs_create_file("ob_bytes_encrypted",
21935 - S_IRUSR | S_IRGRP | S_IROTH,
21936 - ctrlpriv->ctl, &perfmon->ob_enc_bytes,
21937 - &caam_fops_u64_ro);
21938 - ctrlpriv->ctl_ob_prot_bytes =
21939 - debugfs_create_file("ob_bytes_protected",
21940 - S_IRUSR | S_IRGRP | S_IROTH,
21941 - ctrlpriv->ctl, &perfmon->ob_prot_bytes,
21942 - &caam_fops_u64_ro);
21943 - ctrlpriv->ctl_ib_dec_bytes =
21944 - debugfs_create_file("ib_bytes_decrypted",
21945 - S_IRUSR | S_IRGRP | S_IROTH,
21946 - ctrlpriv->ctl, &perfmon->ib_dec_bytes,
21947 - &caam_fops_u64_ro);
21948 - ctrlpriv->ctl_ib_valid_bytes =
21949 - debugfs_create_file("ib_bytes_validated",
21950 - S_IRUSR | S_IRGRP | S_IROTH,
21951 - ctrlpriv->ctl, &perfmon->ib_valid_bytes,
21952 - &caam_fops_u64_ro);
21953 + debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
21954 + ctrlpriv->ctl, &perfmon->req_dequeued,
21955 + &caam_fops_u64_ro);
21956 + debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
21957 + ctrlpriv->ctl, &perfmon->ob_enc_req,
21958 + &caam_fops_u64_ro);
21959 + debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
21960 + ctrlpriv->ctl, &perfmon->ib_dec_req,
21961 + &caam_fops_u64_ro);
21962 + debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
21963 + ctrlpriv->ctl, &perfmon->ob_enc_bytes,
21964 + &caam_fops_u64_ro);
21965 + debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
21966 + ctrlpriv->ctl, &perfmon->ob_prot_bytes,
21967 + &caam_fops_u64_ro);
21968 + debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
21969 + ctrlpriv->ctl, &perfmon->ib_dec_bytes,
21970 + &caam_fops_u64_ro);
21971 + debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
21972 + ctrlpriv->ctl, &perfmon->ib_valid_bytes,
21973 + &caam_fops_u64_ro);
21975 /* Controller level - global status values */
21976 - ctrlpriv->ctl_faultaddr =
21977 - debugfs_create_file("fault_addr",
21978 - S_IRUSR | S_IRGRP | S_IROTH,
21979 - ctrlpriv->ctl, &perfmon->faultaddr,
21980 - &caam_fops_u32_ro);
21981 - ctrlpriv->ctl_faultdetail =
21982 - debugfs_create_file("fault_detail",
21983 - S_IRUSR | S_IRGRP | S_IROTH,
21984 - ctrlpriv->ctl, &perfmon->faultdetail,
21985 - &caam_fops_u32_ro);
21986 - ctrlpriv->ctl_faultstatus =
21987 - debugfs_create_file("fault_status",
21988 - S_IRUSR | S_IRGRP | S_IROTH,
21989 - ctrlpriv->ctl, &perfmon->status,
21990 - &caam_fops_u32_ro);
21991 + debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
21992 + ctrlpriv->ctl, &perfmon->faultaddr,
21993 + &caam_fops_u32_ro);
21994 + debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
21995 + ctrlpriv->ctl, &perfmon->faultdetail,
21996 + &caam_fops_u32_ro);
21997 + debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
21998 + ctrlpriv->ctl, &perfmon->status,
21999 + &caam_fops_u32_ro);
22001 /* Internal covering keys (useful in non-secure mode only) */
22002 - ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
22003 + ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
22004 ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22005 ctrlpriv->ctl_kek = debugfs_create_blob("kek",
22007 @@ -809,7 +790,7 @@ static int caam_probe(struct platform_de
22009 &ctrlpriv->ctl_kek_wrap);
22011 - ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
22012 + ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
22013 ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22014 ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
22016 @@ -817,7 +798,7 @@ static int caam_probe(struct platform_de
22018 &ctrlpriv->ctl_tkek_wrap);
22020 - ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
22021 + ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
22022 ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
22023 ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
22025 @@ -828,13 +809,17 @@ static int caam_probe(struct platform_de
22029 +#ifdef CONFIG_DEBUG_FS
22030 + debugfs_remove_recursive(ctrlpriv->dfs_root);
22037 disable_caam_emi_slow:
22038 - clk_disable_unprepare(ctrlpriv->caam_emi_slow);
22039 + if (ctrlpriv->caam_emi_slow)
22040 + clk_disable_unprepare(ctrlpriv->caam_emi_slow);
22042 clk_disable_unprepare(ctrlpriv->caam_aclk);
22044 @@ -844,17 +829,6 @@ disable_caam_ipg:
22048 -static struct of_device_id caam_match[] = {
22050 - .compatible = "fsl,sec-v4.0",
22053 - .compatible = "fsl,sec4.0",
22057 -MODULE_DEVICE_TABLE(of, caam_match);
22059 static struct platform_driver caam_driver = {
22062 --- a/drivers/crypto/caam/ctrl.h
22063 +++ b/drivers/crypto/caam/ctrl.h
22065 /* Prototypes for backend-level services exposed to APIs */
22066 int caam_get_era(void);
22068 +extern bool caam_dpaa2;
22070 #endif /* CTRL_H */
22071 --- a/drivers/crypto/caam/desc.h
22072 +++ b/drivers/crypto/caam/desc.h
22074 #define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
22075 #define SEC4_SG_OFFSET_MASK 0x00001fff
22077 -struct sec4_sg_entry {
22083 /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
22084 #define MAX_CAAM_DESCSIZE 64
22086 @@ -47,6 +41,7 @@ struct sec4_sg_entry {
22087 #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
22088 #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
22089 #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
22090 +#define CMD_MOVEB (0x07 << CMD_SHIFT)
22091 #define CMD_STORE (0x0a << CMD_SHIFT)
22092 #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
22093 #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
22094 @@ -90,8 +85,8 @@ struct sec4_sg_entry {
22095 #define HDR_ZRO 0x00008000
22097 /* Start Index or SharedDesc Length */
22098 -#define HDR_START_IDX_MASK 0x3f
22099 #define HDR_START_IDX_SHIFT 16
22100 +#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
22102 /* If shared descriptor header, 6-bit length */
22103 #define HDR_DESCLEN_SHR_MASK 0x3f
22104 @@ -121,10 +116,10 @@ struct sec4_sg_entry {
22105 #define HDR_PROP_DNR 0x00000800
22107 /* JobDesc/SharedDesc share property */
22108 -#define HDR_SD_SHARE_MASK 0x03
22109 #define HDR_SD_SHARE_SHIFT 8
22110 -#define HDR_JD_SHARE_MASK 0x07
22111 +#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
22112 #define HDR_JD_SHARE_SHIFT 8
22113 +#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
22115 #define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
22116 #define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
22117 @@ -235,7 +230,7 @@ struct sec4_sg_entry {
22118 #define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
22119 #define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
22120 #define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
22121 -#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
22122 +#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
22123 #define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
22124 #define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
22125 #define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
22126 @@ -360,6 +355,7 @@ struct sec4_sg_entry {
22127 #define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
22128 #define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
22129 #define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
22130 +#define FIFOLD_TYPE_IFIFO (0x0f << FIFOLD_TYPE_SHIFT)
22132 /* Other types. Need to OR in last/flush bits as desired */
22133 #define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
22134 @@ -400,7 +396,7 @@ struct sec4_sg_entry {
22135 #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
22136 #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
22137 #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
22138 -#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
22139 +#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
22140 #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
22141 #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
22142 #define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
22143 @@ -413,6 +409,7 @@ struct sec4_sg_entry {
22144 #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
22145 #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
22146 #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
22147 +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
22148 #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
22151 @@ -1107,8 +1104,8 @@ struct sec4_sg_entry {
22152 /* For non-protocol/alg-only op commands */
22153 #define OP_ALG_TYPE_SHIFT 24
22154 #define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
22155 -#define OP_ALG_TYPE_CLASS1 2
22156 -#define OP_ALG_TYPE_CLASS2 4
22157 +#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
22158 +#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
22160 #define OP_ALG_ALGSEL_SHIFT 16
22161 #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
22162 @@ -1249,7 +1246,7 @@ struct sec4_sg_entry {
22163 #define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
22165 /* PKHA mode copy-memory functions */
22166 -#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
22167 +#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
22168 #define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
22169 #define OP_ALG_PKMODE_DST_REG_SHIFT 10
22170 #define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
22171 @@ -1445,10 +1442,11 @@ struct sec4_sg_entry {
22172 #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
22173 #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
22174 #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
22175 -#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
22176 +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
22177 #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
22178 #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
22179 #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
22180 +#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
22182 /* Destination selectors */
22183 #define MATH_DEST_SHIFT 8
22184 @@ -1629,4 +1627,31 @@ struct sec4_sg_entry {
22185 /* Frame Descriptor Command for Replacement Job Descriptor */
22186 #define FD_CMD_REPLACE_JOB_DESC 0x20000000
22188 +/* CHA Control Register bits */
22189 +#define CCTRL_RESET_CHA_ALL 0x1
22190 +#define CCTRL_RESET_CHA_AESA 0x2
22191 +#define CCTRL_RESET_CHA_DESA 0x4
22192 +#define CCTRL_RESET_CHA_AFHA 0x8
22193 +#define CCTRL_RESET_CHA_KFHA 0x10
22194 +#define CCTRL_RESET_CHA_SF8A 0x20
22195 +#define CCTRL_RESET_CHA_PKHA 0x40
22196 +#define CCTRL_RESET_CHA_MDHA 0x80
22197 +#define CCTRL_RESET_CHA_CRCA 0x100
22198 +#define CCTRL_RESET_CHA_RNG 0x200
22199 +#define CCTRL_RESET_CHA_SF9A 0x400
22200 +#define CCTRL_RESET_CHA_ZUCE 0x800
22201 +#define CCTRL_RESET_CHA_ZUCA 0x1000
22202 +#define CCTRL_UNLOAD_PK_A0 0x10000
22203 +#define CCTRL_UNLOAD_PK_A1 0x20000
22204 +#define CCTRL_UNLOAD_PK_A2 0x40000
22205 +#define CCTRL_UNLOAD_PK_A3 0x80000
22206 +#define CCTRL_UNLOAD_PK_B0 0x100000
22207 +#define CCTRL_UNLOAD_PK_B1 0x200000
22208 +#define CCTRL_UNLOAD_PK_B2 0x400000
22209 +#define CCTRL_UNLOAD_PK_B3 0x800000
22210 +#define CCTRL_UNLOAD_PK_N 0x1000000
22211 +#define CCTRL_UNLOAD_PK_A 0x4000000
22212 +#define CCTRL_UNLOAD_PK_B 0x8000000
22213 +#define CCTRL_UNLOAD_SBOX 0x10000000
22215 #endif /* DESC_H */
22216 --- a/drivers/crypto/caam/desc_constr.h
22217 +++ b/drivers/crypto/caam/desc_constr.h
22219 * Copyright 2008-2012 Freescale Semiconductor, Inc.
22222 +#ifndef DESC_CONSTR_H
22223 +#define DESC_CONSTR_H
22228 @@ -33,38 +36,39 @@
22230 extern bool caam_little_end;
22232 -static inline int desc_len(u32 *desc)
22233 +static inline int desc_len(u32 * const desc)
22235 return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
22238 -static inline int desc_bytes(void *desc)
22239 +static inline int desc_bytes(void * const desc)
22241 return desc_len(desc) * CAAM_CMD_SZ;
22244 -static inline u32 *desc_end(u32 *desc)
22245 +static inline u32 *desc_end(u32 * const desc)
22247 return desc + desc_len(desc);
22250 -static inline void *sh_desc_pdb(u32 *desc)
22251 +static inline void *sh_desc_pdb(u32 * const desc)
22256 -static inline void init_desc(u32 *desc, u32 options)
22257 +static inline void init_desc(u32 * const desc, u32 options)
22259 *desc = cpu_to_caam32((options | HDR_ONE) + 1);
22262 -static inline void init_sh_desc(u32 *desc, u32 options)
22263 +static inline void init_sh_desc(u32 * const desc, u32 options)
22266 init_desc(desc, CMD_SHARED_DESC_HDR | options);
22269 -static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
22270 +static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
22271 + size_t pdb_bytes)
22273 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
22275 @@ -72,19 +76,20 @@ static inline void init_sh_desc_pdb(u32
22279 -static inline void init_job_desc(u32 *desc, u32 options)
22280 +static inline void init_job_desc(u32 * const desc, u32 options)
22282 init_desc(desc, CMD_DESC_HDR | options);
22285 -static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
22286 +static inline void init_job_desc_pdb(u32 * const desc, u32 options,
22287 + size_t pdb_bytes)
22289 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
22291 init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
22294 -static inline void append_ptr(u32 *desc, dma_addr_t ptr)
22295 +static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
22297 dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
22299 @@ -94,8 +99,8 @@ static inline void append_ptr(u32 *desc,
22300 CAAM_PTR_SZ / CAAM_CMD_SZ);
22303 -static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
22305 +static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
22306 + int len, u32 options)
22309 init_job_desc(desc, HDR_SHARED | options |
22310 @@ -103,7 +108,7 @@ static inline void init_job_desc_shared(
22311 append_ptr(desc, ptr);
22314 -static inline void append_data(u32 *desc, void *data, int len)
22315 +static inline void append_data(u32 * const desc, void *data, int len)
22317 u32 *offset = desc_end(desc);
22319 @@ -114,7 +119,7 @@ static inline void append_data(u32 *desc
22320 (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
22323 -static inline void append_cmd(u32 *desc, u32 command)
22324 +static inline void append_cmd(u32 * const desc, u32 command)
22326 u32 *cmd = desc_end(desc);
22328 @@ -125,7 +130,7 @@ static inline void append_cmd(u32 *desc,
22330 #define append_u32 append_cmd
22332 -static inline void append_u64(u32 *desc, u64 data)
22333 +static inline void append_u64(u32 * const desc, u64 data)
22335 u32 *offset = desc_end(desc);
22337 @@ -142,14 +147,14 @@ static inline void append_u64(u32 *desc,
22340 /* Write command without affecting header, and return pointer to next word */
22341 -static inline u32 *write_cmd(u32 *desc, u32 command)
22342 +static inline u32 *write_cmd(u32 * const desc, u32 command)
22344 *desc = cpu_to_caam32(command);
22349 -static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
22350 +static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
22353 append_cmd(desc, command | len);
22354 @@ -157,7 +162,7 @@ static inline void append_cmd_ptr(u32 *d
22357 /* Write length after pointer, rather than inside command */
22358 -static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
22359 +static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
22360 unsigned int len, u32 command)
22362 append_cmd(desc, command);
22363 @@ -166,7 +171,7 @@ static inline void append_cmd_ptr_extlen
22364 append_cmd(desc, len);
22367 -static inline void append_cmd_data(u32 *desc, void *data, int len,
22368 +static inline void append_cmd_data(u32 * const desc, void *data, int len,
22371 append_cmd(desc, command | IMMEDIATE | len);
22372 @@ -174,7 +179,7 @@ static inline void append_cmd_data(u32 *
22375 #define APPEND_CMD_RET(cmd, op) \
22376 -static inline u32 *append_##cmd(u32 *desc, u32 options) \
22377 +static inline u32 *append_##cmd(u32 * const desc, u32 options) \
22379 u32 *cmd = desc_end(desc); \
22381 @@ -183,14 +188,15 @@ static inline u32 *append_##cmd(u32 *des
22383 APPEND_CMD_RET(jump, JUMP)
22384 APPEND_CMD_RET(move, MOVE)
22385 +APPEND_CMD_RET(moveb, MOVEB)
22387 -static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
22388 +static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
22390 *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
22391 (desc_len(desc) - (jump_cmd - desc)));
22394 -static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
22395 +static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
22397 u32 val = caam32_to_cpu(*move_cmd);
22399 @@ -200,7 +206,7 @@ static inline void set_move_tgt_here(u32
22402 #define APPEND_CMD(cmd, op) \
22403 -static inline void append_##cmd(u32 *desc, u32 options) \
22404 +static inline void append_##cmd(u32 * const desc, u32 options) \
22407 append_cmd(desc, CMD_##op | options); \
22408 @@ -208,7 +214,8 @@ static inline void append_##cmd(u32 *des
22409 APPEND_CMD(operation, OPERATION)
22411 #define APPEND_CMD_LEN(cmd, op) \
22412 -static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
22413 +static inline void append_##cmd(u32 * const desc, unsigned int len, \
22417 append_cmd(desc, CMD_##op | len | options); \
22418 @@ -220,8 +227,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_L
22419 APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
22421 #define APPEND_CMD_PTR(cmd, op) \
22422 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
22424 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
22425 + unsigned int len, u32 options) \
22428 append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
22429 @@ -231,8 +238,8 @@ APPEND_CMD_PTR(load, LOAD)
22430 APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
22431 APPEND_CMD_PTR(fifo_store, FIFO_STORE)
22433 -static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
22435 +static inline void append_store(u32 * const desc, dma_addr_t ptr,
22436 + unsigned int len, u32 options)
22440 @@ -249,7 +256,8 @@ static inline void append_store(u32 *des
22443 #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
22444 -static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
22445 +static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
22446 + dma_addr_t ptr, \
22447 unsigned int len, \
22450 @@ -263,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
22451 APPEND_SEQ_PTR_INTLEN(out, OUT)
22453 #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
22454 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
22455 +static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
22456 unsigned int len, u32 options) \
22459 @@ -273,7 +281,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
22460 APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
22462 #define APPEND_CMD_PTR_EXTLEN(cmd, op) \
22463 -static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
22464 +static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
22465 unsigned int len, u32 options) \
22468 @@ -287,7 +295,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_O
22469 * the size of its type
22471 #define APPEND_CMD_PTR_LEN(cmd, op, type) \
22472 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
22473 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
22474 type len, u32 options) \
22477 @@ -304,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
22478 * from length of immediate data provided, e.g., split keys
22480 #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
22481 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
22482 +static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
22483 unsigned int data_len, \
22484 unsigned int len, u32 options) \
22486 @@ -315,7 +323,7 @@ static inline void append_##cmd##_as_imm
22487 APPEND_CMD_PTR_TO_IMM2(key, KEY);
22489 #define APPEND_CMD_RAW_IMM(cmd, op, type) \
22490 -static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
22491 +static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
22495 @@ -426,3 +434,66 @@ do { \
22496 APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
22497 #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
22498 APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
22501 + * struct alginfo - Container for algorithm details
22502 + * @algtype: algorithm selector; for valid values, see documentation of the
22503 + * functions where it is used.
22504 + * @keylen: length of the provided algorithm key, in bytes
22505 + * @keylen_pad: padded length of the provided algorithm key, in bytes
22506 + * @key: address where algorithm key resides; virtual address if key_inline
22507 + * is true, dma (bus) address if key_inline is false.
22508 + * @key_inline: true - key can be inlined in the descriptor; false - key is
22509 + * referenced by the descriptor
22513 + unsigned int keylen;
22514 + unsigned int keylen_pad;
22516 + dma_addr_t key_dma;
22523 + * desc_inline_query() - Provide indications on which data items can be inlined
22524 + * and which shall be referenced in a shared descriptor.
22525 + * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
22526 + * excluding the data items to be inlined (or corresponding
22527 + * pointer if an item is not inlined). Each cnstr_* function that
22528 + * generates descriptors should have a define mentioning
22529 + * corresponding length.
22530 + * @jd_len: Maximum length of the job descriptor(s) that will be used
22531 + * together with the shared descriptor.
22532 + * @data_len: Array of lengths of the data items trying to be inlined
22533 + * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
22535 + * @count: Number of data items (size of @data_len array); must be <= 32
22537 + * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
22538 + * check @inl_mask for details.
22540 +static inline int desc_inline_query(unsigned int sd_base_len,
22541 + unsigned int jd_len, unsigned int *data_len,
22542 + u32 *inl_mask, unsigned int count)
22544 + int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
22548 + for (i = 0; (i < count) && (rem_bytes > 0); i++) {
22549 + if (rem_bytes - (int)(data_len[i] +
22550 + (count - i - 1) * CAAM_PTR_SZ) >= 0) {
22551 + rem_bytes -= data_len[i];
22552 + *inl_mask |= (1 << i);
22554 + rem_bytes -= CAAM_PTR_SZ;
22558 + return (rem_bytes >= 0) ? 0 : -1;
22561 +#endif /* DESC_CONSTR_H */
22563 +++ b/drivers/crypto/caam/dpseci.c
22566 + * Copyright 2013-2016 Freescale Semiconductor Inc.
22567 + * Copyright 2017 NXP
22569 + * Redistribution and use in source and binary forms, with or without
22570 + * modification, are permitted provided that the following conditions are met:
22571 + * * Redistributions of source code must retain the above copyright
22572 + * notice, this list of conditions and the following disclaimer.
22573 + * * Redistributions in binary form must reproduce the above copyright
22574 + * notice, this list of conditions and the following disclaimer in the
22575 + * documentation and/or other materials provided with the distribution.
22576 + * * Neither the names of the above-listed copyright holders nor the
22577 + * names of any contributors may be used to endorse or promote products
22578 + * derived from this software without specific prior written permission.
22581 + * ALTERNATIVELY, this software may be distributed under the terms of the
22582 + * GNU General Public License ("GPL") as published by the Free Software
22583 + * Foundation, either version 2 of that License or (at your option) any
22586 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22587 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22588 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22589 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22590 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22591 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22592 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22593 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22594 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22595 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22596 + * POSSIBILITY OF SUCH DAMAGE.
22599 +#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
22600 +#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
22601 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
22602 +#include "dpseci.h"
22603 +#include "dpseci_cmd.h"
22606 + * dpseci_open() - Open a control session for the specified object
22607 + * @mc_io: Pointer to MC portal's I/O object
22608 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22609 + * @dpseci_id: DPSECI unique ID
22610 + * @token: Returned token; use in subsequent API calls
22612 + * This function can be used to open a control session for an already created
22613 + * object; an object may have been declared in the DPL or by calling the
22614 + * dpseci_create() function.
22615 + * This function returns a unique authentication token, associated with the
22616 + * specific object ID and the specific MC portal; this token must be used in all
22617 + * subsequent commands for this specific object.
22619 + * Return: '0' on success, error code otherwise
22621 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
22624 + struct mc_command cmd = { 0 };
22625 + struct dpseci_cmd_open *cmd_params;
22628 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
22631 + cmd_params = (struct dpseci_cmd_open *)cmd.params;
22632 + cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
22633 + err = mc_send_command(mc_io, &cmd);
22637 + *token = mc_cmd_hdr_read_token(&cmd);
22643 + * dpseci_close() - Close the control session of the object
22644 + * @mc_io: Pointer to MC portal's I/O object
22645 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22646 + * @token: Token of DPSECI object
22648 + * After this function is called, no further operations are allowed on the
22649 + * object without opening a new control session.
22651 + * Return: '0' on success, error code otherwise
22653 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22655 + struct mc_command cmd = { 0 };
22657 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
22660 + return mc_send_command(mc_io, &cmd);
22664 + * dpseci_create() - Create the DPSECI object
22665 + * @mc_io: Pointer to MC portal's I/O object
22666 + * @dprc_token: Parent container token; '0' for default container
22667 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22668 + * @cfg: Configuration structure
22669 + * @obj_id: returned object id
22671 + * Create the DPSECI object, allocate required resources and perform required
22672 + * initialization.
22674 + * The object can be created either by declaring it in the DPL file, or by
22675 + * calling this function.
22677 + * The function accepts an authentication token of a parent container that this
22678 + * object should be assigned to. The token can be '0' so the object will be
22679 + * assigned to the default container.
22680 + * The newly created object can be opened with the returned object id and using
22681 + * the container's associated tokens and MC portals.
22683 + * Return: '0' on success, error code otherwise
22685 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
22686 + const struct dpseci_cfg *cfg, u32 *obj_id)
22688 + struct mc_command cmd = { 0 };
22689 + struct dpseci_cmd_create *cmd_params;
22692 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
22695 + cmd_params = (struct dpseci_cmd_create *)cmd.params;
22696 + for (i = 0; i < 8; i++)
22697 + cmd_params->priorities[i] = cfg->priorities[i];
22698 + cmd_params->num_tx_queues = cfg->num_tx_queues;
22699 + cmd_params->num_rx_queues = cfg->num_rx_queues;
22700 + cmd_params->options = cpu_to_le32(cfg->options);
22701 + err = mc_send_command(mc_io, &cmd);
22705 + *obj_id = mc_cmd_read_object_id(&cmd);
22711 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
22712 + * @mc_io: Pointer to MC portal's I/O object
22713 + * @dprc_token: Parent container token; '0' for default container
22714 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22715 + * @object_id: The object id; it must be a valid id within the container that
22716 + * created this object
22718 + * The function accepts the authentication token of the parent container that
22719 + * created the object (not the one that currently owns the object). The object
22720 + * is searched within parent using the provided 'object_id'.
22721 + * All tokens to the object must be closed before calling destroy.
22723 + * Return: '0' on success, error code otherwise
22725 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
22728 + struct mc_command cmd = { 0 };
22729 + struct dpseci_cmd_destroy *cmd_params;
22731 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
22734 + cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
22735 + cmd_params->object_id = cpu_to_le32(object_id);
22737 + return mc_send_command(mc_io, &cmd);
22741 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
22742 + * @mc_io: Pointer to MC portal's I/O object
22743 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22744 + * @token: Token of DPSECI object
22746 + * Return: '0' on success, error code otherwise
22748 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22750 + struct mc_command cmd = { 0 };
22752 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
22755 + return mc_send_command(mc_io, &cmd);
22759 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
22760 + * @mc_io: Pointer to MC portal's I/O object
22761 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22762 + * @token: Token of DPSECI object
22764 + * Return: '0' on success, error code otherwise
22766 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22768 + struct mc_command cmd = { 0 };
22770 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
22774 + return mc_send_command(mc_io, &cmd);
22778 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
22779 + * @mc_io: Pointer to MC portal's I/O object
22780 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22781 + * @token: Token of DPSECI object
22782 + * @en: Returns '1' if object is enabled; '0' otherwise
22784 + * Return: '0' on success, error code otherwise
22786 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22789 + struct mc_command cmd = { 0 };
22790 + struct dpseci_rsp_is_enabled *rsp_params;
22793 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
22796 + err = mc_send_command(mc_io, &cmd);
22800 + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
22801 + *en = le32_to_cpu(rsp_params->is_enabled);
22807 + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
22808 + * @mc_io: Pointer to MC portal's I/O object
22809 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22810 + * @token: Token of DPSECI object
22812 + * Return: '0' on success, error code otherwise
22814 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
22816 + struct mc_command cmd = { 0 };
22818 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
22822 + return mc_send_command(mc_io, &cmd);
22826 + * dpseci_get_irq_enable() - Get overall interrupt state
22827 + * @mc_io: Pointer to MC portal's I/O object
22828 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22829 + * @token: Token of DPSECI object
22830 + * @irq_index: The interrupt index to configure
22831 + * @en: Returned Interrupt state - enable = 1, disable = 0
22833 + * Return: '0' on success, error code otherwise
22835 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22836 + u8 irq_index, u8 *en)
22838 + struct mc_command cmd = { 0 };
22839 + struct dpseci_cmd_irq_enable *cmd_params;
22840 + struct dpseci_rsp_get_irq_enable *rsp_params;
22843 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
22846 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
22847 + cmd_params->irq_index = irq_index;
22848 + err = mc_send_command(mc_io, &cmd);
22852 + rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
22853 + *en = rsp_params->enable_state;
22859 + * dpseci_set_irq_enable() - Set overall interrupt state.
22860 + * @mc_io: Pointer to MC portal's I/O object
22861 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22862 + * @token: Token of DPSECI object
22863 + * @irq_index: The interrupt index to configure
22864 + * @en: Interrupt state - enable = 1, disable = 0
22866 + * Allows GPP software to control when interrupts are generated.
22867 + * Each interrupt can have up to 32 causes. The enable/disable controls the
22868 + * overall interrupt state. If the interrupt is disabled no causes will cause
22871 + * Return: '0' on success, error code otherwise
22873 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22874 + u8 irq_index, u8 en)
22876 + struct mc_command cmd = { 0 };
22877 + struct dpseci_cmd_irq_enable *cmd_params;
22879 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
22882 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
22883 + cmd_params->irq_index = irq_index;
22884 + cmd_params->enable_state = en;
22886 + return mc_send_command(mc_io, &cmd);
22890 + * dpseci_get_irq_mask() - Get interrupt mask.
22891 + * @mc_io: Pointer to MC portal's I/O object
22892 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22893 + * @token: Token of DPSECI object
22894 + * @irq_index: The interrupt index to configure
22895 + * @mask: Returned event mask to trigger interrupt
22897 + * Every interrupt can have up to 32 causes and the interrupt model supports
22898 + * masking/unmasking each cause independently.
22900 + * Return: '0' on success, error code otherwise
22902 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22903 + u8 irq_index, u32 *mask)
22905 + struct mc_command cmd = { 0 };
22906 + struct dpseci_cmd_irq_mask *cmd_params;
22909 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
22912 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
22913 + cmd_params->irq_index = irq_index;
22914 + err = mc_send_command(mc_io, &cmd);
22918 + *mask = le32_to_cpu(cmd_params->mask);
22924 + * dpseci_set_irq_mask() - Set interrupt mask.
22925 + * @mc_io: Pointer to MC portal's I/O object
22926 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22927 + * @token: Token of DPSECI object
22928 + * @irq_index: The interrupt index to configure
22929 + * @mask: event mask to trigger interrupt;
22931 + * 0 = ignore event
22932 + * 1 = consider event for asserting IRQ
22934 + * Every interrupt can have up to 32 causes and the interrupt model supports
22935 + * masking/unmasking each cause independently
22937 + * Return: '0' on success, error code otherwise
22939 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22940 + u8 irq_index, u32 mask)
22942 + struct mc_command cmd = { 0 };
22943 + struct dpseci_cmd_irq_mask *cmd_params;
22945 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
22948 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
22949 + cmd_params->mask = cpu_to_le32(mask);
22950 + cmd_params->irq_index = irq_index;
22952 + return mc_send_command(mc_io, &cmd);
22956 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
22957 + * @mc_io: Pointer to MC portal's I/O object
22958 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22959 + * @token: Token of DPSECI object
22960 + * @irq_index: The interrupt index to configure
22961 + * @status: Returned interrupts status - one bit per cause:
22962 + * 0 = no interrupt pending
22963 + * 1 = interrupt pending
22965 + * Return: '0' on success, error code otherwise
22967 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
22968 + u8 irq_index, u32 *status)
22970 + struct mc_command cmd = { 0 };
22971 + struct dpseci_cmd_irq_status *cmd_params;
22974 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
22977 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
22978 + cmd_params->status = cpu_to_le32(*status);
22979 + cmd_params->irq_index = irq_index;
22980 + err = mc_send_command(mc_io, &cmd);
22984 + *status = le32_to_cpu(cmd_params->status);
22990 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
22991 + * @mc_io: Pointer to MC portal's I/O object
22992 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
22993 + * @token: Token of DPSECI object
22994 + * @irq_index: The interrupt index to configure
22995 + * @status: bits to clear (W1C) - one bit per cause:
22996 + * 0 = don't change
22997 + * 1 = clear status bit
22999 + * Return: '0' on success, error code otherwise
23001 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23002 + u8 irq_index, u32 status)
23004 + struct mc_command cmd = { 0 };
23005 + struct dpseci_cmd_irq_status *cmd_params;
23007 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
23010 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
23011 + cmd_params->status = cpu_to_le32(status);
23012 + cmd_params->irq_index = irq_index;
23014 + return mc_send_command(mc_io, &cmd);
23018 + * dpseci_get_attributes() - Retrieve DPSECI attributes
23019 + * @mc_io: Pointer to MC portal's I/O object
23020 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23021 + * @token: Token of DPSECI object
23022 + * @attr: Returned object's attributes
23024 + * Return: '0' on success, error code otherwise
23026 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23027 + struct dpseci_attr *attr)
23029 + struct mc_command cmd = { 0 };
23030 + struct dpseci_rsp_get_attributes *rsp_params;
23033 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
23036 + err = mc_send_command(mc_io, &cmd);
23040 + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
23041 + attr->id = le32_to_cpu(rsp_params->id);
23042 + attr->num_tx_queues = rsp_params->num_tx_queues;
23043 + attr->num_rx_queues = rsp_params->num_rx_queues;
23044 + attr->options = le32_to_cpu(rsp_params->options);
23050 + * dpseci_set_rx_queue() - Set Rx queue configuration
23051 + * @mc_io: Pointer to MC portal's I/O object
23052 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23053 + * @token: Token of DPSECI object
23054 + * @queue: Select the queue relative to number of priorities configured at
23055 + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
23056 + * Rx queues identically.
23057 + * @cfg: Rx queue configuration
23059 + * Return: '0' on success, error code otherwise
23061 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23062 + u8 queue, const struct dpseci_rx_queue_cfg *cfg)
23064 + struct mc_command cmd = { 0 };
23065 + struct dpseci_cmd_queue *cmd_params;
23067 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
23070 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23071 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
23072 + cmd_params->priority = cfg->dest_cfg.priority;
23073 + cmd_params->queue = queue;
23074 + cmd_params->dest_type = cfg->dest_cfg.dest_type;
23075 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
23076 + cmd_params->options = cpu_to_le32(cfg->options);
23077 + cmd_params->order_preservation_en =
23078 + cpu_to_le32(cfg->order_preservation_en);
23080 + return mc_send_command(mc_io, &cmd);
23084 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
23085 + * @mc_io: Pointer to MC portal's I/O object
23086 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23087 + * @token: Token of DPSECI object
23088 + * @queue: Select the queue relative to number of priorities configured at
23089 + * DPSECI creation
23090 + * @attr: Returned Rx queue attributes
23092 + * Return: '0' on success, error code otherwise
23094 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23095 + u8 queue, struct dpseci_rx_queue_attr *attr)
23097 + struct mc_command cmd = { 0 };
23098 + struct dpseci_cmd_queue *cmd_params;
23101 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
23104 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23105 + cmd_params->queue = queue;
23106 + err = mc_send_command(mc_io, &cmd);
23110 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
23111 + attr->dest_cfg.priority = cmd_params->priority;
23112 + attr->dest_cfg.dest_type = cmd_params->dest_type;
23113 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
23114 + attr->fqid = le32_to_cpu(cmd_params->fqid);
23115 + attr->order_preservation_en =
23116 + le32_to_cpu(cmd_params->order_preservation_en);
23122 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
23123 + * @mc_io: Pointer to MC portal's I/O object
23124 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23125 + * @token: Token of DPSECI object
23126 + * @queue: Select the queue relative to number of priorities configured at
23127 + * DPSECI creation
23128 + * @attr: Returned Tx queue attributes
23130 + * Return: '0' on success, error code otherwise
23132 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23133 + u8 queue, struct dpseci_tx_queue_attr *attr)
23135 + struct mc_command cmd = { 0 };
23136 + struct dpseci_cmd_queue *cmd_params;
23137 + struct dpseci_rsp_get_tx_queue *rsp_params;
23140 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
23143 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
23144 + cmd_params->queue = queue;
23145 + err = mc_send_command(mc_io, &cmd);
23149 + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
23150 + attr->fqid = le32_to_cpu(rsp_params->fqid);
23151 + attr->priority = rsp_params->priority;
23157 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
23158 + * @mc_io: Pointer to MC portal's I/O object
23159 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23160 + * @token: Token of DPSECI object
23161 + * @attr: Returned SEC attributes
23163 + * Return: '0' on success, error code otherwise
23165 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23166 + struct dpseci_sec_attr *attr)
23168 + struct mc_command cmd = { 0 };
23169 + struct dpseci_rsp_get_sec_attr *rsp_params;
23172 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
23175 + err = mc_send_command(mc_io, &cmd);
23179 + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
23180 + attr->ip_id = le16_to_cpu(rsp_params->ip_id);
23181 + attr->major_rev = rsp_params->major_rev;
23182 + attr->minor_rev = rsp_params->minor_rev;
23183 + attr->era = rsp_params->era;
23184 + attr->deco_num = rsp_params->deco_num;
23185 + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
23186 + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
23187 + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
23188 + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
23189 + attr->crc_acc_num = rsp_params->crc_acc_num;
23190 + attr->pk_acc_num = rsp_params->pk_acc_num;
23191 + attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
23192 + attr->rng_acc_num = rsp_params->rng_acc_num;
23193 + attr->md_acc_num = rsp_params->md_acc_num;
23194 + attr->arc4_acc_num = rsp_params->arc4_acc_num;
23195 + attr->des_acc_num = rsp_params->des_acc_num;
23196 + attr->aes_acc_num = rsp_params->aes_acc_num;
23202 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
23203 + * @mc_io: Pointer to MC portal's I/O object
23204 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23205 + * @token: Token of DPSECI object
23206 + * @counters: Returned SEC counters
23208 + * Return: '0' on success, error code otherwise
23210 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23211 + struct dpseci_sec_counters *counters)
23213 + struct mc_command cmd = { 0 };
23214 + struct dpseci_rsp_get_sec_counters *rsp_params;
23217 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
23220 + err = mc_send_command(mc_io, &cmd);
23224 + rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
23225 + counters->dequeued_requests =
23226 + le64_to_cpu(rsp_params->dequeued_requests);
23227 + counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
23228 + counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
23229 + counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
23230 + counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
23231 + counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
23232 + counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
23238 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
23239 + * @mc_io: Pointer to MC portal's I/O object
23240 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23241 + * @major_ver: Major version of data path sec API
23242 + * @minor_ver: Minor version of data path sec API
23244 + * Return: '0' on success, error code otherwise
23246 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
23247 + u16 *major_ver, u16 *minor_ver)
23249 + struct mc_command cmd = { 0 };
23250 + struct dpseci_rsp_get_api_version *rsp_params;
23253 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
23255 + err = mc_send_command(mc_io, &cmd);
23259 + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
23260 + *major_ver = le16_to_cpu(rsp_params->major);
23261 + *minor_ver = le16_to_cpu(rsp_params->minor);
23267 + * dpseci_set_opr() - Set Order Restoration configuration
23268 + * @mc_io: Pointer to MC portal's I/O object
23269 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23270 + * @token: Token of DPSECI object
23271 + * @index: The queue index
23272 + * @options: Configuration mode options; can be OPR_OPT_CREATE or
23274 + * @cfg: Configuration options for the OPR
23276 + * Return: '0' on success, error code otherwise
23278 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23279 + u8 options, struct opr_cfg *cfg)
23281 + struct mc_command cmd = { 0 };
23282 + struct dpseci_cmd_opr *cmd_params;
23284 + cmd.header = mc_encode_cmd_header(
23285 + DPSECI_CMDID_SET_OPR,
23288 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
23289 + cmd_params->index = index;
23290 + cmd_params->options = options;
23291 + cmd_params->oloe = cfg->oloe;
23292 + cmd_params->oeane = cfg->oeane;
23293 + cmd_params->olws = cfg->olws;
23294 + cmd_params->oa = cfg->oa;
23295 + cmd_params->oprrws = cfg->oprrws;
23297 + return mc_send_command(mc_io, &cmd);
23301 + * dpseci_get_opr() - Retrieve Order Restoration config and query
23302 + * @mc_io: Pointer to MC portal's I/O object
23303 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23304 + * @token: Token of DPSECI object
23305 + * @index: The queue index
23306 + * @cfg: Returned OPR configuration
23307 + * @qry: Returned OPR query
23309 + * Return: '0' on success, error code otherwise
23311 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23312 + struct opr_cfg *cfg, struct opr_qry *qry)
23314 + struct mc_command cmd = { 0 };
23315 + struct dpseci_cmd_opr *cmd_params;
23316 + struct dpseci_rsp_get_opr *rsp_params;
23319 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
23322 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
23323 + cmd_params->index = index;
23324 + err = mc_send_command(mc_io, &cmd);
23328 + rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
23329 + qry->rip = dpseci_get_field(rsp_params->rip_enable, OPR_RIP);
23330 + qry->enable = dpseci_get_field(rsp_params->rip_enable, OPR_ENABLE);
23331 + cfg->oloe = rsp_params->oloe;
23332 + cfg->oeane = rsp_params->oeane;
23333 + cfg->olws = rsp_params->olws;
23334 + cfg->oa = rsp_params->oa;
23335 + cfg->oprrws = rsp_params->oprrws;
23336 + qry->nesn = le16_to_cpu(rsp_params->nesn);
23337 + qry->ndsn = le16_to_cpu(rsp_params->ndsn);
23338 + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
23339 + qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
23340 + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
23341 + qry->hseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_HSEQ_NLIS);
23342 + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
23343 + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
23344 + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
23345 + qry->opr_id = le16_to_cpu(rsp_params->opr_id);
23351 + * dpseci_set_congestion_notification() - Set congestion group
23352 + * notification configuration
23353 + * @mc_io: Pointer to MC portal's I/O object
23354 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23355 + * @token: Token of DPSECI object
23356 + * @cfg: congestion notification configuration
23358 + * Return: '0' on success, error code otherwise
23360 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23361 + u16 token, const struct dpseci_congestion_notification_cfg *cfg)
23363 + struct mc_command cmd = { 0 };
23364 + struct dpseci_cmd_congestion_notification *cmd_params;
23366 + cmd.header = mc_encode_cmd_header(
23367 + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
23370 + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
23371 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
23372 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
23373 + cmd_params->priority = cfg->dest_cfg.priority;
23374 + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
23375 + cfg->dest_cfg.dest_type);
23376 + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
23377 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
23378 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
23379 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
23380 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
23382 + return mc_send_command(mc_io, &cmd);
23386 + * dpseci_get_congestion_notification() - Get congestion group notification
23388 + * @mc_io: Pointer to MC portal's I/O object
23389 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
23390 + * @token: Token of DPSECI object
23391 + * @cfg: congestion notification configuration
23393 + * Return: '0' on success, error code otherwise
23395 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23396 + u16 token, struct dpseci_congestion_notification_cfg *cfg)
23398 + struct mc_command cmd = { 0 };
23399 + struct dpseci_cmd_congestion_notification *rsp_params;
23402 + cmd.header = mc_encode_cmd_header(
23403 + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
23406 + err = mc_send_command(mc_io, &cmd);
23410 + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
23411 + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
23412 + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
23413 + cfg->dest_cfg.priority = rsp_params->priority;
23414 + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
23416 + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
23417 + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
23418 + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
23419 + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
23420 + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
23425 +++ b/drivers/crypto/caam/dpseci.h
23428 + * Copyright 2013-2016 Freescale Semiconductor Inc.
23429 + * Copyright 2017 NXP
23431 + * Redistribution and use in source and binary forms, with or without
23432 + * modification, are permitted provided that the following conditions are met:
23433 + * * Redistributions of source code must retain the above copyright
23434 + * notice, this list of conditions and the following disclaimer.
23435 + * * Redistributions in binary form must reproduce the above copyright
23436 + * notice, this list of conditions and the following disclaimer in the
23437 + * documentation and/or other materials provided with the distribution.
23438 + * * Neither the names of the above-listed copyright holders nor the
23439 + * names of any contributors may be used to endorse or promote products
23440 + * derived from this software without specific prior written permission.
23443 + * ALTERNATIVELY, this software may be distributed under the terms of the
23444 + * GNU General Public License ("GPL") as published by the Free Software
23445 + * Foundation, either version 2 of that License or (at your option) any
23448 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23449 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23450 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23451 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
23452 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23453 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23454 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23455 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23456 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23457 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23458 + * POSSIBILITY OF SUCH DAMAGE.
23460 +#ifndef _DPSECI_H_
23461 +#define _DPSECI_H_
23464 + * Data Path SEC Interface API
23465 + * Contains initialization APIs and runtime control APIs for DPSECI
23473 + * General DPSECI macros
23477 + * Maximum number of Tx/Rx priorities per DPSECI object
23479 +#define DPSECI_PRIO_NUM 8
23482 + * All queues considered; see dpseci_set_rx_queue()
23484 +#define DPSECI_ALL_QUEUES (u8)(-1)
23486 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
23489 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23492 + * Enable the Congestion Group support
23494 +#define DPSECI_OPT_HAS_CG 0x000020
23497 + * Enable the Order Restoration support
23499 +#define DPSECI_OPT_HAS_OPR 0x000040
23502 + * Order Point Records are shared for the entire DPSECI
23504 +#define DPSECI_OPT_OPR_SHARED 0x000080
23507 + * struct dpseci_cfg - Structure representing DPSECI configuration
23508 + * @options: Any combination of the following options:
23509 + * DPSECI_OPT_HAS_CG
23510 + * DPSECI_OPT_HAS_OPR
23511 + * DPSECI_OPT_OPR_SHARED
23512 + * @num_tx_queues: num of queues towards the SEC
23513 + * @num_rx_queues: num of queues back from the SEC
23514 + * @priorities: Priorities for the SEC hardware processing;
23515 + * each place in the array is the priority of the tx queue
23516 + * towards the SEC;
23517 + * valid priorities are configured with values 1-8;
23519 +struct dpseci_cfg {
23521 + u8 num_tx_queues;
23522 + u8 num_rx_queues;
23523 + u8 priorities[DPSECI_PRIO_NUM];
23526 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
23527 + const struct dpseci_cfg *cfg, u32 *obj_id);
23529 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
23532 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23534 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23536 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23539 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
23541 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23542 + u8 irq_index, u8 *en);
23544 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23545 + u8 irq_index, u8 en);
23547 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23548 + u8 irq_index, u32 *mask);
23550 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23551 + u8 irq_index, u32 mask);
23553 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23554 + u8 irq_index, u32 *status);
23556 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23557 + u8 irq_index, u32 status);
23560 + * struct dpseci_attr - Structure representing DPSECI attributes
23561 + * @id: DPSECI object ID
23562 + * @num_tx_queues: number of queues towards the SEC
23563 + * @num_rx_queues: number of queues back from the SEC
23564 + * @options: any combination of the following options:
23565 + * DPSECI_OPT_HAS_CG
23566 + * DPSECI_OPT_HAS_OPR
23567 + * DPSECI_OPT_OPR_SHARED
23569 +struct dpseci_attr {
23571 + u8 num_tx_queues;
23572 + u8 num_rx_queues;
23576 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23577 + struct dpseci_attr *attr);
23580 + * enum dpseci_dest - DPSECI destination types
23581 + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
23582 + * and does not generate FQDAN notifications; user is expected to dequeue
23583 + * from the queue based on polling or other user-defined method
23584 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
23585 + * notifications to the specified DPIO; user is expected to dequeue from
23586 + * the queue only after notification is received
23587 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
23588 + * FQDAN notifications, but is connected to the specified DPCON object;
23589 + * user is expected to dequeue from the DPCON channel
23591 +enum dpseci_dest {
23592 + DPSECI_DEST_NONE = 0,
23593 + DPSECI_DEST_DPIO,
23594 + DPSECI_DEST_DPCON
23598 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
23599 + * @dest_type: Destination type
23600 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
23601 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
23602 + * are 0-1 or 0-7, depending on the number of priorities in that channel;
23603 + * not relevant for 'DPSECI_DEST_NONE' option
23605 +struct dpseci_dest_cfg {
23606 + enum dpseci_dest dest_type;
23612 + * DPSECI queue modification options
23616 + * Select to modify the user's context associated with the queue
23618 +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
23621 + * Select to modify the queue's destination
23623 +#define DPSECI_QUEUE_OPT_DEST 0x00000002
23626 + * Select to modify the queue's order preservation
23628 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
23631 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
23632 + * @options: Flags representing the suggested modifications to the queue;
23633 + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
23634 + * @order_preservation_en: order preservation configuration for the rx queue
23635 + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
23636 + * @user_ctx: User context value provided in the frame descriptor of each
23637 + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
23639 + * @dest_cfg: Queue destination parameters; valid only if
23640 + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
23642 +struct dpseci_rx_queue_cfg {
23644 + int order_preservation_en;
23646 + struct dpseci_dest_cfg dest_cfg;
23649 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23650 + u8 queue, const struct dpseci_rx_queue_cfg *cfg);
23653 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
23654 + * @user_ctx: User context value provided in the frame descriptor of each
23656 + * @order_preservation_en: Status of the order preservation configuration on the
23658 + * @dest_cfg: Queue destination configuration
23659 + * @fqid: Virtual FQID value to be used for dequeue operations
23661 +struct dpseci_rx_queue_attr {
23663 + int order_preservation_en;
23664 + struct dpseci_dest_cfg dest_cfg;
23668 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23669 + u8 queue, struct dpseci_rx_queue_attr *attr);
23672 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
23673 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
23674 + * @priority: SEC hardware processing priority for the queue
23676 +struct dpseci_tx_queue_attr {
23681 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23682 + u8 queue, struct dpseci_tx_queue_attr *attr);
23685 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
23686 + * hardware accelerator
23687 + * @ip_id: ID for SEC
23688 + * @major_rev: Major revision number for SEC
23689 + * @minor_rev: Minor revision number for SEC
23691 + * @deco_num: The number of copies of the DECO that are implemented in this
23693 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
23695 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
23697 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
23698 + * implemented in this version of SEC
23699 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
23700 + * implemented in this version of SEC
23701 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
23702 + * this version of SEC
23703 + * @pk_acc_num: The number of copies of the Public Key module that are
23704 + * implemented in this version of SEC
23705 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
23706 + * implemented in this version of SEC
23707 + * @rng_acc_num: The number of copies of the Random Number Generator that are
23708 + * implemented in this version of SEC
23709 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
23710 + * implemented in this version of SEC
23711 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
23712 + * in this version of SEC
23713 + * @des_acc_num: The number of copies of the DES module that are implemented in
23714 + * this version of SEC
23715 + * @aes_acc_num: The number of copies of the AES module that are implemented in
23716 + * this version of SEC
23718 +struct dpseci_sec_attr {
23724 + u8 zuc_auth_acc_num;
23725 + u8 zuc_enc_acc_num;
23726 + u8 snow_f8_acc_num;
23727 + u8 snow_f9_acc_num;
23730 + u8 kasumi_acc_num;
23738 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23739 + struct dpseci_sec_attr *attr);
23742 + * struct dpseci_sec_counters - Structure representing global SEC counters and
23743 + * not per dpseci counters
23744 + * @dequeued_requests: Number of Requests Dequeued
23745 + * @ob_enc_requests: Number of Outbound Encrypt Requests
23746 + * @ib_dec_requests: Number of Inbound Decrypt Requests
23747 + * @ob_enc_bytes: Number of Outbound Bytes Encrypted
23748 + * @ob_prot_bytes: Number of Outbound Bytes Protected
23749 + * @ib_dec_bytes: Number of Inbound Bytes Decrypted
23750 + * @ib_valid_bytes: Number of Inbound Bytes Validated
23752 +struct dpseci_sec_counters {
23753 + u64 dequeued_requests;
23754 + u64 ob_enc_requests;
23755 + u64 ib_dec_requests;
23756 + u64 ob_enc_bytes;
23757 + u64 ob_prot_bytes;
23758 + u64 ib_dec_bytes;
23759 + u64 ib_valid_bytes;
23762 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
23763 + struct dpseci_sec_counters *counters);
23765 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
23766 + u16 *major_ver, u16 *minor_ver);
23768 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23769 + u8 options, struct opr_cfg *cfg);
23771 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
23772 + struct opr_cfg *cfg, struct opr_qry *qry);
23775 + * enum dpseci_congestion_unit - DPSECI congestion units
23776 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
23777 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
23779 +enum dpseci_congestion_unit {
23780 + DPSECI_CONGESTION_UNIT_BYTES = 0,
23781 + DPSECI_CONGESTION_UNIT_FRAMES
23784 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
23785 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
23786 +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
23787 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
23788 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
23789 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
23792 + * struct dpseci_congestion_notification_cfg - congestion notification
23794 + * @units: units type
23795 + * @threshold_entry: above this threshold we enter a congestion state.
23796 + * set it to '0' to disable it
23797 + * @threshold_exit: below this threshold we exit the congestion state.
23798 + * @message_ctx: The context that will be part of the CSCN message
23799 + * @message_iova: I/O virtual address (must be in DMA-able memory),
23800 + * must be 16B aligned;
23801 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
23802 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
23805 +struct dpseci_congestion_notification_cfg {
23806 + enum dpseci_congestion_unit units;
23807 + u32 threshold_entry;
23808 + u32 threshold_exit;
23810 + u64 message_iova;
23811 + struct dpseci_dest_cfg dest_cfg;
23812 + u16 notification_mode;
23815 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23816 + u16 token, const struct dpseci_congestion_notification_cfg *cfg);
23818 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
23819 + u16 token, struct dpseci_congestion_notification_cfg *cfg);
23821 +#endif /* _DPSECI_H_ */
23823 +++ b/drivers/crypto/caam/dpseci_cmd.h
23826 + * Copyright 2013-2016 Freescale Semiconductor Inc.
23827 + * Copyright 2017 NXP
23829 + * Redistribution and use in source and binary forms, with or without
23830 + * modification, are permitted provided that the following conditions are met:
23831 + * * Redistributions of source code must retain the above copyright
23832 + * notice, this list of conditions and the following disclaimer.
23833 + * * Redistributions in binary form must reproduce the above copyright
23834 + * notice, this list of conditions and the following disclaimer in the
23835 + * documentation and/or other materials provided with the distribution.
23836 + * * Neither the names of the above-listed copyright holders nor the
23837 + * names of any contributors may be used to endorse or promote products
23838 + * derived from this software without specific prior written permission.
23841 + * ALTERNATIVELY, this software may be distributed under the terms of the
23842 + * GNU General Public License ("GPL") as published by the Free Software
23843 + * Foundation, either version 2 of that License or (at your option) any
23846 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23847 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23848 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23849 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
23850 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23851 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23852 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23853 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23854 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23855 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23856 + * POSSIBILITY OF SUCH DAMAGE.
23859 +#ifndef _DPSECI_CMD_H_
23860 +#define _DPSECI_CMD_H_
23862 +/* DPSECI Version */
23863 +#define DPSECI_VER_MAJOR 5
23864 +#define DPSECI_VER_MINOR 1
23866 +#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
23867 +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
23871 +#define DPSECI_CMDID_CLOSE 0x8001
23872 +#define DPSECI_CMDID_OPEN 0x8091
23873 +#define DPSECI_CMDID_CREATE 0x9092
23874 +#define DPSECI_CMDID_DESTROY 0x9891
23875 +#define DPSECI_CMDID_GET_API_VERSION 0xa091
23877 +#define DPSECI_CMDID_ENABLE 0x0021
23878 +#define DPSECI_CMDID_DISABLE 0x0031
23879 +#define DPSECI_CMDID_GET_ATTR 0x0041
23880 +#define DPSECI_CMDID_RESET 0x0051
23881 +#define DPSECI_CMDID_IS_ENABLED 0x0061
23883 +#define DPSECI_CMDID_SET_IRQ_ENABLE 0x0121
23884 +#define DPSECI_CMDID_GET_IRQ_ENABLE 0x0131
23885 +#define DPSECI_CMDID_SET_IRQ_MASK 0x0141
23886 +#define DPSECI_CMDID_GET_IRQ_MASK 0x0151
23887 +#define DPSECI_CMDID_GET_IRQ_STATUS 0x0161
23888 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS 0x0171
23890 +#define DPSECI_CMDID_SET_RX_QUEUE 0x1941
23891 +#define DPSECI_CMDID_GET_RX_QUEUE 0x1961
23892 +#define DPSECI_CMDID_GET_TX_QUEUE 0x1971
23893 +#define DPSECI_CMDID_GET_SEC_ATTR 0x1981
23894 +#define DPSECI_CMDID_GET_SEC_COUNTERS 0x1991
23895 +#define DPSECI_CMDID_SET_OPR 0x19A1
23896 +#define DPSECI_CMDID_GET_OPR 0x19B1
23898 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION 0x1701
23899 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION 0x1711
23901 +/* Macros for accessing command fields smaller than 1 byte */
23902 +#define DPSECI_MASK(field) \
23903 + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
23904 + DPSECI_##field##_SHIFT)
23906 +#define dpseci_set_field(var, field, val) \
23907 + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
23909 +#define dpseci_get_field(var, field) \
23910 + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
23912 +struct dpseci_cmd_open {
23913 + __le32 dpseci_id;
23916 +struct dpseci_cmd_create {
23917 + u8 priorities[8];
23918 + u8 num_tx_queues;
23919 + u8 num_rx_queues;
23924 +struct dpseci_cmd_destroy {
23925 + __le32 object_id;
23928 +struct dpseci_rsp_is_enabled {
23929 + __le32 is_enabled;
23932 +struct dpseci_cmd_irq_enable {
23938 +struct dpseci_rsp_get_irq_enable {
23942 +struct dpseci_cmd_irq_mask {
23947 +struct dpseci_cmd_irq_status {
23952 +struct dpseci_rsp_get_attributes {
23955 + u8 num_tx_queues;
23956 + u8 num_rx_queues;
23961 +struct dpseci_cmd_queue {
23972 + __le32 order_preservation_en;
23975 +struct dpseci_rsp_get_tx_queue {
23981 +struct dpseci_rsp_get_sec_attr {
23988 + u8 zuc_auth_acc_num;
23989 + u8 zuc_enc_acc_num;
23991 + u8 snow_f8_acc_num;
23992 + u8 snow_f9_acc_num;
23996 + u8 kasumi_acc_num;
24005 +struct dpseci_rsp_get_sec_counters {
24006 + __le64 dequeued_requests;
24007 + __le64 ob_enc_requests;
24008 + __le64 ib_dec_requests;
24009 + __le64 ob_enc_bytes;
24010 + __le64 ob_prot_bytes;
24011 + __le64 ib_dec_bytes;
24012 + __le64 ib_valid_bytes;
24015 +struct dpseci_rsp_get_api_version {
24020 +struct dpseci_cmd_opr {
24032 +#define DPSECI_OPR_RIP_SHIFT 0
24033 +#define DPSECI_OPR_RIP_SIZE 1
24034 +#define DPSECI_OPR_ENABLE_SHIFT 1
24035 +#define DPSECI_OPR_ENABLE_SIZE 1
24036 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT 1
24037 +#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
24038 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT 1
24039 +#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
24041 +struct dpseci_rsp_get_opr {
24069 +#define DPSECI_CGN_DEST_TYPE_SHIFT 0
24070 +#define DPSECI_CGN_DEST_TYPE_SIZE 4
24071 +#define DPSECI_CGN_UNITS_SHIFT 4
24072 +#define DPSECI_CGN_UNITS_SIZE 2
24074 +struct dpseci_cmd_congestion_notification {
24076 + __le16 notification_mode;
24079 + __le64 message_iova;
24080 + __le64 message_ctx;
24081 + __le32 threshold_entry;
24082 + __le32 threshold_exit;
24085 +#endif /* _DPSECI_CMD_H_ */
24086 --- a/drivers/crypto/caam/error.c
24087 +++ b/drivers/crypto/caam/error.c
24090 #include "compat.h"
24092 -#include "intern.h"
24099 +#include <linux/highmem.h>
24101 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24102 + int rowsize, int groupsize, struct scatterlist *sg,
24103 + size_t tlen, bool ascii)
24105 + struct scatterlist *it;
24110 + for (it = sg; it && tlen > 0 ; it = sg_next(sg)) {
24112 + * make sure the scatterlist's page
24113 + * has a valid virtual memory mapping
24115 + it_page = kmap_atomic(sg_page(it));
24116 + if (unlikely(!it_page)) {
24117 + pr_err("caam_dump_sg: kmap failed\n");
24121 + buf = it_page + it->offset;
24122 + len = min_t(size_t, tlen, it->length);
24123 + print_hex_dump(level, prefix_str, prefix_type, rowsize,
24124 + groupsize, buf, len, ascii);
24127 + kunmap_atomic(it_page);
24133 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24134 + int rowsize, int groupsize, struct scatterlist *sg,
24135 + size_t tlen, bool ascii)
24140 +EXPORT_SYMBOL(caam_dump_sg);
24142 static const struct {
24144 const char *error_text;
24145 @@ -69,6 +112,54 @@ static const struct {
24146 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
24149 +static const struct {
24151 + const char *error_text;
24152 +} qi_error_list[] = {
24153 + { 0x1F, "Job terminated by FQ or ICID flush" },
24154 + { 0x20, "FD format error"},
24155 + { 0x21, "FD command format error"},
24156 + { 0x23, "FL format error"},
24157 + { 0x25, "CRJD specified in FD, but not enabled in FLC"},
24158 + { 0x30, "Max. buffer size too small"},
24159 + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
24160 + { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format"},
24161 + { 0x33, "Size over/underflow (allocate mode)"},
24162 + { 0x34, "Size over/underflow (reuse mode)"},
24163 + { 0x35, "Length exceeds max. short length (allocate mode, S/G/ format)"},
24164 + { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G/ format)"},
24165 + { 0x41, "SBC frame format not supported (allocate mode)"},
24166 + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
24167 + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
24168 + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
24169 + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
24170 + { 0x46, "Annotation length exceeds offset (reuse mode)"},
24171 + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
24172 + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
24173 + { 0x4B, "Annotation output enabled but ASA cannote be expanded (frame list)"},
24174 + { 0x51, "Unsupported IF reuse mode"},
24175 + { 0x52, "Unsupported FL use mode"},
24176 + { 0x53, "Unsupported RJD use mode"},
24177 + { 0x54, "Unsupported inline descriptor use mode"},
24178 + { 0xC0, "Table buffer pool 0 depletion"},
24179 + { 0xC1, "Table buffer pool 1 depletion"},
24180 + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
24181 + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
24182 + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
24183 + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
24184 + { 0xD0, "FLC read error"},
24185 + { 0xD1, "FL read error"},
24186 + { 0xD2, "FL write error"},
24187 + { 0xD3, "OF SGT write error"},
24188 + { 0xD4, "PTA read error"},
24189 + { 0xD5, "PTA write error"},
24190 + { 0xD6, "OF SGT F-bit write error"},
24191 + { 0xD7, "ASA write error"},
24192 + { 0xE1, "FLC[ICR]=0 ICID error"},
24193 + { 0xE2, "FLC[ICR]=1 ICID error"},
24194 + { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
24197 static const char * const cha_id_list[] = {
24200 @@ -146,10 +237,9 @@ static void report_ccb_status(struct dev
24201 strlen(rng_err_id_list[err_id])) {
24202 /* RNG-only error */
24203 err_str = rng_err_id_list[err_id];
24204 - } else if (err_id < ARRAY_SIZE(err_id_list))
24206 err_str = err_id_list[err_id];
24208 - snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
24212 * CCB ICV check failures are part of normal operation life;
24213 @@ -198,6 +288,27 @@ static void report_deco_status(struct de
24214 status, error, idx_str, idx, err_str, err_err_code);
24217 +static void report_qi_status(struct device *qidev, const u32 status,
24218 + const char *error)
24220 + u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
24221 + const char *err_str = "unidentified error value 0x";
24222 + char err_err_code[3] = { 0 };
24225 + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
24226 + if (qi_error_list[i].value == err_id)
24229 + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
24230 + err_str = qi_error_list[i].error_text;
24232 + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
24234 + dev_err(qidev, "%08x: %s: %s%s\n",
24235 + status, error, err_str, err_err_code);
24238 static void report_jr_status(struct device *jrdev, const u32 status,
24241 @@ -212,7 +323,7 @@ static void report_cond_code_status(stru
24242 status, error, __func__);
24245 -void caam_jr_strstatus(struct device *jrdev, u32 status)
24246 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
24248 static const struct stat_src {
24249 void (*report_ssed)(struct device *jrdev, const u32 status,
24250 @@ -224,7 +335,7 @@ void caam_jr_strstatus(struct device *jr
24251 { report_ccb_status, "CCB" },
24252 { report_jump_status, "Jump" },
24253 { report_deco_status, "DECO" },
24254 - { NULL, "Queue Manager Interface" },
24255 + { report_qi_status, "Queue Manager Interface" },
24256 { report_jr_status, "Job Ring" },
24257 { report_cond_code_status, "Condition Code" },
24259 @@ -250,4 +361,4 @@ void caam_jr_strstatus(struct device *jr
24261 dev_err(jrdev, "%d: unknown error source\n", ssrc);
24263 -EXPORT_SYMBOL(caam_jr_strstatus);
24264 +EXPORT_SYMBOL(caam_strstatus);
24265 --- a/drivers/crypto/caam/error.h
24266 +++ b/drivers/crypto/caam/error.h
24268 #ifndef CAAM_ERROR_H
24269 #define CAAM_ERROR_H
24270 #define CAAM_ERROR_STR_MAX 302
24271 -void caam_jr_strstatus(struct device *jrdev, u32 status);
24273 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
24275 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
24276 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
24278 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
24279 + int rowsize, int groupsize, struct scatterlist *sg,
24280 + size_t tlen, bool ascii);
24281 #endif /* CAAM_ERROR_H */
24282 --- a/drivers/crypto/caam/intern.h
24283 +++ b/drivers/crypto/caam/intern.h
24284 @@ -41,6 +41,7 @@ struct caam_drv_private_jr {
24285 struct device *dev;
24287 struct caam_job_ring __iomem *rregs; /* JobR's register space */
24288 + struct tasklet_struct irqtask;
24289 int irq; /* One per queue */
24291 /* Number of scatterlist crypt transforms active on the JobR */
24292 @@ -63,10 +64,9 @@ struct caam_drv_private_jr {
24293 * Driver-private storage for a single CAAM block instance
24295 struct caam_drv_private {
24297 - struct device *dev;
24298 - struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
24299 - struct platform_device *pdev;
24300 +#ifdef CONFIG_CAAM_QI
24301 + struct device *qidev;
24304 /* Physical-presence section */
24305 struct caam_ctrl __iomem *ctrl; /* controller region */
24306 @@ -102,11 +102,6 @@ struct caam_drv_private {
24307 #ifdef CONFIG_DEBUG_FS
24308 struct dentry *dfs_root;
24309 struct dentry *ctl; /* controller dir */
24310 - struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
24311 - struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
24312 - struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
24313 - struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
24315 struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
24316 struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
24318 @@ -114,4 +109,22 @@ struct caam_drv_private {
24320 void caam_jr_algapi_init(struct device *dev);
24321 void caam_jr_algapi_remove(struct device *dev);
24323 +#ifdef CONFIG_DEBUG_FS
24324 +static int caam_debugfs_u64_get(void *data, u64 *val)
24326 + *val = caam64_to_cpu(*(u64 *)data);
24330 +static int caam_debugfs_u32_get(void *data, u64 *val)
24332 + *val = caam32_to_cpu(*(u32 *)data);
24336 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
24337 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
24340 #endif /* INTERN_H */
24341 --- a/drivers/crypto/caam/jr.c
24342 +++ b/drivers/crypto/caam/jr.c
24344 #include <linux/of_address.h>
24346 #include "compat.h"
24351 @@ -22,6 +23,14 @@ struct jr_driver_data {
24353 static struct jr_driver_data driver_data;
24355 +static int jr_driver_probed;
24357 +int caam_jr_driver_probed(void)
24359 + return jr_driver_probed;
24361 +EXPORT_SYMBOL(caam_jr_driver_probed);
24363 static int caam_reset_hw_jr(struct device *dev)
24365 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
24366 @@ -73,6 +82,8 @@ static int caam_jr_shutdown(struct devic
24368 ret = caam_reset_hw_jr(dev);
24370 + tasklet_kill(&jrp->irqtask);
24372 /* Release interrupt */
24373 free_irq(jrp->irq, dev);
24375 @@ -116,6 +127,8 @@ static int caam_jr_remove(struct platfor
24376 dev_err(jrdev, "Failed to shut down job ring\n");
24377 irq_dispose_mapping(jrpriv->irq);
24379 + jr_driver_probed--;
24384 @@ -128,7 +141,7 @@ static irqreturn_t caam_jr_interrupt(int
24387 * Check the output ring for ready responses, kick
24388 - * the threaded irq if jobs done.
24389 + * tasklet if jobs done.
24391 irqstate = rd_reg32(&jrp->rregs->jrintstatus);
24393 @@ -150,13 +163,18 @@ static irqreturn_t caam_jr_interrupt(int
24394 /* Have valid interrupt at this point, just ACK and trigger */
24395 wr_reg32(&jrp->rregs->jrintstatus, irqstate);
24397 - return IRQ_WAKE_THREAD;
24398 + preempt_disable();
24399 + tasklet_schedule(&jrp->irqtask);
24400 + preempt_enable();
24402 + return IRQ_HANDLED;
24405 -static irqreturn_t caam_jr_threadirq(int irq, void *st_dev)
24406 +/* Deferred service handler, run as interrupt-fired tasklet */
24407 +static void caam_jr_dequeue(unsigned long devarg)
24409 int hw_idx, sw_idx, i, head, tail;
24410 - struct device *dev = st_dev;
24411 + struct device *dev = (struct device *)devarg;
24412 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
24413 void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
24414 u32 *userdesc, userstatus;
24415 @@ -230,8 +248,6 @@ static irqreturn_t caam_jr_threadirq(int
24417 /* reenable / unmask IRQs */
24418 clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
24420 - return IRQ_HANDLED;
24424 @@ -275,6 +291,36 @@ struct device *caam_jr_alloc(void)
24425 EXPORT_SYMBOL(caam_jr_alloc);
24428 + * caam_jridx_alloc() - Alloc a specific job ring based on its index.
24430 + * returns : pointer to the newly allocated physical
24431 + * JobR dev can be written to if successful.
24433 +struct device *caam_jridx_alloc(int idx)
24435 + struct caam_drv_private_jr *jrpriv;
24436 + struct device *dev = ERR_PTR(-ENODEV);
24438 + spin_lock(&driver_data.jr_alloc_lock);
24440 + if (list_empty(&driver_data.jr_list))
24443 + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
24444 + if (jrpriv->ridx == idx) {
24445 + atomic_inc(&jrpriv->tfm_count);
24446 + dev = jrpriv->dev;
24452 + spin_unlock(&driver_data.jr_alloc_lock);
24455 +EXPORT_SYMBOL(caam_jridx_alloc);
24458 * caam_jr_free() - Free the Job Ring
24459 * @rdev - points to the dev that identifies the Job ring to
24461 @@ -389,10 +435,11 @@ static int caam_jr_init(struct device *d
24463 jrp = dev_get_drvdata(dev);
24465 + tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
24467 /* Connect job ring interrupt handler. */
24468 - error = request_threaded_irq(jrp->irq, caam_jr_interrupt,
24469 - caam_jr_threadirq, IRQF_SHARED,
24470 - dev_name(dev), dev);
24471 + error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
24472 + dev_name(dev), dev);
24474 dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
24475 jrp->ridx, jrp->irq);
24476 @@ -454,6 +501,7 @@ out_free_inpring:
24478 free_irq(jrp->irq, dev);
24480 + tasklet_kill(&jrp->irqtask);
24484 @@ -489,15 +537,28 @@ static int caam_jr_probe(struct platform
24488 - jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
24489 + jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
24491 - if (sizeof(dma_addr_t) == sizeof(u64))
24492 - if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
24493 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
24494 + if (sizeof(dma_addr_t) == sizeof(u64)) {
24496 + error = dma_set_mask_and_coherent(jrdev,
24497 + DMA_BIT_MASK(49));
24498 + else if (of_device_is_compatible(nprop,
24499 + "fsl,sec-v5.0-job-ring"))
24500 + error = dma_set_mask_and_coherent(jrdev,
24501 + DMA_BIT_MASK(40));
24503 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
24505 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
24506 + error = dma_set_mask_and_coherent(jrdev,
24507 + DMA_BIT_MASK(36));
24509 + error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
24512 + dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
24518 /* Identify the interrupt */
24519 jrpriv->irq = irq_of_parse_and_map(nprop, 0);
24520 @@ -517,10 +578,12 @@ static int caam_jr_probe(struct platform
24522 atomic_set(&jrpriv->tfm_count, 0);
24524 + jr_driver_probed++;
24529 -static struct of_device_id caam_jr_match[] = {
24530 +static const struct of_device_id caam_jr_match[] = {
24532 .compatible = "fsl,sec-v4.0-job-ring",
24534 --- a/drivers/crypto/caam/jr.h
24535 +++ b/drivers/crypto/caam/jr.h
24539 /* Prototypes for backend-level services exposed to APIs */
24540 +int caam_jr_driver_probed(void);
24541 struct device *caam_jr_alloc(void);
24542 +struct device *caam_jridx_alloc(int idx);
24543 void caam_jr_free(struct device *rdev);
24544 int caam_jr_enqueue(struct device *dev, u32 *desc,
24545 void (*cbk)(struct device *dev, u32 *desc, u32 status,
24546 --- a/drivers/crypto/caam/key_gen.c
24547 +++ b/drivers/crypto/caam/key_gen.c
24548 @@ -41,15 +41,29 @@ Split key generation--------------------
24549 [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
24552 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24553 - int split_key_pad_len, const u8 *key_in, u32 keylen,
24555 +int gen_split_key(struct device *jrdev, u8 *key_out,
24556 + struct alginfo * const adata, const u8 *key_in, u32 keylen,
24560 struct split_key_result result;
24561 dma_addr_t dma_addr_in, dma_addr_out;
24564 + adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
24565 + adata->keylen_pad = split_key_pad_len(adata->algtype &
24566 + OP_ALG_ALGSEL_MASK);
24569 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
24570 + adata->keylen, adata->keylen_pad);
24571 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
24572 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
24575 + if (adata->keylen_pad > max_keylen)
24578 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
24580 dev_err(jrdev, "unable to allocate key input memory\n");
24581 @@ -63,7 +77,7 @@ int gen_split_key(struct device *jrdev,
24585 - dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
24586 + dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
24588 if (dma_mapping_error(jrdev, dma_addr_out)) {
24589 dev_err(jrdev, "unable to map key output memory\n");
24590 @@ -74,7 +88,9 @@ int gen_split_key(struct device *jrdev,
24591 append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
24593 /* Sets MDHA up into an HMAC-INIT */
24594 - append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
24595 + append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
24596 + OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
24600 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
24601 @@ -87,7 +103,7 @@ int gen_split_key(struct device *jrdev,
24602 * FIFO_STORE with the explicit split-key content store
24603 * (0x26 output type)
24605 - append_fifo_store(desc, dma_addr_out, split_key_len,
24606 + append_fifo_store(desc, dma_addr_out, adata->keylen,
24607 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
24610 @@ -108,11 +124,11 @@ int gen_split_key(struct device *jrdev,
24612 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
24613 DUMP_PREFIX_ADDRESS, 16, 4, key_out,
24614 - split_key_pad_len, 1);
24615 + adata->keylen_pad, 1);
24619 - dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
24620 + dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
24623 dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
24624 --- a/drivers/crypto/caam/key_gen.h
24625 +++ b/drivers/crypto/caam/key_gen.h
24631 + * split_key_len - Compute MDHA split key length for a given algorithm
24632 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
24633 + * SHA224, SHA384, SHA512.
24635 + * Return: MDHA split key length
24637 +static inline u32 split_key_len(u32 hash)
24639 + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
24640 + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
24643 + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
24645 + return (u32)(mdpadlen[idx] * 2);
24649 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
24650 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
24651 + * SHA224, SHA384, SHA512.
24653 + * Return: MDHA split key pad length
24655 +static inline u32 split_key_pad_len(u32 hash)
24657 + return ALIGN(split_key_len(hash), 16);
24660 struct split_key_result {
24661 struct completion completion;
24663 @@ -12,6 +42,6 @@ struct split_key_result {
24665 void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
24667 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
24668 - int split_key_pad_len, const u8 *key_in, u32 keylen,
24670 +int gen_split_key(struct device *jrdev, u8 *key_out,
24671 + struct alginfo * const adata, const u8 *key_in, u32 keylen,
24673 --- a/drivers/crypto/caam/pdb.h
24674 +++ b/drivers/crypto/caam/pdb.h
24675 @@ -483,6 +483,8 @@ struct dsa_verify_pdb {
24676 #define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT)
24677 #define RSA_PDB_D_SHIFT 12
24678 #define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT)
24679 +#define RSA_PDB_Q_SHIFT 12
24680 +#define RSA_PDB_Q_MASK (0xFFF << RSA_PDB_Q_SHIFT)
24682 #define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT)
24683 #define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT)
24684 @@ -490,6 +492,8 @@ struct dsa_verify_pdb {
24685 #define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT)
24687 #define RSA_PRIV_KEY_FRM_1 0
24688 +#define RSA_PRIV_KEY_FRM_2 1
24689 +#define RSA_PRIV_KEY_FRM_3 2
24692 * RSA Encrypt Protocol Data Block
24693 @@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
24698 + * RSA Decrypt PDB - Private Key Form #2
24699 + * @sgf : scatter-gather field
24700 + * @g_dma : dma address of encrypted input data
24701 + * @f_dma : dma address of output data
24702 + * @d_dma : dma address of RSA private exponent
24703 + * @p_dma : dma address of RSA prime factor p of RSA modulus n
24704 + * @q_dma : dma address of RSA prime factor q of RSA modulus n
24705 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24706 + * as internal state buffer. It is assumed to be as long as p.
24707 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24708 + * as internal state buffer. It is assumed to be as long as q.
24709 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
24711 +struct rsa_priv_f2_pdb {
24713 + dma_addr_t g_dma;
24714 + dma_addr_t f_dma;
24715 + dma_addr_t d_dma;
24716 + dma_addr_t p_dma;
24717 + dma_addr_t q_dma;
24718 + dma_addr_t tmp1_dma;
24719 + dma_addr_t tmp2_dma;
24724 + * RSA Decrypt PDB - Private Key Form #3
24725 + * This is the RSA Chinese Reminder Theorem (CRT) form for two prime factors of
24726 + * the RSA modulus.
24727 + * @sgf : scatter-gather field
24728 + * @g_dma : dma address of encrypted input data
24729 + * @f_dma : dma address of output data
24730 + * @c_dma : dma address of RSA CRT coefficient
24731 + * @p_dma : dma address of RSA prime factor p of RSA modulus n
24732 + * @q_dma : dma address of RSA prime factor q of RSA modulus n
24733 + * @dp_dma : dma address of RSA CRT exponent of RSA prime factor p
24734 + * @dq_dma : dma address of RSA CRT exponent of RSA prime factor q
24735 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24736 + * as internal state buffer. It is assumed to be as long as p.
24737 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
24738 + * as internal state buffer. It is assumed to be as long as q.
24739 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
24741 +struct rsa_priv_f3_pdb {
24743 + dma_addr_t g_dma;
24744 + dma_addr_t f_dma;
24745 + dma_addr_t c_dma;
24746 + dma_addr_t p_dma;
24747 + dma_addr_t q_dma;
24748 + dma_addr_t dp_dma;
24749 + dma_addr_t dq_dma;
24750 + dma_addr_t tmp1_dma;
24751 + dma_addr_t tmp2_dma;
24756 --- a/drivers/crypto/caam/pkc_desc.c
24757 +++ b/drivers/crypto/caam/pkc_desc.c
24758 @@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, st
24759 append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24760 RSA_PRIV_KEY_FRM_1);
24763 +/* Descriptor for RSA Private operation - Private Key Form #2 */
24764 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
24766 + init_job_desc_pdb(desc, 0, sizeof(*pdb));
24767 + append_cmd(desc, pdb->sgf);
24768 + append_ptr(desc, pdb->g_dma);
24769 + append_ptr(desc, pdb->f_dma);
24770 + append_ptr(desc, pdb->d_dma);
24771 + append_ptr(desc, pdb->p_dma);
24772 + append_ptr(desc, pdb->q_dma);
24773 + append_ptr(desc, pdb->tmp1_dma);
24774 + append_ptr(desc, pdb->tmp2_dma);
24775 + append_cmd(desc, pdb->p_q_len);
24776 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24777 + RSA_PRIV_KEY_FRM_2);
24780 +/* Descriptor for RSA Private operation - Private Key Form #3 */
24781 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
24783 + init_job_desc_pdb(desc, 0, sizeof(*pdb));
24784 + append_cmd(desc, pdb->sgf);
24785 + append_ptr(desc, pdb->g_dma);
24786 + append_ptr(desc, pdb->f_dma);
24787 + append_ptr(desc, pdb->c_dma);
24788 + append_ptr(desc, pdb->p_dma);
24789 + append_ptr(desc, pdb->q_dma);
24790 + append_ptr(desc, pdb->dp_dma);
24791 + append_ptr(desc, pdb->dq_dma);
24792 + append_ptr(desc, pdb->tmp1_dma);
24793 + append_ptr(desc, pdb->tmp2_dma);
24794 + append_cmd(desc, pdb->p_q_len);
24795 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
24796 + RSA_PRIV_KEY_FRM_3);
24799 +++ b/drivers/crypto/caam/qi.c
24802 + * CAAM/SEC 4.x QI transport/backend driver
24803 + * Queue Interface backend functionality
24805 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
24806 + * Copyright 2016-2017 NXP
24809 +#include <linux/cpumask.h>
24810 +#include <linux/kthread.h>
24811 +#include <linux/fsl_qman.h>
24816 +#include "intern.h"
24817 +#include "desc_constr.h"
24819 +#define PREHDR_RSLS_SHIFT 31
24822 + * Use a reasonable backlog of frames (per CPU) as congestion threshold,
24823 + * so that resources used by the in-flight buffers do not become a memory hog.
24825 +#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
24827 +#define CAAM_QI_ENQUEUE_RETRIES 10000
24829 +#define CAAM_NAPI_WEIGHT 63
24832 + * caam_napi - struct holding CAAM NAPI-related params
24833 + * @irqtask: IRQ task for QI backend
24834 + * @p: QMan portal
24836 +struct caam_napi {
24837 + struct napi_struct irqtask;
24838 + struct qman_portal *p;
24842 + * caam_qi_pcpu_priv - percpu private data structure to main list of pending
24843 + * responses expected on each cpu.
24844 + * @caam_napi: CAAM NAPI params
24845 + * @net_dev: netdev used by NAPI
24846 + * @rsp_fq: response FQ from CAAM
24848 +struct caam_qi_pcpu_priv {
24849 + struct caam_napi caam_napi;
24850 + struct net_device net_dev;
24851 + struct qman_fq *rsp_fq;
24852 +} ____cacheline_aligned;
24854 +static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
24855 +static DEFINE_PER_CPU(int, last_cpu);
24858 + * caam_qi_priv - CAAM QI backend private params
24859 + * @cgr: QMan congestion group
24860 + * @qi_pdev: platform device for QI backend
24862 +struct caam_qi_priv {
24863 + struct qman_cgr cgr;
24864 + struct platform_device *qi_pdev;
24867 +static struct caam_qi_priv qipriv ____cacheline_aligned;
24870 + * This is written by only one core - the one that initialized the CGR - and
24871 + * read by multiple cores (all the others).
24873 +bool caam_congested __read_mostly;
24874 +EXPORT_SYMBOL(caam_congested);
24876 +#ifdef CONFIG_DEBUG_FS
24878 + * This is a counter for the number of times the congestion group (where all
24879 + * the request and response queues are) reached congestion. Incremented
24880 + * each time the congestion callback is called with congested == true.
24882 +static u64 times_congested;
24886 + * CPU from where the module initialised. This is required because QMan driver
24887 + * requires CGRs to be removed from same CPU from where they were originally
24890 +static int mod_init_cpu;
24893 + * This is a cache of buffers, from which the users of CAAM QI driver
24894 + * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
24895 + * doing malloc on the hotpath.
24896 + * NOTE: A more elegant solution would be to have some headroom in the frames
24897 + * being processed. This could be added by the dpaa-ethernet driver.
24898 + * This would pose a problem for userspace application processing which
24899 + * cannot know of this limitation. So for now, this will work.
24900 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
24902 +static struct kmem_cache *qi_cache;
24904 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
24908 + int num_retries = 0;
24911 + fd.format = qm_fd_compound;
24912 + fd.cong_weight = req->fd_sgt[1].length;
24913 + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
24914 + DMA_BIDIRECTIONAL);
24915 + if (dma_mapping_error(qidev, fd.addr)) {
24916 + dev_err(qidev, "DMA mapping error for QI enqueue request\n");
24921 + ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
24922 + if (likely(!ret))
24925 + if (ret != -EBUSY)
24928 + } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
24930 + dev_err(qidev, "qman_enqueue failed: %d\n", ret);
24934 +EXPORT_SYMBOL(caam_qi_enqueue);
24936 +static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
24937 + const struct qm_mr_entry *msg)
24939 + const struct qm_fd *fd;
24940 + struct caam_drv_req *drv_req;
24941 + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
24943 + fd = &msg->ern.fd;
24945 + if (fd->format != qm_fd_compound) {
24946 + dev_err(qidev, "Non-compound FD from CAAM\n");
24950 + drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
24953 + "Can't find original request for CAAM response\n");
24957 + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
24958 + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
24960 + drv_req->cbk(drv_req, -EIO);
24963 +static struct qman_fq *create_caam_req_fq(struct device *qidev,
24964 + struct qman_fq *rsp_fq,
24965 + dma_addr_t hwdesc,
24966 + int fq_sched_flag)
24969 + struct qman_fq *req_fq;
24970 + struct qm_mcc_initfq opts;
24972 + req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
24974 + return ERR_PTR(-ENOMEM);
24976 + req_fq->cb.ern = caam_fq_ern_cb;
24977 + req_fq->cb.fqs = NULL;
24979 + ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
24980 + QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
24983 + dev_err(qidev, "Failed to create session req FQ\n");
24984 + goto create_req_fq_fail;
24987 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
24988 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
24989 + QM_INITFQ_WE_CGID;
24990 + opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
24991 + opts.fqd.dest.channel = qm_channel_caam;
24992 + opts.fqd.dest.wq = 2;
24993 + opts.fqd.context_b = qman_fq_fqid(rsp_fq);
24994 + opts.fqd.context_a.hi = upper_32_bits(hwdesc);
24995 + opts.fqd.context_a.lo = lower_32_bits(hwdesc);
24996 + opts.fqd.cgid = qipriv.cgr.cgrid;
24998 + ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
25000 + dev_err(qidev, "Failed to init session req FQ\n");
25001 + goto init_req_fq_fail;
25004 + dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
25005 + smp_processor_id());
25009 + qman_destroy_fq(req_fq, 0);
25010 +create_req_fq_fail:
25012 + return ERR_PTR(ret);
25015 +static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
25019 + ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
25020 + QMAN_VOLATILE_FLAG_FINISH,
25021 + QM_VDQCR_PRECEDENCE_VDQCR |
25022 + QM_VDQCR_NUMFRAMES_TILLEMPTY);
25024 + dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
25029 + struct qman_portal *p;
25031 + p = qman_get_affine_portal(smp_processor_id());
25032 + qman_p_poll_dqrr(p, 16);
25033 + } while (fq->flags & QMAN_FQ_STATE_NE);
25038 +static int kill_fq(struct device *qidev, struct qman_fq *fq)
25043 + ret = qman_retire_fq(fq, &flags);
25045 + dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
25052 + /* Async FQ retirement condition */
25054 + /* Retry till FQ gets in retired state */
25057 + } while (fq->state != qman_fq_state_retired);
25059 + WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
25060 + WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
25064 + if (fq->flags & QMAN_FQ_STATE_NE) {
25065 + ret = empty_retired_fq(qidev, fq);
25067 + dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
25073 + ret = qman_oos_fq(fq);
25075 + dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
25077 + qman_destroy_fq(fq, 0);
25083 +static int empty_caam_fq(struct qman_fq *fq)
25086 + struct qm_mcr_queryfq_np np;
25088 + /* Wait till the older CAAM FQ get empty */
25090 + ret = qman_query_fq_np(fq, &np);
25101 + * Give extra time for pending jobs from this FQ in holding tanks
25102 + * to get processed
25108 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
25112 + struct qman_fq *new_fq, *old_fq;
25113 + struct device *qidev = drv_ctx->qidev;
25115 + num_words = desc_len(sh_desc);
25116 + if (num_words > MAX_SDLEN) {
25117 + dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
25121 + /* Note down older req FQ */
25122 + old_fq = drv_ctx->req_fq;
25124 + /* Create a new req FQ in parked state */
25125 + new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
25126 + drv_ctx->context_a, 0);
25127 + if (unlikely(IS_ERR_OR_NULL(new_fq))) {
25128 + dev_err(qidev, "FQ allocation for shdesc update failed\n");
25129 + return PTR_ERR(new_fq);
25132 + /* Hook up new FQ to context so that new requests keep queuing */
25133 + drv_ctx->req_fq = new_fq;
25135 + /* Empty and remove the older FQ */
25136 + ret = empty_caam_fq(old_fq);
25138 + dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
25140 + /* We can revert to older FQ */
25141 + drv_ctx->req_fq = old_fq;
25143 + if (kill_fq(qidev, new_fq))
25144 + dev_warn(qidev, "New CAAM FQ kill failed\n");
25150 + * Re-initialise pre-header. Set RSLS and SDLEN.
25151 + * Update the shared descriptor for driver context.
25153 + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
25155 + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
25156 + dma_sync_single_for_device(qidev, drv_ctx->context_a,
25157 + sizeof(drv_ctx->sh_desc) +
25158 + sizeof(drv_ctx->prehdr),
25159 + DMA_BIDIRECTIONAL);
25161 + /* Put the new FQ in scheduled state */
25162 + ret = qman_schedule_fq(new_fq);
25164 + dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
25167 + * We can kill new FQ and revert to old FQ.
25168 + * Since the desc is already modified, it is success case
25171 + drv_ctx->req_fq = old_fq;
25173 + if (kill_fq(qidev, new_fq))
25174 + dev_warn(qidev, "New CAAM FQ kill failed\n");
25175 + } else if (kill_fq(qidev, old_fq)) {
25176 + dev_warn(qidev, "Old CAAM FQ kill failed\n");
25181 +EXPORT_SYMBOL(caam_drv_ctx_update);
25183 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
25189 + dma_addr_t hwdesc;
25190 + struct caam_drv_ctx *drv_ctx;
25191 + const cpumask_t *cpus = qman_affine_cpus();
25193 + num_words = desc_len(sh_desc);
25194 + if (num_words > MAX_SDLEN) {
25195 + dev_err(qidev, "Invalid descriptor len: %d words\n",
25197 + return ERR_PTR(-EINVAL);
25200 + drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
25202 + return ERR_PTR(-ENOMEM);
25205 + * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
25206 + * and dma-map them.
25208 + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
25210 + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
25211 + size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
25212 + hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
25213 + DMA_BIDIRECTIONAL);
25214 + if (dma_mapping_error(qidev, hwdesc)) {
25215 + dev_err(qidev, "DMA map error for preheader + shdesc\n");
25217 + return ERR_PTR(-ENOMEM);
25219 + drv_ctx->context_a = hwdesc;
25221 + /* If given CPU does not own the portal, choose another one that does */
25222 + if (!cpumask_test_cpu(*cpu, cpus)) {
25223 + int *pcpu = &get_cpu_var(last_cpu);
25225 + *pcpu = cpumask_next(*pcpu, cpus);
25226 + if (*pcpu >= nr_cpu_ids)
25227 + *pcpu = cpumask_first(cpus);
25230 + put_cpu_var(last_cpu);
25232 + drv_ctx->cpu = *cpu;
25234 + /* Find response FQ hooked with this CPU */
25235 + drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
25237 + /* Attach request FQ */
25238 + drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
25239 + QMAN_INITFQ_FLAG_SCHED);
25240 + if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
25241 + dev_err(qidev, "create_caam_req_fq failed\n");
25242 + dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
25244 + return ERR_PTR(-ENOMEM);
25247 + drv_ctx->qidev = qidev;
25250 +EXPORT_SYMBOL(caam_drv_ctx_init);
25252 +void *qi_cache_alloc(gfp_t flags)
25254 + return kmem_cache_alloc(qi_cache, flags);
25256 +EXPORT_SYMBOL(qi_cache_alloc);
25258 +void qi_cache_free(void *obj)
25260 + kmem_cache_free(qi_cache, obj);
25262 +EXPORT_SYMBOL(qi_cache_free);
25264 +static int caam_qi_poll(struct napi_struct *napi, int budget)
25266 + struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
25268 + int cleaned = qman_p_poll_dqrr(np->p, budget);
25270 + if (cleaned < budget) {
25271 + napi_complete(napi);
25272 + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
25278 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
25280 + if (IS_ERR_OR_NULL(drv_ctx))
25283 + /* Remove request FQ */
25284 + if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
25285 + dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
25287 + dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
25288 + sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
25289 + DMA_BIDIRECTIONAL);
25292 +EXPORT_SYMBOL(caam_drv_ctx_rel);
25294 +int caam_qi_shutdown(struct device *qidev)
25297 + struct caam_qi_priv *priv = dev_get_drvdata(qidev);
25298 + const cpumask_t *cpus = qman_affine_cpus();
25299 + struct cpumask old_cpumask = current->cpus_allowed;
25301 + for_each_cpu(i, cpus) {
25302 + struct napi_struct *irqtask;
25304 + irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
25305 + napi_disable(irqtask);
25306 + netif_napi_del(irqtask);
25308 + if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
25309 + dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
25313 + * QMan driver requires CGRs to be deleted from same CPU from where they
25314 + * were instantiated. Hence we get the module removal execute from the
25315 + * same CPU from where it was originally inserted.
25317 + set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
25319 + ret = qman_delete_cgr(&priv->cgr);
25321 + dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
25323 + qman_release_cgrid(priv->cgr.cgrid);
25325 + kmem_cache_destroy(qi_cache);
25327 + /* Now that we're done with the CGRs, restore the cpus allowed mask */
25328 + set_cpus_allowed_ptr(current, &old_cpumask);
25330 + platform_device_unregister(priv->qi_pdev);
25334 +static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
25336 + caam_congested = congested;
25339 +#ifdef CONFIG_DEBUG_FS
25340 + times_congested++;
25342 + pr_debug_ratelimited("CAAM entered congestion\n");
25345 + pr_debug_ratelimited("CAAM exited congestion\n");
25349 +static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
25352 + * In case of threaded ISR, for RT kernels in_irq() does not return
25353 + * appropriate value, so use in_serving_softirq to distinguish between
25354 + * softirq and irq contexts.
25356 + if (unlikely(in_irq() || !in_serving_softirq())) {
25357 + /* Disable QMan IRQ source and invoke NAPI */
25358 + qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
25360 + napi_schedule(&np->irqtask);
25366 +static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
25367 + struct qman_fq *rsp_fq,
25368 + const struct qm_dqrr_entry *dqrr)
25370 + struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
25371 + struct caam_drv_req *drv_req;
25372 + const struct qm_fd *fd;
25373 + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
25375 + if (caam_qi_napi_schedule(p, caam_napi))
25376 + return qman_cb_dqrr_stop;
25379 + if (unlikely(fd->status))
25380 + dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
25382 +	if (unlikely(fd->format != qm_fd_compound)) {
25383 + dev_err(qidev, "Non-compound FD from CAAM\n");
25384 + return qman_cb_dqrr_consume;
25387 + drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
25388 + if (unlikely(!drv_req)) {
25390 + "Can't find original request for caam response\n");
25391 + return qman_cb_dqrr_consume;
25394 + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
25395 + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
25397 + drv_req->cbk(drv_req, fd->status);
25398 + return qman_cb_dqrr_consume;
25401 +static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
25403 + struct qm_mcc_initfq opts;
25404 + struct qman_fq *fq;
25407 + fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
25411 + fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
25413 + ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
25414 + QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
25416 + dev_err(qidev, "Rsp FQ create failed\n");
25421 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
25422 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
25423 + QM_INITFQ_WE_CGID;
25424 + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
25426 + opts.fqd.dest.channel = qman_affine_channel(cpu);
25427 + opts.fqd.dest.wq = 3;
25428 + opts.fqd.cgid = qipriv.cgr.cgrid;
25429 + opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
25430 + QM_STASHING_EXCL_DATA;
25431 + opts.fqd.context_a.stashing.data_cl = 1;
25432 + opts.fqd.context_a.stashing.context_cl = 1;
25434 + ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
25436 + dev_err(qidev, "Rsp FQ init failed\n");
25441 + per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
25443 + dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
25447 +static int init_cgr(struct device *qidev)
25450 + struct qm_mcc_initcgr opts;
25451 + const u64 cpus = *(u64 *)qman_affine_cpus();
25452 + const int num_cpus = hweight64(cpus);
25453 + const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
25455 + ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
25457 + dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
25461 + qipriv.cgr.cb = cgr_cb;
25462 + memset(&opts, 0, sizeof(opts));
25463 + opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
25464 + opts.cgr.cscn_en = QM_CGR_EN;
25465 + opts.cgr.mode = QMAN_CGR_MODE_FRAME;
25466 + qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
25468 + ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
25470 + dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
25471 + qipriv.cgr.cgrid);
25475 + dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
25479 +static int alloc_rsp_fqs(struct device *qidev)
25482 + const cpumask_t *cpus = qman_affine_cpus();
25484 +	/* Now create response FQs */
25485 + for_each_cpu(i, cpus) {
25486 + ret = alloc_rsp_fq_cpu(qidev, i);
25488 + dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
25496 +static void free_rsp_fqs(void)
25499 + const cpumask_t *cpus = qman_affine_cpus();
25501 + for_each_cpu(i, cpus)
25502 + kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
25505 +int caam_qi_init(struct platform_device *caam_pdev)
25508 + struct platform_device *qi_pdev;
25509 + struct device *ctrldev = &caam_pdev->dev, *qidev;
25510 + struct caam_drv_private *ctrlpriv;
25511 + const cpumask_t *cpus = qman_affine_cpus();
25512 + struct cpumask old_cpumask = current->cpus_allowed;
25513 + static struct platform_device_info qi_pdev_info = {
25514 + .name = "caam_qi",
25515 + .id = PLATFORM_DEVID_NONE
25519 + * QMAN requires CGRs to be removed from same CPU+portal from where it
25520 + * was originally allocated. Hence we need to note down the
25521 + * initialisation CPU and use the same CPU for module exit.
25522 +	 * We select the first CPU from the list of portal-owning CPUs.
25523 + * Then we pin module init to this CPU.
25525 + mod_init_cpu = cpumask_first(cpus);
25526 + set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
25528 + qi_pdev_info.parent = ctrldev;
25529 + qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
25530 + qi_pdev = platform_device_register_full(&qi_pdev_info);
25531 + if (IS_ERR(qi_pdev))
25532 + return PTR_ERR(qi_pdev);
25533 + arch_setup_dma_ops(&qi_pdev->dev, 0, 0, NULL, true);
25535 + ctrlpriv = dev_get_drvdata(ctrldev);
25536 + qidev = &qi_pdev->dev;
25538 + qipriv.qi_pdev = qi_pdev;
25539 + dev_set_drvdata(qidev, &qipriv);
25541 + /* Initialize the congestion detection */
25542 + err = init_cgr(qidev);
25544 + dev_err(qidev, "CGR initialization failed: %d\n", err);
25545 + platform_device_unregister(qi_pdev);
25549 + /* Initialise response FQs */
25550 + err = alloc_rsp_fqs(qidev);
25552 + dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
25554 + platform_device_unregister(qi_pdev);
25559 + * Enable the NAPI contexts on each of the core which has an affine
25562 + for_each_cpu(i, cpus) {
25563 + struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
25564 + struct caam_napi *caam_napi = &priv->caam_napi;
25565 + struct napi_struct *irqtask = &caam_napi->irqtask;
25566 + struct net_device *net_dev = &priv->net_dev;
25568 + net_dev->dev = *qidev;
25569 + INIT_LIST_HEAD(&net_dev->napi_list);
25571 + netif_napi_add(net_dev, irqtask, caam_qi_poll,
25572 + CAAM_NAPI_WEIGHT);
25574 + napi_enable(irqtask);
25577 + /* Hook up QI device to parent controlling caam device */
25578 + ctrlpriv->qidev = qidev;
25580 + qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
25581 + SLAB_CACHE_DMA, NULL);
25583 + dev_err(qidev, "Can't allocate CAAM cache\n");
25585 + platform_device_unregister(qi_pdev);
25589 + /* Done with the CGRs; restore the cpus allowed mask */
25590 + set_cpus_allowed_ptr(current, &old_cpumask);
25591 +#ifdef CONFIG_DEBUG_FS
25592 + debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
25593 + ×_congested, &caam_fops_u64_ro);
25595 + dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
25599 +++ b/drivers/crypto/caam/qi.h
25602 + * Public definitions for the CAAM/QI (Queue Interface) backend.
25604 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
25605 + * Copyright 2016-2017 NXP
25611 +#include <linux/fsl_qman.h>
25612 +#include "compat.h"
25614 +#include "desc_constr.h"
25617 + * CAAM hardware constructs a job descriptor which points to a shared descriptor
25618 + * (as pointed by context_a of to-CAAM FQ).
25619 + * When the job descriptor is executed by DECO, the whole job descriptor
25620 + * together with shared descriptor gets loaded in DECO buffer, which is
25621 + * 64 words (each 32-bit) long.
25623 + * The job descriptor constructed by CAAM hardware has the following layout:
25625 + * HEADER (1 word)
25626 + * Shdesc ptr (1 or 2 words)
25627 + * SEQ_OUT_PTR (1 word)
25628 + * Out ptr (1 or 2 words)
25629 + * Out length (1 word)
25630 + * SEQ_IN_PTR (1 word)
25631 + * In ptr (1 or 2 words)
25632 + * In length (1 word)
25634 + * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
25636 + * Apart from shdesc contents, the total number of words that get loaded in DECO
25637 + * buffer are '8' or '11'. The remaining words in DECO buffer can be used for
25638 + * storing shared descriptor.
25640 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
25642 +/* Length of a single buffer in the QI driver memory cache */
25643 +#define CAAM_QI_MEMCACHE_SIZE 768
25645 +extern bool caam_congested __read_mostly;
25648 + * This is the request structure the driver application should fill while
25649 + * submitting a job to driver.
25651 +struct caam_drv_req;
25654 + * caam_qi_cbk - application's callback function invoked by the driver when the
25655 + * request has been successfully processed.
25656 + * @drv_req: original request that was submitted
25657 + * @status: completion status of request (0 - success, non-zero - error code)
25659 +typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
25669 + * caam_drv_ctx - CAAM/QI backend driver context
25671 + * The jobs are processed by the driver against a driver context.
25672 + * With every cryptographic context, a driver context is attached.
25673 + * The driver context contains data for private use by driver.
25674 + * For the applications, this is an opaque structure.
25676 + * @prehdr: preheader placed before shrd desc
25677 + * @sh_desc: shared descriptor
25678 + * @context_a: shared descriptor dma address
25679 + * @req_fq: to-CAAM request frame queue
25680 + * @rsp_fq: from-CAAM response frame queue
25681 + * @cpu: cpu on which to receive CAAM response
25682 + * @op_type: operation type
25683 + * @qidev: device pointer for CAAM/QI backend
25685 +struct caam_drv_ctx {
25687 + u32 sh_desc[MAX_SDLEN];
25688 + dma_addr_t context_a;
25689 + struct qman_fq *req_fq;
25690 + struct qman_fq *rsp_fq;
25692 + enum optype op_type;
25693 + struct device *qidev;
25694 +} ____cacheline_aligned;
25697 + * caam_drv_req - The request structure the driver application should fill while
25698 + * submitting a job to driver.
25699 + * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
25701 + * @cbk: callback function to invoke when job is completed
25702 + * @app_ctx: arbitrary context attached with request by the application
25704 + * The fields mentioned below should not be used by application.
25705 + * These are for private use by driver.
25707 + * @hdr__: linked list header to maintain list of outstanding requests to CAAM
25708 + * @hwaddr: DMA address for the S/G table.
25710 +struct caam_drv_req {
25711 + struct qm_sg_entry fd_sgt[2];
25712 + struct caam_drv_ctx *drv_ctx;
25715 +} ____cacheline_aligned;
25718 + * caam_drv_ctx_init - Initialise a CAAM/QI driver context
25720 + * A CAAM/QI driver context must be attached with each cryptographic context.
25721 + * This function allocates memory for CAAM/QI context and returns a handle to
25722 + * the application. This handle must be submitted along with each enqueue
25723 + * request to the driver by the application.
25725 + * @cpu: CPU where the application prefers to the driver to receive CAAM
25726 + * responses. The request completion callback would be issued from this
25728 + * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
25731 + * Returns a driver context on success or negative error code on failure.
25733 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
25737 + * caam_qi_enqueue - Submit a request to QI backend driver.
25739 + * The request structure must be properly filled as described above.
25741 + * @qidev: device pointer for QI backend
25742 + * @req: CAAM QI request structure
25744 + * Returns 0 on success or negative error code on failure.
25746 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
25749 + * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
25750 + * or too many CAAM responses are pending to be processed.
25751 + * @drv_ctx: driver context for which job is to be submitted
25753 + * Returns caam congestion status 'true/false'
25755 +bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
25758 + * caam_drv_ctx_update - Update QI driver context
25760 + * Invoked when shared descriptor is required to be changed in driver context.
25762 + * @drv_ctx: driver context to be updated
25763 + * @sh_desc: new shared descriptor pointer to be updated in QI driver context
25765 + * Returns 0 on success or negative error code on failure.
25767 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
25770 + * caam_drv_ctx_rel - Release a QI driver context
25771 + * @drv_ctx: context to be released
25773 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
25775 +int caam_qi_init(struct platform_device *pdev);
25776 +int caam_qi_shutdown(struct device *dev);
25779 + * qi_cache_alloc - Allocate buffers from CAAM-QI cache
25781 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has
25782 + * to be allocated on the hotpath. Instead of using malloc, one can use the
25783 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
25784 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which is sufficient to host
25786 + * @flags: flags that would be used for the equivalent malloc(..) call
25788 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
25790 +void *qi_cache_alloc(gfp_t flags);
25793 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
25795 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs
25796 + * the buffer previously allocated by a qi_cache_alloc call.
25797 + * No checking is being done, the call is a passthrough call to
25798 + * kmem_cache_free(...)
25800 + * @obj: object previously allocated using qi_cache_alloc()
25802 +void qi_cache_free(void *obj);
25804 +#endif /* __QI_H__ */
25805 --- a/drivers/crypto/caam/regs.h
25806 +++ b/drivers/crypto/caam/regs.h
25808 * CAAM hardware register-level view
25810 * Copyright 2008-2011 Freescale Semiconductor, Inc.
25811 + * Copyright 2017 NXP
25818 extern bool caam_little_end;
25819 +extern bool caam_imx;
25821 #define caam_to_cpu(len) \
25822 static inline u##len caam##len ## _to_cpu(u##len val) \
25823 @@ -154,13 +156,10 @@ static inline u64 rd_reg64(void __iomem
25824 #else /* CONFIG_64BIT */
25825 static inline void wr_reg64(void __iomem *reg, u64 data)
25827 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25828 - if (caam_little_end) {
25829 + if (!caam_imx && caam_little_end) {
25830 wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
25831 wr_reg32((u32 __iomem *)(reg), data);
25836 wr_reg32((u32 __iomem *)(reg), data >> 32);
25837 wr_reg32((u32 __iomem *)(reg) + 1, data);
25839 @@ -168,41 +167,40 @@ static inline void wr_reg64(void __iomem
25841 static inline u64 rd_reg64(void __iomem *reg)
25843 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25844 - if (caam_little_end)
25845 + if (!caam_imx && caam_little_end)
25846 return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
25847 (u64)rd_reg32((u32 __iomem *)(reg)));
25850 - return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
25851 - (u64)rd_reg32((u32 __iomem *)(reg) + 1));
25853 + return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
25854 + (u64)rd_reg32((u32 __iomem *)(reg) + 1));
25856 #endif /* CONFIG_64BIT */
25858 +static inline u64 cpu_to_caam_dma64(dma_addr_t value)
25861 + return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
25862 + (u64)cpu_to_caam32(upper_32_bits(value)));
25864 + return cpu_to_caam64(value);
25867 +static inline u64 caam_dma64_to_cpu(u64 value)
25870 + return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
25871 + (u64)caam32_to_cpu(upper_32_bits(value)));
25873 + return caam64_to_cpu(value);
25876 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
25877 -#ifdef CONFIG_SOC_IMX7D
25878 -#define cpu_to_caam_dma(value) \
25879 - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
25880 - (u64)cpu_to_caam32(upper_32_bits(value)))
25881 -#define caam_dma_to_cpu(value) \
25882 - (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
25883 - (u64)caam32_to_cpu(upper_32_bits(value)))
25885 -#define cpu_to_caam_dma(value) cpu_to_caam64(value)
25886 -#define caam_dma_to_cpu(value) caam64_to_cpu(value)
25887 -#endif /* CONFIG_SOC_IMX7D */
25888 +#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
25889 +#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
25891 #define cpu_to_caam_dma(value) cpu_to_caam32(value)
25892 #define caam_dma_to_cpu(value) caam32_to_cpu(value)
25893 -#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
25895 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
25896 -#define cpu_to_caam_dma64(value) \
25897 - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
25898 - (u64)cpu_to_caam32(upper_32_bits(value)))
25900 -#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
25902 +#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
25906 @@ -293,6 +291,7 @@ struct caam_perfmon {
25907 u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
25908 #define CTPR_MS_QI_SHIFT 25
25909 #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
25910 +#define CTPR_MS_DPAA2 BIT(13)
25911 #define CTPR_MS_VIRT_EN_INCL 0x00000001
25912 #define CTPR_MS_VIRT_EN_POR 0x00000002
25913 #define CTPR_MS_PG_SZ_MASK 0x10
25914 @@ -628,6 +627,8 @@ struct caam_job_ring {
25915 #define JRSTA_DECOERR_INVSIGN 0x86
25916 #define JRSTA_DECOERR_DSASIGN 0x87
25918 +#define JRSTA_QIERR_ERROR_MASK 0x00ff
25920 #define JRSTA_CCBERR_JUMP 0x08000000
25921 #define JRSTA_CCBERR_INDEX_MASK 0xff00
25922 #define JRSTA_CCBERR_INDEX_SHIFT 8
25924 +++ b/drivers/crypto/caam/sg_sw_qm.h
25927 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
25928 + * Copyright 2016-2017 NXP
25930 + * Redistribution and use in source and binary forms, with or without
25931 + * modification, are permitted provided that the following conditions are met:
25932 + * * Redistributions of source code must retain the above copyright
25933 + * notice, this list of conditions and the following disclaimer.
25934 + * * Redistributions in binary form must reproduce the above copyright
25935 + * notice, this list of conditions and the following disclaimer in the
25936 + * documentation and/or other materials provided with the distribution.
25937 + * * Neither the name of Freescale Semiconductor nor the
25938 + * names of its contributors may be used to endorse or promote products
25939 + * derived from this software without specific prior written permission.
25942 + * ALTERNATIVELY, this software may be distributed under the terms of the
25943 + * GNU General Public License ("GPL") as published by the Free Software
25944 + * Foundation, either version 2 of that License or (at your option) any
25947 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
25948 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25949 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25950 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
25951 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25952 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25953 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25954 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25955 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25956 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25959 +#ifndef __SG_SW_QM_H
25960 +#define __SG_SW_QM_H
25962 +#include <linux/fsl_qman.h>
25965 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
25967 + dma_addr_t addr = qm_sg_ptr->opaque;
25969 + qm_sg_ptr->opaque = cpu_to_caam64(addr);
25970 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
25973 +static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
25974 + u32 len, u16 offset)
25976 + qm_sg_ptr->addr = dma;
25977 + qm_sg_ptr->length = len;
25978 + qm_sg_ptr->__reserved2 = 0;
25979 + qm_sg_ptr->bpid = 0;
25980 + qm_sg_ptr->__reserved3 = 0;
25981 + qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
25983 + cpu_to_hw_sg(qm_sg_ptr);
25986 +static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
25987 + dma_addr_t dma, u32 len, u16 offset)
25989 + qm_sg_ptr->extension = 0;
25990 + qm_sg_ptr->final = 0;
25991 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
25994 +static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
25995 + dma_addr_t dma, u32 len, u16 offset)
25997 + qm_sg_ptr->extension = 0;
25998 + qm_sg_ptr->final = 1;
25999 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
26002 +static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
26003 + dma_addr_t dma, u32 len, u16 offset)
26005 + qm_sg_ptr->extension = 1;
26006 + qm_sg_ptr->final = 0;
26007 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
26010 +static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
26011 + dma_addr_t dma, u32 len,
26014 + qm_sg_ptr->extension = 1;
26015 + qm_sg_ptr->final = 1;
26016 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
26020 + * convert scatterlist to h/w link table format
26021 + * but does not have final bit; instead, returns last entry
26023 +static inline struct qm_sg_entry *
26024 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
26025 + struct qm_sg_entry *qm_sg_ptr, u16 offset)
26027 + while (sg_count && sg) {
26028 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
26029 + sg_dma_len(sg), offset);
26031 + sg = sg_next(sg);
26034 + return qm_sg_ptr - 1;
26038 + * convert scatterlist to h/w link table format
26039 + * scatterlist must have been previously dma mapped
26041 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
26042 + struct qm_sg_entry *qm_sg_ptr, u16 offset)
26044 + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
26046 + qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
26047 + qm_sg_ptr->final = 1;
26048 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
26051 +#endif /* __SG_SW_QM_H */
26053 +++ b/drivers/crypto/caam/sg_sw_qm2.h
26056 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
26057 + * Copyright 2017 NXP
26059 + * Redistribution and use in source and binary forms, with or without
26060 + * modification, are permitted provided that the following conditions are met:
26061 + * * Redistributions of source code must retain the above copyright
26062 + * notice, this list of conditions and the following disclaimer.
26063 + * * Redistributions in binary form must reproduce the above copyright
26064 + * notice, this list of conditions and the following disclaimer in the
26065 + * documentation and/or other materials provided with the distribution.
26066 + * * Neither the names of the above-listed copyright holders nor the
26067 + * names of any contributors may be used to endorse or promote products
26068 + * derived from this software without specific prior written permission.
26071 + * ALTERNATIVELY, this software may be distributed under the terms of the
26072 + * GNU General Public License ("GPL") as published by the Free Software
26073 + * Foundation, either version 2 of that License or (at your option) any
26076 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26077 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26078 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26079 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
26080 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26081 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26082 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26083 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26084 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26085 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26086 + * POSSIBILITY OF SUCH DAMAGE.
26089 +#ifndef _SG_SW_QM2_H_
26090 +#define _SG_SW_QM2_H_
26092 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
26094 +static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
26095 + dma_addr_t dma, u32 len, u16 offset)
26097 + dpaa2_sg_set_addr(qm_sg_ptr, dma);
26098 + dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
26099 + dpaa2_sg_set_final(qm_sg_ptr, false);
26100 + dpaa2_sg_set_len(qm_sg_ptr, len);
26101 + dpaa2_sg_set_bpid(qm_sg_ptr, 0);
26102 + dpaa2_sg_set_offset(qm_sg_ptr, offset);
26106 + * convert scatterlist to h/w link table format
26107 + * but does not have final bit; instead, returns last entry
26109 +static inline struct dpaa2_sg_entry *
26110 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
26111 + struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
26113 + while (sg_count && sg) {
26114 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
26115 + sg_dma_len(sg), offset);
26117 + sg = sg_next(sg);
26120 + return qm_sg_ptr - 1;
26124 + * convert scatterlist to h/w link table format
26125 + * scatterlist must have been previously dma mapped
26127 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
26128 + struct dpaa2_sg_entry *qm_sg_ptr,
26131 + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
26132 + dpaa2_sg_set_final(qm_sg_ptr, true);
26135 +#endif /* _SG_SW_QM2_H_ */
26136 --- a/drivers/crypto/caam/sg_sw_sec4.h
26137 +++ b/drivers/crypto/caam/sg_sw_sec4.h
26142 +#ifndef _SG_SW_SEC4_H_
26143 +#define _SG_SW_SEC4_H_
26147 +#include "sg_sw_qm2.h"
26148 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
26150 -struct sec4_sg_entry;
26151 +struct sec4_sg_entry {
26158 * convert single dma address to h/w link table format
26159 @@ -15,9 +25,15 @@ struct sec4_sg_entry;
26160 static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
26161 dma_addr_t dma, u32 len, u16 offset)
26163 - sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
26164 - sec4_sg_ptr->len = cpu_to_caam32(len);
26165 - sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
26166 + if (caam_dpaa2) {
26167 + dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
26170 + sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
26171 + sec4_sg_ptr->len = cpu_to_caam32(len);
26172 + sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
26173 + SEC4_SG_OFFSET_MASK);
26176 print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
26177 DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
26178 @@ -43,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, in
26179 return sec4_sg_ptr - 1;
26182 +static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
26185 + dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
26187 + sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
26191 * convert scatterlist to h/w link table format
26192 * scatterlist must have been previously dma mapped
26193 @@ -52,31 +76,7 @@ static inline void sg_to_sec4_sg_last(st
26196 sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
26197 - sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
26200 -static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
26201 - struct scatterlist *sg, unsigned int total,
26202 - struct sec4_sg_entry *sec4_sg_ptr)
26205 - unsigned int len = min(sg_dma_len(sg), total);
26207 - dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
26209 - sg = sg_next(sg);
26212 - return sec4_sg_ptr - 1;
26213 + sg_to_sec4_set_last(sec4_sg_ptr);
26216 -/* derive number of elements in scatterlist, but return 0 for 1 */
26217 -static inline int sg_count(struct scatterlist *sg_list, int nbytes)
26219 - int sg_nents = sg_nents_for_len(sg_list, nbytes);
26221 - if (likely(sg_nents == 1))
26226 +#endif /* _SG_SW_SEC4_H_ */
26227 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c
26228 +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
26229 @@ -516,7 +516,7 @@ err:
26232 * rsi_disconnect() - This function performs the reverse of the probe function,
26233 - * it deintialize the driver structure.
26234 + * it deinitializes the driver structure.
26235 * @pfunction: Pointer to the USB interface structure.
26238 --- a/drivers/staging/wilc1000/linux_wlan.c
26239 +++ b/drivers/staging/wilc1000/linux_wlan.c
26240 @@ -211,7 +211,7 @@ static void deinit_irq(struct net_device
26241 vif = netdev_priv(dev);
26244 - /* Deintialize IRQ */
26245 + /* Deinitialize IRQ */
26246 if (wilc->dev_irq_num) {
26247 free_irq(wilc->dev_irq_num, wilc);
26248 gpio_free(wilc->gpio);
26249 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
26250 +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
26251 @@ -2359,7 +2359,7 @@ int wilc_deinit_host_int(struct net_devi
26252 del_timer_sync(&wilc_during_ip_timer);
26255 - netdev_err(net, "Error while deintializing host interface\n");
26256 + netdev_err(net, "Error while deinitializing host interface\n");
26261 +++ b/include/crypto/acompress.h
26264 + * Asynchronous Compression operations
26266 + * Copyright (c) 2016, Intel Corporation
26267 + * Authors: Weigang Li <weigang.li@intel.com>
26268 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26270 + * This program is free software; you can redistribute it and/or modify it
26271 + * under the terms of the GNU General Public License as published by the Free
26272 + * Software Foundation; either version 2 of the License, or (at your option)
26273 + * any later version.
26276 +#ifndef _CRYPTO_ACOMP_H
26277 +#define _CRYPTO_ACOMP_H
26278 +#include <linux/crypto.h>
26280 +#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
26283 + * struct acomp_req - asynchronous (de)compression request
26285 + * @base: Common attributes for asynchronous crypto requests
26286 + * @src: Source Data
26287 + * @dst: Destination data
26288 + * @slen: Size of the input buffer
26289 + * @dlen: Size of the output buffer and number of bytes produced
26290 + * @flags: Internal flags
26291 + * @__ctx: Start of private context data
26293 +struct acomp_req {
26294 + struct crypto_async_request base;
26295 + struct scatterlist *src;
26296 + struct scatterlist *dst;
26297 + unsigned int slen;
26298 + unsigned int dlen;
26300 + void *__ctx[] CRYPTO_MINALIGN_ATTR;
26304 + * struct crypto_acomp - user-instantiated objects which encapsulate
26305 + * algorithms and core processing logic
26307 + * @compress: Function performs a compress operation
26308 + * @decompress: Function performs a de-compress operation
26309 + * @dst_free: Frees destination buffer if allocated inside the
26311 + * @reqsize: Context size for (de)compression requests
26312 + * @base: Common crypto API algorithm data structure
26314 +struct crypto_acomp {
26315 + int (*compress)(struct acomp_req *req);
26316 + int (*decompress)(struct acomp_req *req);
26317 + void (*dst_free)(struct scatterlist *dst);
26318 + unsigned int reqsize;
26319 + struct crypto_tfm base;
26323 + * struct acomp_alg - asynchronous compression algorithm
26325 + * @compress: Function performs a compress operation
26326 + * @decompress: Function performs a de-compress operation
26327 + * @dst_free: Frees destination buffer if allocated inside the algorithm
26328 + * @init: Initialize the cryptographic transformation object.
26329 + * This function is used to initialize the cryptographic
26330 + * transformation object. This function is called only once at
26331 + * the instantiation time, right after the transformation context
26332 + * was allocated. In case the cryptographic hardware has some
26333 + * special requirements which need to be handled by software, this
26334 + * function shall check for the precise requirement of the
26335 + * transformation and put any software fallbacks in place.
26336 + * @exit: Deinitialize the cryptographic transformation object. This is a
26337 + * counterpart to @init, used to remove various changes set in
26340 + * @reqsize: Context size for (de)compression requests
26341 + * @base: Common crypto API algorithm data structure
26343 +struct acomp_alg {
26344 + int (*compress)(struct acomp_req *req);
26345 + int (*decompress)(struct acomp_req *req);
26346 + void (*dst_free)(struct scatterlist *dst);
26347 + int (*init)(struct crypto_acomp *tfm);
26348 + void (*exit)(struct crypto_acomp *tfm);
26349 + unsigned int reqsize;
26350 + struct crypto_alg base;
26354 + * DOC: Asynchronous Compression API
26356 + * The Asynchronous Compression API is used with the algorithms of type
26357 + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
26361 + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
26362 + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
26363 + * compression algorithm e.g. "deflate"
26364 + * @type: specifies the type of the algorithm
26365 + * @mask: specifies the mask for the algorithm
26367 + * Allocate a handle for a compression algorithm. The returned struct
26368 + * crypto_acomp is the handle that is required for any subsequent
26369 + * API invocation for the compression operations.
26371 + * Return: allocated handle in case of success; IS_ERR() is true in case
26372 + * of an error, PTR_ERR() returns the error code.
26374 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
26377 +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
26379 + return &tfm->base;
26382 +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
26384 + return container_of(alg, struct acomp_alg, base);
26387 +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
26389 + return container_of(tfm, struct crypto_acomp, base);
26392 +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
26394 + return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
26397 +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
26399 + return tfm->reqsize;
26402 +static inline void acomp_request_set_tfm(struct acomp_req *req,
26403 + struct crypto_acomp *tfm)
26405 + req->base.tfm = crypto_acomp_tfm(tfm);
26408 +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
26410 + return __crypto_acomp_tfm(req->base.tfm);
26414 + * crypto_free_acomp() -- free ACOMPRESS tfm handle
26416 + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
26418 +static inline void crypto_free_acomp(struct crypto_acomp *tfm)
26420 + crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
26423 +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
26425 + type &= ~CRYPTO_ALG_TYPE_MASK;
26426 + type |= CRYPTO_ALG_TYPE_ACOMPRESS;
26427 + mask |= CRYPTO_ALG_TYPE_MASK;
26429 + return crypto_has_alg(alg_name, type, mask);
26433 + * acomp_request_alloc() -- allocates asynchronous (de)compression request
26435 + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
26437 + * Return: allocated handle in case of success or NULL in case of an error
26439 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
26442 + * acomp_request_free() -- zeroize and free asynchronous (de)compression
26443 + * request as well as the output buffer if allocated
26444 + * inside the algorithm
26446 + * @req: request to free
26448 +void acomp_request_free(struct acomp_req *req);
26451 + * acomp_request_set_callback() -- Sets an asynchronous callback
26453 + * Callback will be called when an asynchronous operation on a given
26454 + * request is finished.
26456 + * @req: request that the callback will be set for
26457 + * @flgs: specify for instance if the operation may backlog
26458 + * @cmpl: callback which will be called
26459 + * @data: private data used by the caller
26461 +static inline void acomp_request_set_callback(struct acomp_req *req,
26463 + crypto_completion_t cmpl,
26466 + req->base.complete = cmpl;
26467 + req->base.data = data;
26468 + req->base.flags = flgs;
26472 + * acomp_request_set_params() -- Sets request parameters
26474 + * Sets parameters required by an acomp operation
26476 + * @req: asynchronous compress request
26477 + * @src: pointer to input buffer scatterlist
26478 + * @dst: pointer to output buffer scatterlist. If this is NULL, the
26479 + * acomp layer will allocate the output memory
26480 + * @slen: size of the input buffer
26481 + * @dlen: size of the output buffer. If dst is NULL, this can be used by
26482 + * the user to specify the maximum amount of memory to allocate
26484 +static inline void acomp_request_set_params(struct acomp_req *req,
26485 + struct scatterlist *src,
26486 + struct scatterlist *dst,
26487 + unsigned int slen,
26488 + unsigned int dlen)
26492 + req->slen = slen;
26493 + req->dlen = dlen;
26496 + req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
26500 + * crypto_acomp_compress() -- Invoke asynchronous compress operation
26502 + * Function invokes the asynchronous compress operation
26504 + * @req: asynchronous compress request
26506 + * Return: zero on success; error code in case of error
26508 +static inline int crypto_acomp_compress(struct acomp_req *req)
26510 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
26512 + return tfm->compress(req);
26516 + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
26518 + * Function invokes the asynchronous decompress operation
26520 + * @req: asynchronous compress request
26522 + * Return: zero on success; error code in case of error
26524 +static inline int crypto_acomp_decompress(struct acomp_req *req)
26526 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
26528 + return tfm->decompress(req);
26533 +++ b/include/crypto/internal/acompress.h
26536 + * Asynchronous Compression operations
26538 + * Copyright (c) 2016, Intel Corporation
26539 + * Authors: Weigang Li <weigang.li@intel.com>
26540 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26542 + * This program is free software; you can redistribute it and/or modify it
26543 + * under the terms of the GNU General Public License as published by the Free
26544 + * Software Foundation; either version 2 of the License, or (at your option)
26545 + * any later version.
26548 +#ifndef _CRYPTO_ACOMP_INT_H
26549 +#define _CRYPTO_ACOMP_INT_H
26550 +#include <crypto/acompress.h>
26553 + * Transform internal helpers.
26555 +static inline void *acomp_request_ctx(struct acomp_req *req)
26557 + return req->__ctx;
26560 +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
26562 + return tfm->base.__crt_ctx;
26565 +static inline void acomp_request_complete(struct acomp_req *req,
26568 + req->base.complete(&req->base, err);
26571 +static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
26573 + return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
26576 +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
26578 + struct acomp_req *req;
26580 + req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
26582 + acomp_request_set_tfm(req, tfm);
26586 +static inline void __acomp_request_free(struct acomp_req *req)
26592 + * crypto_register_acomp() -- Register asynchronous compression algorithm
26594 + * Function registers an implementation of an asynchronous
26595 + * compression algorithm
26597 + * @alg: algorithm definition
26599 + * Return: zero on success; error code in case of error
26601 +int crypto_register_acomp(struct acomp_alg *alg);
26604 + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
26606 + * Function unregisters an implementation of an asynchronous
26607 + * compression algorithm
26609 + * @alg: algorithm definition
26611 + * Return: zero on success; error code in case of error
26613 +int crypto_unregister_acomp(struct acomp_alg *alg);
26617 +++ b/include/crypto/internal/scompress.h
26620 + * Synchronous Compression operations
26622 + * Copyright 2015 LG Electronics Inc.
26623 + * Copyright (c) 2016, Intel Corporation
26624 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
26626 + * This program is free software; you can redistribute it and/or modify it
26627 + * under the terms of the GNU General Public License as published by the Free
26628 + * Software Foundation; either version 2 of the License, or (at your option)
26629 + * any later version.
26632 +#ifndef _CRYPTO_SCOMP_INT_H
26633 +#define _CRYPTO_SCOMP_INT_H
26634 +#include <linux/crypto.h>
26636 +#define SCOMP_SCRATCH_SIZE 131072
26638 +struct crypto_scomp {
26639 + struct crypto_tfm base;
26643 + * struct scomp_alg - synchronous compression algorithm
26645 + * @alloc_ctx: Function allocates algorithm specific context
26646 + * @free_ctx: Function frees context allocated with alloc_ctx
26647 + * @compress: Function performs a compress operation
26648 + * @decompress: Function performs a de-compress operation
26649 + * @init: Initialize the cryptographic transformation object.
26650 + * This function is used to initialize the cryptographic
26651 + * transformation object. This function is called only once at
26652 + * the instantiation time, right after the transformation context
26653 + * was allocated. In case the cryptographic hardware has some
26654 + * special requirements which need to be handled by software, this
26655 + * function shall check for the precise requirement of the
26656 + * transformation and put any software fallbacks in place.
26657 + * @exit: Deinitialize the cryptographic transformation object. This is a
26658 + * counterpart to @init, used to remove various changes set in
26660 + * @base: Common crypto API algorithm data structure
26662 +struct scomp_alg {
26663 + void *(*alloc_ctx)(struct crypto_scomp *tfm);
26664 + void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
26665 + int (*compress)(struct crypto_scomp *tfm, const u8 *src,
26666 + unsigned int slen, u8 *dst, unsigned int *dlen,
26668 + int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
26669 + unsigned int slen, u8 *dst, unsigned int *dlen,
26671 + struct crypto_alg base;
26674 +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
26676 + return container_of(alg, struct scomp_alg, base);
26679 +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
26681 + return container_of(tfm, struct crypto_scomp, base);
26684 +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
26686 + return &tfm->base;
26689 +static inline void crypto_free_scomp(struct crypto_scomp *tfm)
26691 + crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
26694 +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
26696 + return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
26699 +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
26701 + return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
26704 +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
26707 + return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
26710 +static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
26711 + const u8 *src, unsigned int slen,
26712 + u8 *dst, unsigned int *dlen, void *ctx)
26714 + return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
26717 +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
26718 + const u8 *src, unsigned int slen,
26719 + u8 *dst, unsigned int *dlen,
26722 + return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
26726 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
26727 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
26728 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
26731 + * crypto_register_scomp() -- Register synchronous compression algorithm
26733 + * Function registers an implementation of a synchronous
26734 + * compression algorithm
26736 + * @alg: algorithm definition
26738 + * Return: zero on success; error code in case of error
26740 +int crypto_register_scomp(struct scomp_alg *alg);
26743 + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
26745 + * Function unregisters an implementation of a synchronous
26746 + * compression algorithm
26748 + * @alg: algorithm definition
26750 + * Return: zero on success; error code in case of error
26752 +int crypto_unregister_scomp(struct scomp_alg *alg);
26755 --- a/include/linux/crypto.h
26756 +++ b/include/linux/crypto.h
26758 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
26759 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
26760 #define CRYPTO_ALG_TYPE_KPP 0x00000008
26761 +#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
26762 +#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
26763 #define CRYPTO_ALG_TYPE_RNG 0x0000000c
26764 #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
26765 #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
26767 #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
26768 #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
26769 #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
26770 +#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
26772 #define CRYPTO_ALG_LARVAL 0x00000010
26773 #define CRYPTO_ALG_DEAD 0x00000020
26774 --- a/include/uapi/linux/cryptouser.h
26775 +++ b/include/uapi/linux/cryptouser.h
26776 @@ -46,6 +46,7 @@ enum crypto_attr_type_t {
26777 CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
26778 CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
26779 CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
26780 + CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
26783 #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
26784 @@ -112,5 +113,9 @@ struct crypto_report_kpp {
26785 char type[CRYPTO_MAX_NAME];
26788 +struct crypto_report_acomp {
26789 + char type[CRYPTO_MAX_NAME];
26792 #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
26793 sizeof(struct crypto_report_blkcipher))
26794 --- a/scripts/spelling.txt
26795 +++ b/scripts/spelling.txt
26796 @@ -305,6 +305,9 @@ defintion||definition
26797 defintions||definitions
26800 +deintializing||deinitializing
26801 +deintialize||deinitialize
26802 +deintialized||deinitialized
26806 --- a/sound/soc/amd/acp-pcm-dma.c
26807 +++ b/sound/soc/amd/acp-pcm-dma.c
26808 @@ -506,7 +506,7 @@ static int acp_init(void __iomem *acp_mm
26812 -/* Deintialize ACP */
26813 +/* Deinitialize ACP */
26814 static int acp_deinit(void __iomem *acp_mmio)