1 From a3310d64d7cb1ba0f9279e77d21f13a75fa66ab5 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 17 Jan 2018 15:29:23 +0800
4 Subject: [PATCH 16/30] crypto: support layerscape
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
9 This is an integrated patch for layerscape sec support.
11 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
12 Signed-off-by: Fabio Estevam <festevam@gmail.com>
13 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
14 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
15 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
16 Signed-off-by: Eric Biggers <ebiggers@google.com>
17 Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
18 Signed-off-by: Xulin Sun <xulin.sun@windriver.com>
19 Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
20 Signed-off-by: Marcus Folkesson <marcus.folkesson@gmail.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Andrew Lutomirski <luto@kernel.org>
23 Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
24 Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
25 Signed-off-by: Marcelo Cerri <marcelo.cerri@canonical.com>
26 Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
27 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
28 Signed-off-by: Laura Abbott <labbott@redhat.com>
29 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
30 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
34 crypto/acompress.c | 169 +
35 crypto/algboss.c | 12 +-
36 crypto/crypto_user.c | 19 +
37 crypto/scompress.c | 356 ++
38 crypto/tcrypt.c | 17 +-
39 crypto/testmgr.c | 1708 +++---
40 crypto/testmgr.h | 1125 ++--
41 crypto/tls.c | 607 +++
42 drivers/crypto/caam/Kconfig | 77 +-
43 drivers/crypto/caam/Makefile | 16 +-
44 drivers/crypto/caam/caamalg.c | 2171 ++------
45 drivers/crypto/caam/caamalg_desc.c | 1961 +++++++
46 drivers/crypto/caam/caamalg_desc.h | 127 +
47 drivers/crypto/caam/caamalg_qi.c | 2929 ++++++++++
48 drivers/crypto/caam/caamalg_qi2.c | 5920 +++++++++++++++++++++
49 drivers/crypto/caam/caamalg_qi2.h | 281 +
50 drivers/crypto/caam/caamhash.c | 550 +-
51 drivers/crypto/caam/caamhash_desc.c | 108 +
52 drivers/crypto/caam/caamhash_desc.h | 49 +
53 drivers/crypto/caam/caampkc.c | 471 +-
54 drivers/crypto/caam/caampkc.h | 58 +
55 drivers/crypto/caam/caamrng.c | 16 +-
56 drivers/crypto/caam/compat.h | 1 +
57 drivers/crypto/caam/ctrl.c | 358 +-
58 drivers/crypto/caam/ctrl.h | 2 +
59 drivers/crypto/caam/desc.h | 84 +-
60 drivers/crypto/caam/desc_constr.h | 180 +-
61 drivers/crypto/caam/dpseci.c | 859 +++
62 drivers/crypto/caam/dpseci.h | 395 ++
63 drivers/crypto/caam/dpseci_cmd.h | 261 +
64 drivers/crypto/caam/error.c | 127 +-
65 drivers/crypto/caam/error.h | 10 +-
66 drivers/crypto/caam/intern.h | 31 +-
67 drivers/crypto/caam/jr.c | 72 +-
68 drivers/crypto/caam/jr.h | 2 +
69 drivers/crypto/caam/key_gen.c | 32 +-
70 drivers/crypto/caam/key_gen.h | 36 +-
71 drivers/crypto/caam/pdb.h | 62 +
72 drivers/crypto/caam/pkc_desc.c | 36 +
73 drivers/crypto/caam/qi.c | 797 +++
74 drivers/crypto/caam/qi.h | 204 +
75 drivers/crypto/caam/regs.h | 63 +-
76 drivers/crypto/caam/sg_sw_qm.h | 126 +
77 drivers/crypto/caam/sg_sw_qm2.h | 81 +
78 drivers/crypto/caam/sg_sw_sec4.h | 60 +-
79 drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +-
80 drivers/staging/wilc1000/linux_wlan.c | 2 +-
81 drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +-
82 include/crypto/acompress.h | 269 +
83 include/crypto/internal/acompress.h | 81 +
84 include/crypto/internal/scompress.h | 136 +
85 include/linux/crypto.h | 3 +
86 include/uapi/linux/cryptouser.h | 5 +
87 scripts/spelling.txt | 3 +
88 sound/soc/amd/acp-pcm-dma.c | 2 +-
89 57 files changed, 19177 insertions(+), 3988 deletions(-)
90 create mode 100644 crypto/acompress.c
91 create mode 100644 crypto/scompress.c
92 create mode 100644 crypto/tls.c
93 create mode 100644 drivers/crypto/caam/caamalg_desc.c
94 create mode 100644 drivers/crypto/caam/caamalg_desc.h
95 create mode 100644 drivers/crypto/caam/caamalg_qi.c
96 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
97 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
98 create mode 100644 drivers/crypto/caam/caamhash_desc.c
99 create mode 100644 drivers/crypto/caam/caamhash_desc.h
100 create mode 100644 drivers/crypto/caam/dpseci.c
101 create mode 100644 drivers/crypto/caam/dpseci.h
102 create mode 100644 drivers/crypto/caam/dpseci_cmd.h
103 create mode 100644 drivers/crypto/caam/qi.c
104 create mode 100644 drivers/crypto/caam/qi.h
105 create mode 100644 drivers/crypto/caam/sg_sw_qm.h
106 create mode 100644 drivers/crypto/caam/sg_sw_qm2.h
107 create mode 100644 include/crypto/acompress.h
108 create mode 100644 include/crypto/internal/acompress.h
109 create mode 100644 include/crypto/internal/scompress.h
113 @@ -102,6 +102,15 @@ config CRYPTO_KPP
117 +config CRYPTO_ACOMP2
119 + select CRYPTO_ALGAPI2
123 + select CRYPTO_ALGAPI
124 + select CRYPTO_ACOMP2
127 tristate "RSA algorithm"
128 select CRYPTO_AKCIPHER
129 @@ -138,6 +147,7 @@ config CRYPTO_MANAGER2
130 select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
131 select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
132 select CRYPTO_KPP2 if !CRYPTO_MANAGER_DISABLE_TESTS
133 + select CRYPTO_ACOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
136 tristate "Userspace cryptographic algorithm configuration"
137 @@ -295,6 +305,26 @@ config CRYPTO_ECHAINIV
138 a sequence number xored with a salt. This is the default
142 + tristate "TLS support"
144 + select CRYPTO_BLKCIPHER
145 + select CRYPTO_MANAGER
148 + select CRYPTO_AUTHENC
150 + Support for TLS 1.0 record encryption and decryption
152 + This module adds support for encryption/decryption of TLS 1.0 frames
153 + using blockcipher algorithms. The name of the resulting algorithm is
154 + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
155 + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
156 + accelerated versions will be used automatically if available.
158 + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
159 + operations through AF_ALG or cryptodev interfaces
161 comment "Block modes"
164 --- a/crypto/Makefile
165 +++ b/crypto/Makefile
166 @@ -51,6 +51,9 @@ rsa_generic-y += rsa_helper.o
167 rsa_generic-y += rsa-pkcs1pad.o
168 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
170 +obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
171 +obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
173 cryptomgr-y := algboss.o testmgr.o
175 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
176 @@ -115,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
177 obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
178 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
179 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
180 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
181 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
182 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
183 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
185 +++ b/crypto/acompress.c
188 + * Asynchronous Compression operations
190 + * Copyright (c) 2016, Intel Corporation
191 + * Authors: Weigang Li <weigang.li@intel.com>
192 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
194 + * This program is free software; you can redistribute it and/or modify it
195 + * under the terms of the GNU General Public License as published by the Free
196 + * Software Foundation; either version 2 of the License, or (at your option)
197 + * any later version.
200 +#include <linux/errno.h>
201 +#include <linux/kernel.h>
202 +#include <linux/module.h>
203 +#include <linux/seq_file.h>
204 +#include <linux/slab.h>
205 +#include <linux/string.h>
206 +#include <linux/crypto.h>
207 +#include <crypto/algapi.h>
208 +#include <linux/cryptouser.h>
209 +#include <net/netlink.h>
210 +#include <crypto/internal/acompress.h>
211 +#include <crypto/internal/scompress.h>
212 +#include "internal.h"
214 +static const struct crypto_type crypto_acomp_type;
217 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
219 + struct crypto_report_acomp racomp;
221 + strncpy(racomp.type, "acomp", sizeof(racomp.type));
223 + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
224 + sizeof(struct crypto_report_acomp), &racomp))
225 + goto nla_put_failure;
232 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
238 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
239 + __attribute__ ((unused));
241 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
243 + seq_puts(m, "type : acomp\n");
246 +static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
248 + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
249 + struct acomp_alg *alg = crypto_acomp_alg(acomp);
254 +static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
256 + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
257 + struct acomp_alg *alg = crypto_acomp_alg(acomp);
259 + if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
260 + return crypto_init_scomp_ops_async(tfm);
262 + acomp->compress = alg->compress;
263 + acomp->decompress = alg->decompress;
264 + acomp->dst_free = alg->dst_free;
265 + acomp->reqsize = alg->reqsize;
268 + acomp->base.exit = crypto_acomp_exit_tfm;
271 + return alg->init(acomp);
276 +static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
278 + int extsize = crypto_alg_extsize(alg);
280 + if (alg->cra_type != &crypto_acomp_type)
281 + extsize += sizeof(struct crypto_scomp *);
286 +static const struct crypto_type crypto_acomp_type = {
287 + .extsize = crypto_acomp_extsize,
288 + .init_tfm = crypto_acomp_init_tfm,
289 +#ifdef CONFIG_PROC_FS
290 + .show = crypto_acomp_show,
292 + .report = crypto_acomp_report,
293 + .maskclear = ~CRYPTO_ALG_TYPE_MASK,
294 + .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
295 + .type = CRYPTO_ALG_TYPE_ACOMPRESS,
296 + .tfmsize = offsetof(struct crypto_acomp, base),
299 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
302 + return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
304 +EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
306 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
308 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
309 + struct acomp_req *req;
311 + req = __acomp_request_alloc(acomp);
312 + if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
313 + return crypto_acomp_scomp_alloc_ctx(req);
317 +EXPORT_SYMBOL_GPL(acomp_request_alloc);
319 +void acomp_request_free(struct acomp_req *req)
321 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
322 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
324 + if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
325 + crypto_acomp_scomp_free_ctx(req);
327 + if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
328 + acomp->dst_free(req->dst);
332 + __acomp_request_free(req);
334 +EXPORT_SYMBOL_GPL(acomp_request_free);
336 +int crypto_register_acomp(struct acomp_alg *alg)
338 + struct crypto_alg *base = &alg->base;
340 + base->cra_type = &crypto_acomp_type;
341 + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
342 + base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
344 + return crypto_register_alg(base);
346 +EXPORT_SYMBOL_GPL(crypto_register_acomp);
348 +int crypto_unregister_acomp(struct acomp_alg *alg)
350 + return crypto_unregister_alg(&alg->base);
352 +EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
354 +MODULE_LICENSE("GPL");
355 +MODULE_DESCRIPTION("Asynchronous compression type");
356 --- a/crypto/algboss.c
357 +++ b/crypto/algboss.c
358 @@ -245,17 +245,9 @@ static int cryptomgr_schedule_test(struc
359 memcpy(param->alg, alg->cra_name, sizeof(param->alg));
360 type = alg->cra_flags;
362 - /* This piece of crap needs to disappear into per-type test hooks. */
363 -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
364 - type |= CRYPTO_ALG_TESTED;
366 - if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
367 - CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
368 - ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
369 - CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
370 - alg->cra_ablkcipher.ivsize))
371 + /* Do not test internal algorithms. */
372 + if (type & CRYPTO_ALG_INTERNAL)
373 type |= CRYPTO_ALG_TESTED;
378 --- a/crypto/crypto_user.c
379 +++ b/crypto/crypto_user.c
380 @@ -115,6 +115,21 @@ nla_put_failure:
384 +static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
386 + struct crypto_report_acomp racomp;
388 + strncpy(racomp.type, "acomp", sizeof(racomp.type));
390 + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
391 + sizeof(struct crypto_report_acomp), &racomp))
392 + goto nla_put_failure;
399 static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
401 struct crypto_report_akcipher rakcipher;
402 @@ -189,7 +204,11 @@ static int crypto_report_one(struct cryp
403 goto nla_put_failure;
406 + case CRYPTO_ALG_TYPE_ACOMPRESS:
407 + if (crypto_report_acomp(skb, alg))
408 + goto nla_put_failure;
411 case CRYPTO_ALG_TYPE_AKCIPHER:
412 if (crypto_report_akcipher(skb, alg))
413 goto nla_put_failure;
415 +++ b/crypto/scompress.c
418 + * Synchronous Compression operations
420 + * Copyright 2015 LG Electronics Inc.
421 + * Copyright (c) 2016, Intel Corporation
422 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
424 + * This program is free software; you can redistribute it and/or modify it
425 + * under the terms of the GNU General Public License as published by the Free
426 + * Software Foundation; either version 2 of the License, or (at your option)
427 + * any later version.
430 +#include <linux/errno.h>
431 +#include <linux/kernel.h>
432 +#include <linux/module.h>
433 +#include <linux/seq_file.h>
434 +#include <linux/slab.h>
435 +#include <linux/string.h>
436 +#include <linux/crypto.h>
437 +#include <linux/vmalloc.h>
438 +#include <crypto/algapi.h>
439 +#include <linux/cryptouser.h>
440 +#include <net/netlink.h>
441 +#include <linux/scatterlist.h>
442 +#include <crypto/scatterwalk.h>
443 +#include <crypto/internal/acompress.h>
444 +#include <crypto/internal/scompress.h>
445 +#include "internal.h"
447 +static const struct crypto_type crypto_scomp_type;
448 +static void * __percpu *scomp_src_scratches;
449 +static void * __percpu *scomp_dst_scratches;
450 +static int scomp_scratch_users;
451 +static DEFINE_MUTEX(scomp_lock);
454 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
456 + struct crypto_report_comp rscomp;
458 + strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
460 + if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
461 + sizeof(struct crypto_report_comp), &rscomp))
462 + goto nla_put_failure;
469 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
475 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
476 + __attribute__ ((unused));
478 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
480 + seq_puts(m, "type : scomp\n");
483 +static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
488 +static void crypto_scomp_free_scratches(void * __percpu *scratches)
495 + for_each_possible_cpu(i)
496 + vfree(*per_cpu_ptr(scratches, i));
498 + free_percpu(scratches);
501 +static void * __percpu *crypto_scomp_alloc_scratches(void)
503 + void * __percpu *scratches;
506 + scratches = alloc_percpu(void *);
510 + for_each_possible_cpu(i) {
513 + scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
516 + *per_cpu_ptr(scratches, i) = scratch;
522 + crypto_scomp_free_scratches(scratches);
526 +static void crypto_scomp_free_all_scratches(void)
528 + if (!--scomp_scratch_users) {
529 + crypto_scomp_free_scratches(scomp_src_scratches);
530 + crypto_scomp_free_scratches(scomp_dst_scratches);
531 + scomp_src_scratches = NULL;
532 + scomp_dst_scratches = NULL;
536 +static int crypto_scomp_alloc_all_scratches(void)
538 + if (!scomp_scratch_users++) {
539 + scomp_src_scratches = crypto_scomp_alloc_scratches();
540 + if (!scomp_src_scratches)
542 + scomp_dst_scratches = crypto_scomp_alloc_scratches();
543 + if (!scomp_dst_scratches)
549 +static void crypto_scomp_sg_free(struct scatterlist *sgl)
558 + for_each_sg(sgl, sgl, n, i) {
559 + page = sg_page(sgl);
567 +static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
569 + struct scatterlist *sgl;
573 + n = ((size - 1) >> PAGE_SHIFT) + 1;
575 + sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
579 + sg_init_table(sgl, n);
581 + for (i = 0; i < n; i++) {
582 + page = alloc_page(gfp);
585 + sg_set_page(sgl + i, page, PAGE_SIZE, 0);
591 + sg_mark_end(sgl + i);
592 + crypto_scomp_sg_free(sgl);
596 +static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
598 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
599 + void **tfm_ctx = acomp_tfm_ctx(tfm);
600 + struct crypto_scomp *scomp = *tfm_ctx;
601 + void **ctx = acomp_request_ctx(req);
602 + const int cpu = get_cpu();
603 + u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
604 + u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
607 + if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
612 + if (req->dst && !req->dlen) {
617 + if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
618 + req->dlen = SCOMP_SCRATCH_SIZE;
620 + scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
622 + ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
623 + scratch_dst, &req->dlen, *ctx);
625 + ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
626 + scratch_dst, &req->dlen, *ctx);
629 + req->dst = crypto_scomp_sg_alloc(req->dlen,
630 + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
631 + GFP_KERNEL : GFP_ATOMIC);
635 + scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
643 +static int scomp_acomp_compress(struct acomp_req *req)
645 + return scomp_acomp_comp_decomp(req, 1);
648 +static int scomp_acomp_decompress(struct acomp_req *req)
650 + return scomp_acomp_comp_decomp(req, 0);
653 +static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
655 + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
657 + crypto_free_scomp(*ctx);
660 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
662 + struct crypto_alg *calg = tfm->__crt_alg;
663 + struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
664 + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
665 + struct crypto_scomp *scomp;
667 + if (!crypto_mod_get(calg))
670 + scomp = crypto_create_tfm(calg, &crypto_scomp_type);
671 + if (IS_ERR(scomp)) {
672 + crypto_mod_put(calg);
673 + return PTR_ERR(scomp);
677 + tfm->exit = crypto_exit_scomp_ops_async;
679 + crt->compress = scomp_acomp_compress;
680 + crt->decompress = scomp_acomp_decompress;
681 + crt->dst_free = crypto_scomp_sg_free;
682 + crt->reqsize = sizeof(void *);
687 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
689 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
690 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
691 + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
692 + struct crypto_scomp *scomp = *tfm_ctx;
695 + ctx = crypto_scomp_alloc_ctx(scomp);
706 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
708 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
709 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
710 + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
711 + struct crypto_scomp *scomp = *tfm_ctx;
712 + void *ctx = *req->__ctx;
715 + crypto_scomp_free_ctx(scomp, ctx);
718 +static const struct crypto_type crypto_scomp_type = {
719 + .extsize = crypto_alg_extsize,
720 + .init_tfm = crypto_scomp_init_tfm,
721 +#ifdef CONFIG_PROC_FS
722 + .show = crypto_scomp_show,
724 + .report = crypto_scomp_report,
725 + .maskclear = ~CRYPTO_ALG_TYPE_MASK,
726 + .maskset = CRYPTO_ALG_TYPE_MASK,
727 + .type = CRYPTO_ALG_TYPE_SCOMPRESS,
728 + .tfmsize = offsetof(struct crypto_scomp, base),
731 +int crypto_register_scomp(struct scomp_alg *alg)
733 + struct crypto_alg *base = &alg->base;
736 + mutex_lock(&scomp_lock);
737 + if (crypto_scomp_alloc_all_scratches())
740 + base->cra_type = &crypto_scomp_type;
741 + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
742 + base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
744 + ret = crypto_register_alg(base);
748 + mutex_unlock(&scomp_lock);
752 + crypto_scomp_free_all_scratches();
753 + mutex_unlock(&scomp_lock);
756 +EXPORT_SYMBOL_GPL(crypto_register_scomp);
758 +int crypto_unregister_scomp(struct scomp_alg *alg)
762 + mutex_lock(&scomp_lock);
763 + ret = crypto_unregister_alg(&alg->base);
764 + crypto_scomp_free_all_scratches();
765 + mutex_unlock(&scomp_lock);
769 +EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
771 +MODULE_LICENSE("GPL");
772 +MODULE_DESCRIPTION("Synchronous compression type");
773 --- a/crypto/tcrypt.c
774 +++ b/crypto/tcrypt.c
775 @@ -74,7 +74,7 @@ static char *check[] = {
776 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
777 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
778 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
783 struct tcrypt_result {
784 @@ -1336,6 +1336,10 @@ static int do_test(const char *alg, u32
785 ret += tcrypt_test("hmac(sha3-512)");
789 + ret += tcrypt_test("rsa");
793 ret += tcrypt_test("ansi_cprng");
795 @@ -1397,6 +1401,9 @@ static int do_test(const char *alg, u32
797 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
800 + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
803 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
804 speed_template_16_24_32);
805 @@ -1411,9 +1418,9 @@ static int do_test(const char *alg, u32
806 test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
807 speed_template_32_40_48);
808 test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
809 - speed_template_32_48_64);
810 + speed_template_32_64);
811 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
812 - speed_template_32_48_64);
813 + speed_template_32_64);
814 test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
815 speed_template_16_24_32);
816 test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
817 @@ -1844,9 +1851,9 @@ static int do_test(const char *alg, u32
818 test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
819 speed_template_32_40_48);
820 test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
821 - speed_template_32_48_64);
822 + speed_template_32_64);
823 test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
824 - speed_template_32_48_64);
825 + speed_template_32_64);
826 test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
827 speed_template_16_24_32);
828 test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
829 --- a/crypto/testmgr.c
830 +++ b/crypto/testmgr.c
832 #include <crypto/drbg.h>
833 #include <crypto/akcipher.h>
834 #include <crypto/kpp.h>
835 +#include <crypto/acompress.h>
837 #include "internal.h"
839 @@ -62,7 +63,7 @@ int alg_test(const char *driver, const c
848 @@ -82,47 +83,54 @@ struct tcrypt_result {
850 struct aead_test_suite {
852 - struct aead_testvec *vecs;
853 + const struct aead_testvec *vecs;
858 struct cipher_test_suite {
860 - struct cipher_testvec *vecs;
861 + const struct cipher_testvec *vecs;
866 struct comp_test_suite {
868 - struct comp_testvec *vecs;
869 + const struct comp_testvec *vecs;
874 struct hash_test_suite {
875 - struct hash_testvec *vecs;
876 + const struct hash_testvec *vecs;
880 struct cprng_test_suite {
881 - struct cprng_testvec *vecs;
882 + const struct cprng_testvec *vecs;
886 struct drbg_test_suite {
887 - struct drbg_testvec *vecs;
888 + const struct drbg_testvec *vecs;
892 +struct tls_test_suite {
894 + struct tls_testvec *vecs;
895 + unsigned int count;
899 struct akcipher_test_suite {
900 - struct akcipher_testvec *vecs;
901 + const struct akcipher_testvec *vecs;
905 struct kpp_test_suite {
906 - struct kpp_testvec *vecs;
907 + const struct kpp_testvec *vecs;
911 @@ -139,12 +147,14 @@ struct alg_test_desc {
912 struct hash_test_suite hash;
913 struct cprng_test_suite cprng;
914 struct drbg_test_suite drbg;
915 + struct tls_test_suite tls;
916 struct akcipher_test_suite akcipher;
917 struct kpp_test_suite kpp;
921 -static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
922 +static const unsigned int IDX[8] = {
923 + IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
925 static void hexdump(unsigned char *buf, unsigned int len)
927 @@ -202,7 +212,7 @@ static int wait_async_op(struct tcrypt_r
930 static int ahash_partial_update(struct ahash_request **preq,
931 - struct crypto_ahash *tfm, struct hash_testvec *template,
932 + struct crypto_ahash *tfm, const struct hash_testvec *template,
933 void *hash_buff, int k, int temp, struct scatterlist *sg,
934 const char *algo, char *result, struct tcrypt_result *tresult)
936 @@ -259,11 +269,12 @@ out_nostate:
940 -static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
941 - unsigned int tcount, bool use_digest,
942 - const int align_offset)
943 +static int __test_hash(struct crypto_ahash *tfm,
944 + const struct hash_testvec *template, unsigned int tcount,
945 + bool use_digest, const int align_offset)
947 const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
948 + size_t digest_size = crypto_ahash_digestsize(tfm);
949 unsigned int i, j, k, temp;
950 struct scatterlist sg[8];
952 @@ -274,7 +285,7 @@ static int __test_hash(struct crypto_aha
953 char *xbuf[XBUFSIZE];
956 - result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
957 + result = kmalloc(digest_size, GFP_KERNEL);
960 key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
961 @@ -304,7 +315,7 @@ static int __test_hash(struct crypto_aha
965 - memset(result, 0, MAX_DIGEST_SIZE);
966 + memset(result, 0, digest_size);
969 hash_buff += align_offset;
970 @@ -379,7 +390,7 @@ static int __test_hash(struct crypto_aha
974 - memset(result, 0, MAX_DIGEST_SIZE);
975 + memset(result, 0, digest_size);
978 sg_init_table(sg, template[i].np);
979 @@ -457,7 +468,7 @@ static int __test_hash(struct crypto_aha
983 - memset(result, 0, MAX_DIGEST_SIZE);
984 + memset(result, 0, digest_size);
988 @@ -536,7 +547,8 @@ out_nobuf:
992 -static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
993 +static int test_hash(struct crypto_ahash *tfm,
994 + const struct hash_testvec *template,
995 unsigned int tcount, bool use_digest)
997 unsigned int alignmask;
998 @@ -564,7 +576,7 @@ static int test_hash(struct crypto_ahash
1001 static int __test_aead(struct crypto_aead *tfm, int enc,
1002 - struct aead_testvec *template, unsigned int tcount,
1003 + const struct aead_testvec *template, unsigned int tcount,
1004 const bool diff_dst, const int align_offset)
1006 const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1007 @@ -955,7 +967,7 @@ out_noxbuf:
1010 static int test_aead(struct crypto_aead *tfm, int enc,
1011 - struct aead_testvec *template, unsigned int tcount)
1012 + const struct aead_testvec *template, unsigned int tcount)
1014 unsigned int alignmask;
1016 @@ -987,8 +999,236 @@ static int test_aead(struct crypto_aead
1020 +static int __test_tls(struct crypto_aead *tfm, int enc,
1021 + struct tls_testvec *template, unsigned int tcount,
1022 + const bool diff_dst)
1024 + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1025 + unsigned int i, k, authsize;
1027 + struct aead_request *req;
1028 + struct scatterlist *sg;
1029 + struct scatterlist *sgout;
1030 + const char *e, *d;
1031 + struct tcrypt_result result;
1037 + char *xbuf[XBUFSIZE];
1038 + char *xoutbuf[XBUFSIZE];
1039 + char *axbuf[XBUFSIZE];
1040 + int ret = -ENOMEM;
1042 + if (testmgr_alloc_buf(xbuf))
1045 + if (diff_dst && testmgr_alloc_buf(xoutbuf))
1046 + goto out_nooutbuf;
1048 + if (testmgr_alloc_buf(axbuf))
1051 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
1055 + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
1059 + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
1065 + d = diff_dst ? "-ddst" : "";
1066 + e = enc ? "encryption" : "decryption";
1068 + init_completion(&result.completion);
1070 + req = aead_request_alloc(tfm, GFP_KERNEL);
1072 + pr_err("alg: tls%s: Failed to allocate request for %s\n",
1077 + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1078 + tcrypt_complete, &result);
1080 + for (i = 0; i < tcount; i++) {
1085 + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
1086 + template[i].alen > PAGE_SIZE))
1089 + memcpy(assoc, template[i].assoc, template[i].alen);
1090 + memcpy(input, template[i].input, template[i].ilen);
1092 + if (template[i].iv)
1093 + memcpy(iv, template[i].iv, MAX_IVLEN);
1095 + memset(iv, 0, MAX_IVLEN);
1097 + crypto_aead_clear_flags(tfm, ~0);
1099 + if (template[i].klen > MAX_KEYLEN) {
1100 + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
1101 + d, i, algo, template[i].klen, MAX_KEYLEN);
1105 + memcpy(key, template[i].key, template[i].klen);
1107 + ret = crypto_aead_setkey(tfm, key, template[i].klen);
1108 + if (!ret == template[i].fail) {
1109 + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
1110 + d, i, algo, crypto_aead_get_flags(tfm));
1116 + ret = crypto_aead_setauthsize(tfm, authsize);
1118 + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
1119 + d, authsize, i, algo);
1123 + k = !!template[i].alen;
1124 + sg_init_table(sg, k + 1);
1125 + sg_set_buf(&sg[0], assoc, template[i].alen);
1126 + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
1127 + template[i].ilen));
1131 + sg_init_table(sgout, k + 1);
1132 + sg_set_buf(&sgout[0], assoc, template[i].alen);
1134 + output = xoutbuf[0];
1135 + sg_set_buf(&sgout[k], output,
1136 + (enc ? template[i].rlen : template[i].ilen));
1139 + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
1140 + template[i].ilen, iv);
1142 + aead_request_set_ad(req, template[i].alen);
1144 + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
1148 + if (template[i].novrfy) {
1149 + /* verification was supposed to fail */
1150 + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
1152 + /* so really, we got a bad message */
1157 + case -EINPROGRESS:
1159 + wait_for_completion(&result.completion);
1160 + reinit_completion(&result.completion);
1165 + /* verification failure was expected */
1166 + if (template[i].novrfy)
1168 + /* fall through */
1170 + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
1171 + d, e, i, algo, -ret);
1176 + if (memcmp(q, template[i].result, template[i].rlen)) {
1177 + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
1179 + hexdump(q, template[i].rlen);
1180 + pr_err("should be:\n");
1181 + hexdump(template[i].result, template[i].rlen);
1188 + aead_request_free(req);
1196 + testmgr_free_buf(axbuf);
1199 + testmgr_free_buf(xoutbuf);
1201 + testmgr_free_buf(xbuf);
1206 +static int test_tls(struct crypto_aead *tfm, int enc,
1207 + struct tls_testvec *template, unsigned int tcount)
1210 + /* test 'dst == src' case */
1211 + ret = __test_tls(tfm, enc, template, tcount, false);
1214 + /* test 'dst != src' case */
1215 + return __test_tls(tfm, enc, template, tcount, true);
1218 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
1219 + u32 type, u32 mask)
1221 + struct crypto_aead *tfm;
1224 + tfm = crypto_alloc_aead(driver, type, mask);
1225 + if (IS_ERR(tfm)) {
1226 + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
1227 + driver, PTR_ERR(tfm));
1228 + return PTR_ERR(tfm);
1231 + if (desc->suite.tls.enc.vecs) {
1232 + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
1233 + desc->suite.tls.enc.count);
1238 + if (!err && desc->suite.tls.dec.vecs)
1239 + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
1240 + desc->suite.tls.dec.count);
1243 + crypto_free_aead(tfm);
1247 static int test_cipher(struct crypto_cipher *tfm, int enc,
1248 - struct cipher_testvec *template, unsigned int tcount)
1249 + const struct cipher_testvec *template,
1250 + unsigned int tcount)
1252 const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
1253 unsigned int i, j, k;
1254 @@ -1066,7 +1306,8 @@ out_nobuf:
1257 static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
1258 - struct cipher_testvec *template, unsigned int tcount,
1259 + const struct cipher_testvec *template,
1260 + unsigned int tcount,
1261 const bool diff_dst, const int align_offset)
1264 @@ -1079,12 +1320,16 @@ static int __test_skcipher(struct crypto
1266 struct tcrypt_result result;
1268 - char iv[MAX_IVLEN];
1270 char *xbuf[XBUFSIZE];
1271 char *xoutbuf[XBUFSIZE];
1273 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1275 + iv = kmalloc(MAX_IVLEN, GFP_KERNEL);
1279 if (testmgr_alloc_buf(xbuf))
1282 @@ -1325,12 +1570,14 @@ out:
1283 testmgr_free_buf(xoutbuf);
1285 testmgr_free_buf(xbuf);
1291 static int test_skcipher(struct crypto_skcipher *tfm, int enc,
1292 - struct cipher_testvec *template, unsigned int tcount)
1293 + const struct cipher_testvec *template,
1294 + unsigned int tcount)
1296 unsigned int alignmask;
1298 @@ -1362,8 +1609,10 @@ static int test_skcipher(struct crypto_s
1302 -static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
1303 - struct comp_testvec *dtemplate, int ctcount, int dtcount)
1304 +static int test_comp(struct crypto_comp *tfm,
1305 + const struct comp_testvec *ctemplate,
1306 + const struct comp_testvec *dtemplate,
1307 + int ctcount, int dtcount)
1309 const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
1311 @@ -1442,7 +1691,154 @@ out:
1315 -static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
1316 +static int test_acomp(struct crypto_acomp *tfm,
1317 + const struct comp_testvec *ctemplate,
1318 + const struct comp_testvec *dtemplate,
1319 + int ctcount, int dtcount)
1321 + const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
1325 + struct scatterlist src, dst;
1326 + struct acomp_req *req;
1327 + struct tcrypt_result result;
1329 + output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
1333 + for (i = 0; i < ctcount; i++) {
1334 + unsigned int dlen = COMP_BUF_SIZE;
1335 + int ilen = ctemplate[i].inlen;
1338 + input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
1344 + memset(output, 0, dlen);
1345 + init_completion(&result.completion);
1346 + sg_init_one(&src, input_vec, ilen);
1347 + sg_init_one(&dst, output, dlen);
1349 + req = acomp_request_alloc(tfm);
1351 + pr_err("alg: acomp: request alloc failed for %s\n",
1358 + acomp_request_set_params(req, &src, &dst, ilen, dlen);
1359 + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1360 + tcrypt_complete, &result);
1362 + ret = wait_async_op(&result, crypto_acomp_compress(req));
1364 + pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
1365 + i + 1, algo, -ret);
1367 + acomp_request_free(req);
1371 + if (req->dlen != ctemplate[i].outlen) {
1372 + pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
1373 + i + 1, algo, req->dlen);
1376 + acomp_request_free(req);
1380 + if (memcmp(output, ctemplate[i].output, req->dlen)) {
1381 + pr_err("alg: acomp: Compression test %d failed for %s\n",
1383 + hexdump(output, req->dlen);
1386 + acomp_request_free(req);
1391 + acomp_request_free(req);
1394 + for (i = 0; i < dtcount; i++) {
1395 + unsigned int dlen = COMP_BUF_SIZE;
1396 + int ilen = dtemplate[i].inlen;
1399 + input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL);
1405 + memset(output, 0, dlen);
1406 + init_completion(&result.completion);
1407 + sg_init_one(&src, input_vec, ilen);
1408 + sg_init_one(&dst, output, dlen);
1410 + req = acomp_request_alloc(tfm);
1412 + pr_err("alg: acomp: request alloc failed for %s\n",
1419 + acomp_request_set_params(req, &src, &dst, ilen, dlen);
1420 + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1421 + tcrypt_complete, &result);
1423 + ret = wait_async_op(&result, crypto_acomp_decompress(req));
1425 + pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
1426 + i + 1, algo, -ret);
1428 + acomp_request_free(req);
1432 + if (req->dlen != dtemplate[i].outlen) {
1433 + pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
1434 + i + 1, algo, req->dlen);
1437 + acomp_request_free(req);
1441 + if (memcmp(output, dtemplate[i].output, req->dlen)) {
1442 + pr_err("alg: acomp: Decompression test %d failed for %s\n",
1444 + hexdump(output, req->dlen);
1447 + acomp_request_free(req);
1452 + acomp_request_free(req);
1462 +static int test_cprng(struct crypto_rng *tfm,
1463 + const struct cprng_testvec *template,
1464 unsigned int tcount)
1466 const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
1467 @@ -1509,7 +1905,7 @@ static int alg_test_aead(const struct al
1468 struct crypto_aead *tfm;
1471 - tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
1472 + tfm = crypto_alloc_aead(driver, type, mask);
1474 printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
1475 "%ld\n", driver, PTR_ERR(tfm));
1476 @@ -1538,7 +1934,7 @@ static int alg_test_cipher(const struct
1477 struct crypto_cipher *tfm;
1480 - tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1481 + tfm = crypto_alloc_cipher(driver, type, mask);
1483 printk(KERN_ERR "alg: cipher: Failed to load transform for "
1484 "%s: %ld\n", driver, PTR_ERR(tfm));
1485 @@ -1567,7 +1963,7 @@ static int alg_test_skcipher(const struc
1486 struct crypto_skcipher *tfm;
1489 - tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1490 + tfm = crypto_alloc_skcipher(driver, type, mask);
1492 printk(KERN_ERR "alg: skcipher: Failed to load transform for "
1493 "%s: %ld\n", driver, PTR_ERR(tfm));
1494 @@ -1593,22 +1989,38 @@ out:
1495 static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
1498 - struct crypto_comp *tfm;
1499 + struct crypto_comp *comp;
1500 + struct crypto_acomp *acomp;
1502 + u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
1504 - tfm = crypto_alloc_comp(driver, type, mask);
1505 - if (IS_ERR(tfm)) {
1506 - printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
1507 - "%ld\n", driver, PTR_ERR(tfm));
1508 - return PTR_ERR(tfm);
1510 + if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
1511 + acomp = crypto_alloc_acomp(driver, type, mask);
1512 + if (IS_ERR(acomp)) {
1513 + pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
1514 + driver, PTR_ERR(acomp));
1515 + return PTR_ERR(acomp);
1517 + err = test_acomp(acomp, desc->suite.comp.comp.vecs,
1518 + desc->suite.comp.decomp.vecs,
1519 + desc->suite.comp.comp.count,
1520 + desc->suite.comp.decomp.count);
1521 + crypto_free_acomp(acomp);
1523 + comp = crypto_alloc_comp(driver, type, mask);
1524 + if (IS_ERR(comp)) {
1525 + pr_err("alg: comp: Failed to load transform for %s: %ld\n",
1526 + driver, PTR_ERR(comp));
1527 + return PTR_ERR(comp);
1530 - err = test_comp(tfm, desc->suite.comp.comp.vecs,
1531 - desc->suite.comp.decomp.vecs,
1532 - desc->suite.comp.comp.count,
1533 - desc->suite.comp.decomp.count);
1534 + err = test_comp(comp, desc->suite.comp.comp.vecs,
1535 + desc->suite.comp.decomp.vecs,
1536 + desc->suite.comp.comp.count,
1537 + desc->suite.comp.decomp.count);
1539 - crypto_free_comp(tfm);
1540 + crypto_free_comp(comp);
1545 @@ -1618,7 +2030,7 @@ static int alg_test_hash(const struct al
1546 struct crypto_ahash *tfm;
1549 - tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1550 + tfm = crypto_alloc_ahash(driver, type, mask);
1552 printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
1553 "%ld\n", driver, PTR_ERR(tfm));
1554 @@ -1646,7 +2058,7 @@ static int alg_test_crc32c(const struct
1558 - tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1559 + tfm = crypto_alloc_shash(driver, type, mask);
1561 printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
1562 "%ld\n", driver, PTR_ERR(tfm));
1563 @@ -1688,7 +2100,7 @@ static int alg_test_cprng(const struct a
1564 struct crypto_rng *rng;
1567 - rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1568 + rng = crypto_alloc_rng(driver, type, mask);
1570 printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
1571 "%ld\n", driver, PTR_ERR(rng));
1572 @@ -1703,7 +2115,7 @@ static int alg_test_cprng(const struct a
1576 -static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1577 +static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
1578 const char *driver, u32 type, u32 mask)
1581 @@ -1715,7 +2127,7 @@ static int drbg_cavs_test(struct drbg_te
1585 - drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1586 + drng = crypto_alloc_rng(driver, type, mask);
1588 printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
1590 @@ -1777,7 +2189,7 @@ static int alg_test_drbg(const struct al
1594 - struct drbg_testvec *template = desc->suite.drbg.vecs;
1595 + const struct drbg_testvec *template = desc->suite.drbg.vecs;
1596 unsigned int tcount = desc->suite.drbg.count;
1598 if (0 == memcmp(driver, "drbg_pr_", 8))
1599 @@ -1796,7 +2208,7 @@ static int alg_test_drbg(const struct al
1603 -static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
1604 +static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
1607 struct kpp_request *req;
1608 @@ -1888,7 +2300,7 @@ free_req:
1611 static int test_kpp(struct crypto_kpp *tfm, const char *alg,
1612 - struct kpp_testvec *vecs, unsigned int tcount)
1613 + const struct kpp_testvec *vecs, unsigned int tcount)
1617 @@ -1909,7 +2321,7 @@ static int alg_test_kpp(const struct alg
1618 struct crypto_kpp *tfm;
1621 - tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
1622 + tfm = crypto_alloc_kpp(driver, type, mask);
1624 pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
1625 driver, PTR_ERR(tfm));
1626 @@ -1924,7 +2336,7 @@ static int alg_test_kpp(const struct alg
1629 static int test_akcipher_one(struct crypto_akcipher *tfm,
1630 - struct akcipher_testvec *vecs)
1631 + const struct akcipher_testvec *vecs)
1633 char *xbuf[XBUFSIZE];
1634 struct akcipher_request *req;
1635 @@ -2044,7 +2456,8 @@ free_xbuf:
1638 static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
1639 - struct akcipher_testvec *vecs, unsigned int tcount)
1640 + const struct akcipher_testvec *vecs,
1641 + unsigned int tcount)
1644 crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
1645 @@ -2068,7 +2481,7 @@ static int alg_test_akcipher(const struc
1646 struct crypto_akcipher *tfm;
1649 - tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1650 + tfm = crypto_alloc_akcipher(driver, type, mask);
1652 pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
1653 driver, PTR_ERR(tfm));
1654 @@ -2088,112 +2501,23 @@ static int alg_test_null(const struct al
1658 +#define __VECS(tv) { .vecs = tv, .count = ARRAY_SIZE(tv) }
1660 /* Please keep this list sorted by algorithm name. */
1661 static const struct alg_test_desc alg_test_descs[] = {
1663 - .alg = "__cbc-cast5-avx",
1664 - .test = alg_test_null,
1666 - .alg = "__cbc-cast6-avx",
1667 - .test = alg_test_null,
1669 - .alg = "__cbc-serpent-avx",
1670 - .test = alg_test_null,
1672 - .alg = "__cbc-serpent-avx2",
1673 - .test = alg_test_null,
1675 - .alg = "__cbc-serpent-sse2",
1676 - .test = alg_test_null,
1678 - .alg = "__cbc-twofish-avx",
1679 - .test = alg_test_null,
1681 - .alg = "__driver-cbc-aes-aesni",
1682 - .test = alg_test_null,
1683 - .fips_allowed = 1,
1685 - .alg = "__driver-cbc-camellia-aesni",
1686 - .test = alg_test_null,
1688 - .alg = "__driver-cbc-camellia-aesni-avx2",
1689 - .test = alg_test_null,
1691 - .alg = "__driver-cbc-cast5-avx",
1692 - .test = alg_test_null,
1694 - .alg = "__driver-cbc-cast6-avx",
1695 - .test = alg_test_null,
1697 - .alg = "__driver-cbc-serpent-avx",
1698 - .test = alg_test_null,
1700 - .alg = "__driver-cbc-serpent-avx2",
1701 - .test = alg_test_null,
1703 - .alg = "__driver-cbc-serpent-sse2",
1704 - .test = alg_test_null,
1706 - .alg = "__driver-cbc-twofish-avx",
1707 - .test = alg_test_null,
1709 - .alg = "__driver-ecb-aes-aesni",
1710 - .test = alg_test_null,
1711 - .fips_allowed = 1,
1713 - .alg = "__driver-ecb-camellia-aesni",
1714 - .test = alg_test_null,
1716 - .alg = "__driver-ecb-camellia-aesni-avx2",
1717 - .test = alg_test_null,
1719 - .alg = "__driver-ecb-cast5-avx",
1720 - .test = alg_test_null,
1722 - .alg = "__driver-ecb-cast6-avx",
1723 - .test = alg_test_null,
1725 - .alg = "__driver-ecb-serpent-avx",
1726 - .test = alg_test_null,
1728 - .alg = "__driver-ecb-serpent-avx2",
1729 - .test = alg_test_null,
1731 - .alg = "__driver-ecb-serpent-sse2",
1732 - .test = alg_test_null,
1734 - .alg = "__driver-ecb-twofish-avx",
1735 - .test = alg_test_null,
1737 - .alg = "__driver-gcm-aes-aesni",
1738 - .test = alg_test_null,
1739 - .fips_allowed = 1,
1741 - .alg = "__ghash-pclmulqdqni",
1742 - .test = alg_test_null,
1743 - .fips_allowed = 1,
1745 .alg = "ansi_cprng",
1746 .test = alg_test_cprng,
1749 - .vecs = ansi_cprng_aes_tv_template,
1750 - .count = ANSI_CPRNG_AES_TEST_VECTORS
1752 + .cprng = __VECS(ansi_cprng_aes_tv_template)
1755 .alg = "authenc(hmac(md5),ecb(cipher_null))",
1756 .test = alg_test_aead,
1760 - .vecs = hmac_md5_ecb_cipher_null_enc_tv_template,
1761 - .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS
1764 - .vecs = hmac_md5_ecb_cipher_null_dec_tv_template,
1765 - .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS
1767 + .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template),
1768 + .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template)
1772 @@ -2201,12 +2525,7 @@ static const struct alg_test_desc alg_te
1773 .test = alg_test_aead,
1778 - hmac_sha1_aes_cbc_enc_tv_temp,
1780 - HMAC_SHA1_AES_CBC_ENC_TEST_VEC
1782 + .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp)
1786 @@ -2214,12 +2533,7 @@ static const struct alg_test_desc alg_te
1787 .test = alg_test_aead,
1792 - hmac_sha1_des_cbc_enc_tv_temp,
1794 - HMAC_SHA1_DES_CBC_ENC_TEST_VEC
1796 + .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp)
1800 @@ -2228,12 +2542,7 @@ static const struct alg_test_desc alg_te
1806 - hmac_sha1_des3_ede_cbc_enc_tv_temp,
1808 - HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC
1810 + .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp)
1814 @@ -2245,18 +2554,8 @@ static const struct alg_test_desc alg_te
1815 .test = alg_test_aead,
1820 - hmac_sha1_ecb_cipher_null_enc_tv_temp,
1822 - HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC
1826 - hmac_sha1_ecb_cipher_null_dec_tv_temp,
1828 - HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC
1830 + .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp),
1831 + .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp)
1835 @@ -2268,12 +2567,7 @@ static const struct alg_test_desc alg_te
1836 .test = alg_test_aead,
1841 - hmac_sha224_des_cbc_enc_tv_temp,
1843 - HMAC_SHA224_DES_CBC_ENC_TEST_VEC
1845 + .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp)
1849 @@ -2282,12 +2576,7 @@ static const struct alg_test_desc alg_te
1855 - hmac_sha224_des3_ede_cbc_enc_tv_temp,
1857 - HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC
1859 + .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp)
1863 @@ -2296,12 +2585,7 @@ static const struct alg_test_desc alg_te
1869 - hmac_sha256_aes_cbc_enc_tv_temp,
1871 - HMAC_SHA256_AES_CBC_ENC_TEST_VEC
1873 + .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp)
1877 @@ -2309,12 +2593,7 @@ static const struct alg_test_desc alg_te
1878 .test = alg_test_aead,
1883 - hmac_sha256_des_cbc_enc_tv_temp,
1885 - HMAC_SHA256_DES_CBC_ENC_TEST_VEC
1887 + .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp)
1891 @@ -2323,12 +2602,7 @@ static const struct alg_test_desc alg_te
1897 - hmac_sha256_des3_ede_cbc_enc_tv_temp,
1899 - HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC
1901 + .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp)
1905 @@ -2344,12 +2618,7 @@ static const struct alg_test_desc alg_te
1906 .test = alg_test_aead,
1911 - hmac_sha384_des_cbc_enc_tv_temp,
1913 - HMAC_SHA384_DES_CBC_ENC_TEST_VEC
1915 + .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp)
1919 @@ -2358,12 +2627,7 @@ static const struct alg_test_desc alg_te
1925 - hmac_sha384_des3_ede_cbc_enc_tv_temp,
1927 - HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC
1929 + .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp)
1933 @@ -2380,12 +2644,7 @@ static const struct alg_test_desc alg_te
1934 .test = alg_test_aead,
1939 - hmac_sha512_aes_cbc_enc_tv_temp,
1941 - HMAC_SHA512_AES_CBC_ENC_TEST_VEC
1943 + .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp)
1947 @@ -2393,12 +2652,7 @@ static const struct alg_test_desc alg_te
1948 .test = alg_test_aead,
1953 - hmac_sha512_des_cbc_enc_tv_temp,
1955 - HMAC_SHA512_DES_CBC_ENC_TEST_VEC
1957 + .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp)
1961 @@ -2407,12 +2661,7 @@ static const struct alg_test_desc alg_te
1967 - hmac_sha512_des3_ede_cbc_enc_tv_temp,
1969 - HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC
1971 + .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp)
1975 @@ -2429,14 +2678,8 @@ static const struct alg_test_desc alg_te
1980 - .vecs = aes_cbc_enc_tv_template,
1981 - .count = AES_CBC_ENC_TEST_VECTORS
1984 - .vecs = aes_cbc_dec_tv_template,
1985 - .count = AES_CBC_DEC_TEST_VECTORS
1987 + .enc = __VECS(aes_cbc_enc_tv_template),
1988 + .dec = __VECS(aes_cbc_dec_tv_template)
1992 @@ -2444,14 +2687,8 @@ static const struct alg_test_desc alg_te
1993 .test = alg_test_skcipher,
1997 - .vecs = anubis_cbc_enc_tv_template,
1998 - .count = ANUBIS_CBC_ENC_TEST_VECTORS
2001 - .vecs = anubis_cbc_dec_tv_template,
2002 - .count = ANUBIS_CBC_DEC_TEST_VECTORS
2004 + .enc = __VECS(anubis_cbc_enc_tv_template),
2005 + .dec = __VECS(anubis_cbc_dec_tv_template)
2009 @@ -2459,14 +2696,8 @@ static const struct alg_test_desc alg_te
2010 .test = alg_test_skcipher,
2014 - .vecs = bf_cbc_enc_tv_template,
2015 - .count = BF_CBC_ENC_TEST_VECTORS
2018 - .vecs = bf_cbc_dec_tv_template,
2019 - .count = BF_CBC_DEC_TEST_VECTORS
2021 + .enc = __VECS(bf_cbc_enc_tv_template),
2022 + .dec = __VECS(bf_cbc_dec_tv_template)
2026 @@ -2474,14 +2705,8 @@ static const struct alg_test_desc alg_te
2027 .test = alg_test_skcipher,
2031 - .vecs = camellia_cbc_enc_tv_template,
2032 - .count = CAMELLIA_CBC_ENC_TEST_VECTORS
2035 - .vecs = camellia_cbc_dec_tv_template,
2036 - .count = CAMELLIA_CBC_DEC_TEST_VECTORS
2038 + .enc = __VECS(camellia_cbc_enc_tv_template),
2039 + .dec = __VECS(camellia_cbc_dec_tv_template)
2043 @@ -2489,14 +2714,8 @@ static const struct alg_test_desc alg_te
2044 .test = alg_test_skcipher,
2048 - .vecs = cast5_cbc_enc_tv_template,
2049 - .count = CAST5_CBC_ENC_TEST_VECTORS
2052 - .vecs = cast5_cbc_dec_tv_template,
2053 - .count = CAST5_CBC_DEC_TEST_VECTORS
2055 + .enc = __VECS(cast5_cbc_enc_tv_template),
2056 + .dec = __VECS(cast5_cbc_dec_tv_template)
2060 @@ -2504,14 +2723,8 @@ static const struct alg_test_desc alg_te
2061 .test = alg_test_skcipher,
2065 - .vecs = cast6_cbc_enc_tv_template,
2066 - .count = CAST6_CBC_ENC_TEST_VECTORS
2069 - .vecs = cast6_cbc_dec_tv_template,
2070 - .count = CAST6_CBC_DEC_TEST_VECTORS
2072 + .enc = __VECS(cast6_cbc_enc_tv_template),
2073 + .dec = __VECS(cast6_cbc_dec_tv_template)
2077 @@ -2519,14 +2732,8 @@ static const struct alg_test_desc alg_te
2078 .test = alg_test_skcipher,
2082 - .vecs = des_cbc_enc_tv_template,
2083 - .count = DES_CBC_ENC_TEST_VECTORS
2086 - .vecs = des_cbc_dec_tv_template,
2087 - .count = DES_CBC_DEC_TEST_VECTORS
2089 + .enc = __VECS(des_cbc_enc_tv_template),
2090 + .dec = __VECS(des_cbc_dec_tv_template)
2094 @@ -2535,14 +2742,8 @@ static const struct alg_test_desc alg_te
2099 - .vecs = des3_ede_cbc_enc_tv_template,
2100 - .count = DES3_EDE_CBC_ENC_TEST_VECTORS
2103 - .vecs = des3_ede_cbc_dec_tv_template,
2104 - .count = DES3_EDE_CBC_DEC_TEST_VECTORS
2106 + .enc = __VECS(des3_ede_cbc_enc_tv_template),
2107 + .dec = __VECS(des3_ede_cbc_dec_tv_template)
2111 @@ -2550,14 +2751,8 @@ static const struct alg_test_desc alg_te
2112 .test = alg_test_skcipher,
2116 - .vecs = serpent_cbc_enc_tv_template,
2117 - .count = SERPENT_CBC_ENC_TEST_VECTORS
2120 - .vecs = serpent_cbc_dec_tv_template,
2121 - .count = SERPENT_CBC_DEC_TEST_VECTORS
2123 + .enc = __VECS(serpent_cbc_enc_tv_template),
2124 + .dec = __VECS(serpent_cbc_dec_tv_template)
2128 @@ -2565,30 +2760,25 @@ static const struct alg_test_desc alg_te
2129 .test = alg_test_skcipher,
2133 - .vecs = tf_cbc_enc_tv_template,
2134 - .count = TF_CBC_ENC_TEST_VECTORS
2137 - .vecs = tf_cbc_dec_tv_template,
2138 - .count = TF_CBC_DEC_TEST_VECTORS
2140 + .enc = __VECS(tf_cbc_enc_tv_template),
2141 + .dec = __VECS(tf_cbc_dec_tv_template)
2145 + .alg = "cbcmac(aes)",
2146 + .fips_allowed = 1,
2147 + .test = alg_test_hash,
2149 + .hash = __VECS(aes_cbcmac_tv_template)
2153 .test = alg_test_aead,
2158 - .vecs = aes_ccm_enc_tv_template,
2159 - .count = AES_CCM_ENC_TEST_VECTORS
2162 - .vecs = aes_ccm_dec_tv_template,
2163 - .count = AES_CCM_DEC_TEST_VECTORS
2165 + .enc = __VECS(aes_ccm_enc_tv_template),
2166 + .dec = __VECS(aes_ccm_dec_tv_template)
2170 @@ -2596,14 +2786,8 @@ static const struct alg_test_desc alg_te
2171 .test = alg_test_skcipher,
2175 - .vecs = chacha20_enc_tv_template,
2176 - .count = CHACHA20_ENC_TEST_VECTORS
2179 - .vecs = chacha20_enc_tv_template,
2180 - .count = CHACHA20_ENC_TEST_VECTORS
2182 + .enc = __VECS(chacha20_enc_tv_template),
2183 + .dec = __VECS(chacha20_enc_tv_template),
2187 @@ -2611,20 +2795,14 @@ static const struct alg_test_desc alg_te
2189 .test = alg_test_hash,
2192 - .vecs = aes_cmac128_tv_template,
2193 - .count = CMAC_AES_TEST_VECTORS
2195 + .hash = __VECS(aes_cmac128_tv_template)
2198 .alg = "cmac(des3_ede)",
2200 .test = alg_test_hash,
2203 - .vecs = des3_ede_cmac64_tv_template,
2204 - .count = CMAC_DES3_EDE_TEST_VECTORS
2206 + .hash = __VECS(des3_ede_cmac64_tv_template)
2209 .alg = "compress_null",
2210 @@ -2633,94 +2811,30 @@ static const struct alg_test_desc alg_te
2212 .test = alg_test_hash,
2215 - .vecs = crc32_tv_template,
2216 - .count = CRC32_TEST_VECTORS
2218 + .hash = __VECS(crc32_tv_template)
2222 .test = alg_test_crc32c,
2226 - .vecs = crc32c_tv_template,
2227 - .count = CRC32C_TEST_VECTORS
2229 + .hash = __VECS(crc32c_tv_template)
2233 .test = alg_test_hash,
2237 - .vecs = crct10dif_tv_template,
2238 - .count = CRCT10DIF_TEST_VECTORS
2240 + .hash = __VECS(crct10dif_tv_template)
2243 - .alg = "cryptd(__driver-cbc-aes-aesni)",
2244 - .test = alg_test_null,
2245 - .fips_allowed = 1,
2247 - .alg = "cryptd(__driver-cbc-camellia-aesni)",
2248 - .test = alg_test_null,
2250 - .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)",
2251 - .test = alg_test_null,
2253 - .alg = "cryptd(__driver-cbc-serpent-avx2)",
2254 - .test = alg_test_null,
2256 - .alg = "cryptd(__driver-ecb-aes-aesni)",
2257 - .test = alg_test_null,
2258 - .fips_allowed = 1,
2260 - .alg = "cryptd(__driver-ecb-camellia-aesni)",
2261 - .test = alg_test_null,
2263 - .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)",
2264 - .test = alg_test_null,
2266 - .alg = "cryptd(__driver-ecb-cast5-avx)",
2267 - .test = alg_test_null,
2269 - .alg = "cryptd(__driver-ecb-cast6-avx)",
2270 - .test = alg_test_null,
2272 - .alg = "cryptd(__driver-ecb-serpent-avx)",
2273 - .test = alg_test_null,
2275 - .alg = "cryptd(__driver-ecb-serpent-avx2)",
2276 - .test = alg_test_null,
2278 - .alg = "cryptd(__driver-ecb-serpent-sse2)",
2279 - .test = alg_test_null,
2281 - .alg = "cryptd(__driver-ecb-twofish-avx)",
2282 - .test = alg_test_null,
2284 - .alg = "cryptd(__driver-gcm-aes-aesni)",
2285 - .test = alg_test_null,
2286 - .fips_allowed = 1,
2288 - .alg = "cryptd(__ghash-pclmulqdqni)",
2289 - .test = alg_test_null,
2290 - .fips_allowed = 1,
2293 .test = alg_test_skcipher,
2298 - .vecs = aes_ctr_enc_tv_template,
2299 - .count = AES_CTR_ENC_TEST_VECTORS
2302 - .vecs = aes_ctr_dec_tv_template,
2303 - .count = AES_CTR_DEC_TEST_VECTORS
2305 + .enc = __VECS(aes_ctr_enc_tv_template),
2306 + .dec = __VECS(aes_ctr_dec_tv_template)
2310 @@ -2728,14 +2842,8 @@ static const struct alg_test_desc alg_te
2311 .test = alg_test_skcipher,
2315 - .vecs = bf_ctr_enc_tv_template,
2316 - .count = BF_CTR_ENC_TEST_VECTORS
2319 - .vecs = bf_ctr_dec_tv_template,
2320 - .count = BF_CTR_DEC_TEST_VECTORS
2322 + .enc = __VECS(bf_ctr_enc_tv_template),
2323 + .dec = __VECS(bf_ctr_dec_tv_template)
2327 @@ -2743,14 +2851,8 @@ static const struct alg_test_desc alg_te
2328 .test = alg_test_skcipher,
2332 - .vecs = camellia_ctr_enc_tv_template,
2333 - .count = CAMELLIA_CTR_ENC_TEST_VECTORS
2336 - .vecs = camellia_ctr_dec_tv_template,
2337 - .count = CAMELLIA_CTR_DEC_TEST_VECTORS
2339 + .enc = __VECS(camellia_ctr_enc_tv_template),
2340 + .dec = __VECS(camellia_ctr_dec_tv_template)
2344 @@ -2758,14 +2860,8 @@ static const struct alg_test_desc alg_te
2345 .test = alg_test_skcipher,
2349 - .vecs = cast5_ctr_enc_tv_template,
2350 - .count = CAST5_CTR_ENC_TEST_VECTORS
2353 - .vecs = cast5_ctr_dec_tv_template,
2354 - .count = CAST5_CTR_DEC_TEST_VECTORS
2356 + .enc = __VECS(cast5_ctr_enc_tv_template),
2357 + .dec = __VECS(cast5_ctr_dec_tv_template)
2361 @@ -2773,14 +2869,8 @@ static const struct alg_test_desc alg_te
2362 .test = alg_test_skcipher,
2366 - .vecs = cast6_ctr_enc_tv_template,
2367 - .count = CAST6_CTR_ENC_TEST_VECTORS
2370 - .vecs = cast6_ctr_dec_tv_template,
2371 - .count = CAST6_CTR_DEC_TEST_VECTORS
2373 + .enc = __VECS(cast6_ctr_enc_tv_template),
2374 + .dec = __VECS(cast6_ctr_dec_tv_template)
2378 @@ -2788,29 +2878,18 @@ static const struct alg_test_desc alg_te
2379 .test = alg_test_skcipher,
2383 - .vecs = des_ctr_enc_tv_template,
2384 - .count = DES_CTR_ENC_TEST_VECTORS
2387 - .vecs = des_ctr_dec_tv_template,
2388 - .count = DES_CTR_DEC_TEST_VECTORS
2390 + .enc = __VECS(des_ctr_enc_tv_template),
2391 + .dec = __VECS(des_ctr_dec_tv_template)
2395 .alg = "ctr(des3_ede)",
2396 .test = alg_test_skcipher,
2397 + .fips_allowed = 1,
2401 - .vecs = des3_ede_ctr_enc_tv_template,
2402 - .count = DES3_EDE_CTR_ENC_TEST_VECTORS
2405 - .vecs = des3_ede_ctr_dec_tv_template,
2406 - .count = DES3_EDE_CTR_DEC_TEST_VECTORS
2408 + .enc = __VECS(des3_ede_ctr_enc_tv_template),
2409 + .dec = __VECS(des3_ede_ctr_dec_tv_template)
2413 @@ -2818,14 +2897,8 @@ static const struct alg_test_desc alg_te
2414 .test = alg_test_skcipher,
2418 - .vecs = serpent_ctr_enc_tv_template,
2419 - .count = SERPENT_CTR_ENC_TEST_VECTORS
2422 - .vecs = serpent_ctr_dec_tv_template,
2423 - .count = SERPENT_CTR_DEC_TEST_VECTORS
2425 + .enc = __VECS(serpent_ctr_enc_tv_template),
2426 + .dec = __VECS(serpent_ctr_dec_tv_template)
2430 @@ -2833,14 +2906,8 @@ static const struct alg_test_desc alg_te
2431 .test = alg_test_skcipher,
2435 - .vecs = tf_ctr_enc_tv_template,
2436 - .count = TF_CTR_ENC_TEST_VECTORS
2439 - .vecs = tf_ctr_dec_tv_template,
2440 - .count = TF_CTR_DEC_TEST_VECTORS
2442 + .enc = __VECS(tf_ctr_enc_tv_template),
2443 + .dec = __VECS(tf_ctr_dec_tv_template)
2447 @@ -2848,14 +2915,8 @@ static const struct alg_test_desc alg_te
2448 .test = alg_test_skcipher,
2452 - .vecs = cts_mode_enc_tv_template,
2453 - .count = CTS_MODE_ENC_TEST_VECTORS
2456 - .vecs = cts_mode_dec_tv_template,
2457 - .count = CTS_MODE_DEC_TEST_VECTORS
2459 + .enc = __VECS(cts_mode_enc_tv_template),
2460 + .dec = __VECS(cts_mode_dec_tv_template)
2464 @@ -2864,14 +2925,8 @@ static const struct alg_test_desc alg_te
2469 - .vecs = deflate_comp_tv_template,
2470 - .count = DEFLATE_COMP_TEST_VECTORS
2473 - .vecs = deflate_decomp_tv_template,
2474 - .count = DEFLATE_DECOMP_TEST_VECTORS
2476 + .comp = __VECS(deflate_comp_tv_template),
2477 + .decomp = __VECS(deflate_decomp_tv_template)
2481 @@ -2879,10 +2934,7 @@ static const struct alg_test_desc alg_te
2482 .test = alg_test_kpp,
2486 - .vecs = dh_tv_template,
2487 - .count = DH_TEST_VECTORS
2489 + .kpp = __VECS(dh_tv_template)
2492 .alg = "digest_null",
2493 @@ -2892,30 +2944,21 @@ static const struct alg_test_desc alg_te
2494 .test = alg_test_drbg,
2498 - .vecs = drbg_nopr_ctr_aes128_tv_template,
2499 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
2501 + .drbg = __VECS(drbg_nopr_ctr_aes128_tv_template)
2504 .alg = "drbg_nopr_ctr_aes192",
2505 .test = alg_test_drbg,
2509 - .vecs = drbg_nopr_ctr_aes192_tv_template,
2510 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
2512 + .drbg = __VECS(drbg_nopr_ctr_aes192_tv_template)
2515 .alg = "drbg_nopr_ctr_aes256",
2516 .test = alg_test_drbg,
2520 - .vecs = drbg_nopr_ctr_aes256_tv_template,
2521 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
2523 + .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
2527 @@ -2930,11 +2973,7 @@ static const struct alg_test_desc alg_te
2528 .test = alg_test_drbg,
2532 - .vecs = drbg_nopr_hmac_sha256_tv_template,
2534 - ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
2536 + .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
2539 /* covered by drbg_nopr_hmac_sha256 test */
2540 @@ -2954,10 +2993,7 @@ static const struct alg_test_desc alg_te
2541 .test = alg_test_drbg,
2545 - .vecs = drbg_nopr_sha256_tv_template,
2546 - .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
2548 + .drbg = __VECS(drbg_nopr_sha256_tv_template)
2551 /* covered by drbg_nopr_sha256 test */
2552 @@ -2973,10 +3009,7 @@ static const struct alg_test_desc alg_te
2553 .test = alg_test_drbg,
2557 - .vecs = drbg_pr_ctr_aes128_tv_template,
2558 - .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
2560 + .drbg = __VECS(drbg_pr_ctr_aes128_tv_template)
2563 /* covered by drbg_pr_ctr_aes128 test */
2564 @@ -2996,10 +3029,7 @@ static const struct alg_test_desc alg_te
2565 .test = alg_test_drbg,
2569 - .vecs = drbg_pr_hmac_sha256_tv_template,
2570 - .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
2572 + .drbg = __VECS(drbg_pr_hmac_sha256_tv_template)
2575 /* covered by drbg_pr_hmac_sha256 test */
2576 @@ -3019,10 +3049,7 @@ static const struct alg_test_desc alg_te
2577 .test = alg_test_drbg,
2581 - .vecs = drbg_pr_sha256_tv_template,
2582 - .count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
2584 + .drbg = __VECS(drbg_pr_sha256_tv_template)
2587 /* covered by drbg_pr_sha256 test */
2588 @@ -3034,23 +3061,13 @@ static const struct alg_test_desc alg_te
2590 .test = alg_test_null,
2592 - .alg = "ecb(__aes-aesni)",
2593 - .test = alg_test_null,
2594 - .fips_allowed = 1,
2597 .test = alg_test_skcipher,
2602 - .vecs = aes_enc_tv_template,
2603 - .count = AES_ENC_TEST_VECTORS
2606 - .vecs = aes_dec_tv_template,
2607 - .count = AES_DEC_TEST_VECTORS
2609 + .enc = __VECS(aes_enc_tv_template),
2610 + .dec = __VECS(aes_dec_tv_template)
2614 @@ -3058,14 +3075,8 @@ static const struct alg_test_desc alg_te
2615 .test = alg_test_skcipher,
2619 - .vecs = anubis_enc_tv_template,
2620 - .count = ANUBIS_ENC_TEST_VECTORS
2623 - .vecs = anubis_dec_tv_template,
2624 - .count = ANUBIS_DEC_TEST_VECTORS
2626 + .enc = __VECS(anubis_enc_tv_template),
2627 + .dec = __VECS(anubis_dec_tv_template)
2631 @@ -3073,14 +3084,8 @@ static const struct alg_test_desc alg_te
2632 .test = alg_test_skcipher,
2636 - .vecs = arc4_enc_tv_template,
2637 - .count = ARC4_ENC_TEST_VECTORS
2640 - .vecs = arc4_dec_tv_template,
2641 - .count = ARC4_DEC_TEST_VECTORS
2643 + .enc = __VECS(arc4_enc_tv_template),
2644 + .dec = __VECS(arc4_dec_tv_template)
2648 @@ -3088,14 +3093,8 @@ static const struct alg_test_desc alg_te
2649 .test = alg_test_skcipher,
2653 - .vecs = bf_enc_tv_template,
2654 - .count = BF_ENC_TEST_VECTORS
2657 - .vecs = bf_dec_tv_template,
2658 - .count = BF_DEC_TEST_VECTORS
2660 + .enc = __VECS(bf_enc_tv_template),
2661 + .dec = __VECS(bf_dec_tv_template)
2665 @@ -3103,14 +3102,8 @@ static const struct alg_test_desc alg_te
2666 .test = alg_test_skcipher,
2670 - .vecs = camellia_enc_tv_template,
2671 - .count = CAMELLIA_ENC_TEST_VECTORS
2674 - .vecs = camellia_dec_tv_template,
2675 - .count = CAMELLIA_DEC_TEST_VECTORS
2677 + .enc = __VECS(camellia_enc_tv_template),
2678 + .dec = __VECS(camellia_dec_tv_template)
2682 @@ -3118,14 +3111,8 @@ static const struct alg_test_desc alg_te
2683 .test = alg_test_skcipher,
2687 - .vecs = cast5_enc_tv_template,
2688 - .count = CAST5_ENC_TEST_VECTORS
2691 - .vecs = cast5_dec_tv_template,
2692 - .count = CAST5_DEC_TEST_VECTORS
2694 + .enc = __VECS(cast5_enc_tv_template),
2695 + .dec = __VECS(cast5_dec_tv_template)
2699 @@ -3133,14 +3120,8 @@ static const struct alg_test_desc alg_te
2700 .test = alg_test_skcipher,
2704 - .vecs = cast6_enc_tv_template,
2705 - .count = CAST6_ENC_TEST_VECTORS
2708 - .vecs = cast6_dec_tv_template,
2709 - .count = CAST6_DEC_TEST_VECTORS
2711 + .enc = __VECS(cast6_enc_tv_template),
2712 + .dec = __VECS(cast6_dec_tv_template)
2716 @@ -3151,14 +3132,8 @@ static const struct alg_test_desc alg_te
2717 .test = alg_test_skcipher,
2721 - .vecs = des_enc_tv_template,
2722 - .count = DES_ENC_TEST_VECTORS
2725 - .vecs = des_dec_tv_template,
2726 - .count = DES_DEC_TEST_VECTORS
2728 + .enc = __VECS(des_enc_tv_template),
2729 + .dec = __VECS(des_dec_tv_template)
2733 @@ -3167,14 +3142,8 @@ static const struct alg_test_desc alg_te
2738 - .vecs = des3_ede_enc_tv_template,
2739 - .count = DES3_EDE_ENC_TEST_VECTORS
2742 - .vecs = des3_ede_dec_tv_template,
2743 - .count = DES3_EDE_DEC_TEST_VECTORS
2745 + .enc = __VECS(des3_ede_enc_tv_template),
2746 + .dec = __VECS(des3_ede_dec_tv_template)
2750 @@ -3197,14 +3166,8 @@ static const struct alg_test_desc alg_te
2751 .test = alg_test_skcipher,
2755 - .vecs = khazad_enc_tv_template,
2756 - .count = KHAZAD_ENC_TEST_VECTORS
2759 - .vecs = khazad_dec_tv_template,
2760 - .count = KHAZAD_DEC_TEST_VECTORS
2762 + .enc = __VECS(khazad_enc_tv_template),
2763 + .dec = __VECS(khazad_dec_tv_template)
2767 @@ -3212,14 +3175,8 @@ static const struct alg_test_desc alg_te
2768 .test = alg_test_skcipher,
2772 - .vecs = seed_enc_tv_template,
2773 - .count = SEED_ENC_TEST_VECTORS
2776 - .vecs = seed_dec_tv_template,
2777 - .count = SEED_DEC_TEST_VECTORS
2779 + .enc = __VECS(seed_enc_tv_template),
2780 + .dec = __VECS(seed_dec_tv_template)
2784 @@ -3227,14 +3184,8 @@ static const struct alg_test_desc alg_te
2785 .test = alg_test_skcipher,
2789 - .vecs = serpent_enc_tv_template,
2790 - .count = SERPENT_ENC_TEST_VECTORS
2793 - .vecs = serpent_dec_tv_template,
2794 - .count = SERPENT_DEC_TEST_VECTORS
2796 + .enc = __VECS(serpent_enc_tv_template),
2797 + .dec = __VECS(serpent_dec_tv_template)
2801 @@ -3242,14 +3193,8 @@ static const struct alg_test_desc alg_te
2802 .test = alg_test_skcipher,
2806 - .vecs = tea_enc_tv_template,
2807 - .count = TEA_ENC_TEST_VECTORS
2810 - .vecs = tea_dec_tv_template,
2811 - .count = TEA_DEC_TEST_VECTORS
2813 + .enc = __VECS(tea_enc_tv_template),
2814 + .dec = __VECS(tea_dec_tv_template)
2818 @@ -3257,14 +3202,8 @@ static const struct alg_test_desc alg_te
2819 .test = alg_test_skcipher,
2823 - .vecs = tnepres_enc_tv_template,
2824 - .count = TNEPRES_ENC_TEST_VECTORS
2827 - .vecs = tnepres_dec_tv_template,
2828 - .count = TNEPRES_DEC_TEST_VECTORS
2830 + .enc = __VECS(tnepres_enc_tv_template),
2831 + .dec = __VECS(tnepres_dec_tv_template)
2835 @@ -3272,14 +3211,8 @@ static const struct alg_test_desc alg_te
2836 .test = alg_test_skcipher,
2840 - .vecs = tf_enc_tv_template,
2841 - .count = TF_ENC_TEST_VECTORS
2844 - .vecs = tf_dec_tv_template,
2845 - .count = TF_DEC_TEST_VECTORS
2847 + .enc = __VECS(tf_enc_tv_template),
2848 + .dec = __VECS(tf_dec_tv_template)
2852 @@ -3287,14 +3220,8 @@ static const struct alg_test_desc alg_te
2853 .test = alg_test_skcipher,
2857 - .vecs = xeta_enc_tv_template,
2858 - .count = XETA_ENC_TEST_VECTORS
2861 - .vecs = xeta_dec_tv_template,
2862 - .count = XETA_DEC_TEST_VECTORS
2864 + .enc = __VECS(xeta_enc_tv_template),
2865 + .dec = __VECS(xeta_dec_tv_template)
2869 @@ -3302,14 +3229,8 @@ static const struct alg_test_desc alg_te
2870 .test = alg_test_skcipher,
2874 - .vecs = xtea_enc_tv_template,
2875 - .count = XTEA_ENC_TEST_VECTORS
2878 - .vecs = xtea_dec_tv_template,
2879 - .count = XTEA_DEC_TEST_VECTORS
2881 + .enc = __VECS(xtea_enc_tv_template),
2882 + .dec = __VECS(xtea_dec_tv_template)
2886 @@ -3317,10 +3238,7 @@ static const struct alg_test_desc alg_te
2887 .test = alg_test_kpp,
2891 - .vecs = ecdh_tv_template,
2892 - .count = ECDH_TEST_VECTORS
2894 + .kpp = __VECS(ecdh_tv_template)
2898 @@ -3328,14 +3246,8 @@ static const struct alg_test_desc alg_te
2903 - .vecs = aes_gcm_enc_tv_template,
2904 - .count = AES_GCM_ENC_TEST_VECTORS
2907 - .vecs = aes_gcm_dec_tv_template,
2908 - .count = AES_GCM_DEC_TEST_VECTORS
2910 + .enc = __VECS(aes_gcm_enc_tv_template),
2911 + .dec = __VECS(aes_gcm_dec_tv_template)
2915 @@ -3343,136 +3255,94 @@ static const struct alg_test_desc alg_te
2916 .test = alg_test_hash,
2920 - .vecs = ghash_tv_template,
2921 - .count = GHASH_TEST_VECTORS
2923 + .hash = __VECS(ghash_tv_template)
2926 .alg = "hmac(crc32)",
2927 .test = alg_test_hash,
2930 - .vecs = bfin_crc_tv_template,
2931 - .count = BFIN_CRC_TEST_VECTORS
2933 + .hash = __VECS(bfin_crc_tv_template)
2937 .test = alg_test_hash,
2940 - .vecs = hmac_md5_tv_template,
2941 - .count = HMAC_MD5_TEST_VECTORS
2943 + .hash = __VECS(hmac_md5_tv_template)
2946 .alg = "hmac(rmd128)",
2947 .test = alg_test_hash,
2950 - .vecs = hmac_rmd128_tv_template,
2951 - .count = HMAC_RMD128_TEST_VECTORS
2953 + .hash = __VECS(hmac_rmd128_tv_template)
2956 .alg = "hmac(rmd160)",
2957 .test = alg_test_hash,
2960 - .vecs = hmac_rmd160_tv_template,
2961 - .count = HMAC_RMD160_TEST_VECTORS
2963 + .hash = __VECS(hmac_rmd160_tv_template)
2966 .alg = "hmac(sha1)",
2967 .test = alg_test_hash,
2971 - .vecs = hmac_sha1_tv_template,
2972 - .count = HMAC_SHA1_TEST_VECTORS
2974 + .hash = __VECS(hmac_sha1_tv_template)
2977 .alg = "hmac(sha224)",
2978 .test = alg_test_hash,
2982 - .vecs = hmac_sha224_tv_template,
2983 - .count = HMAC_SHA224_TEST_VECTORS
2985 + .hash = __VECS(hmac_sha224_tv_template)
2988 .alg = "hmac(sha256)",
2989 .test = alg_test_hash,
2993 - .vecs = hmac_sha256_tv_template,
2994 - .count = HMAC_SHA256_TEST_VECTORS
2996 + .hash = __VECS(hmac_sha256_tv_template)
2999 .alg = "hmac(sha3-224)",
3000 .test = alg_test_hash,
3004 - .vecs = hmac_sha3_224_tv_template,
3005 - .count = HMAC_SHA3_224_TEST_VECTORS
3007 + .hash = __VECS(hmac_sha3_224_tv_template)
3010 .alg = "hmac(sha3-256)",
3011 .test = alg_test_hash,
3015 - .vecs = hmac_sha3_256_tv_template,
3016 - .count = HMAC_SHA3_256_TEST_VECTORS
3018 + .hash = __VECS(hmac_sha3_256_tv_template)
3021 .alg = "hmac(sha3-384)",
3022 .test = alg_test_hash,
3026 - .vecs = hmac_sha3_384_tv_template,
3027 - .count = HMAC_SHA3_384_TEST_VECTORS
3029 + .hash = __VECS(hmac_sha3_384_tv_template)
3032 .alg = "hmac(sha3-512)",
3033 .test = alg_test_hash,
3037 - .vecs = hmac_sha3_512_tv_template,
3038 - .count = HMAC_SHA3_512_TEST_VECTORS
3040 + .hash = __VECS(hmac_sha3_512_tv_template)
3043 .alg = "hmac(sha384)",
3044 .test = alg_test_hash,
3048 - .vecs = hmac_sha384_tv_template,
3049 - .count = HMAC_SHA384_TEST_VECTORS
3051 + .hash = __VECS(hmac_sha384_tv_template)
3054 .alg = "hmac(sha512)",
3055 .test = alg_test_hash,
3059 - .vecs = hmac_sha512_tv_template,
3060 - .count = HMAC_SHA512_TEST_VECTORS
3062 + .hash = __VECS(hmac_sha512_tv_template)
3065 .alg = "jitterentropy_rng",
3066 @@ -3484,14 +3354,8 @@ static const struct alg_test_desc alg_te
3071 - .vecs = aes_kw_enc_tv_template,
3072 - .count = ARRAY_SIZE(aes_kw_enc_tv_template)
3075 - .vecs = aes_kw_dec_tv_template,
3076 - .count = ARRAY_SIZE(aes_kw_dec_tv_template)
3078 + .enc = __VECS(aes_kw_enc_tv_template),
3079 + .dec = __VECS(aes_kw_dec_tv_template)
3083 @@ -3499,14 +3363,8 @@ static const struct alg_test_desc alg_te
3084 .test = alg_test_skcipher,
3088 - .vecs = aes_lrw_enc_tv_template,
3089 - .count = AES_LRW_ENC_TEST_VECTORS
3092 - .vecs = aes_lrw_dec_tv_template,
3093 - .count = AES_LRW_DEC_TEST_VECTORS
3095 + .enc = __VECS(aes_lrw_enc_tv_template),
3096 + .dec = __VECS(aes_lrw_dec_tv_template)
3100 @@ -3514,14 +3372,8 @@ static const struct alg_test_desc alg_te
3101 .test = alg_test_skcipher,
3105 - .vecs = camellia_lrw_enc_tv_template,
3106 - .count = CAMELLIA_LRW_ENC_TEST_VECTORS
3109 - .vecs = camellia_lrw_dec_tv_template,
3110 - .count = CAMELLIA_LRW_DEC_TEST_VECTORS
3112 + .enc = __VECS(camellia_lrw_enc_tv_template),
3113 + .dec = __VECS(camellia_lrw_dec_tv_template)
3117 @@ -3529,14 +3381,8 @@ static const struct alg_test_desc alg_te
3118 .test = alg_test_skcipher,
3122 - .vecs = cast6_lrw_enc_tv_template,
3123 - .count = CAST6_LRW_ENC_TEST_VECTORS
3126 - .vecs = cast6_lrw_dec_tv_template,
3127 - .count = CAST6_LRW_DEC_TEST_VECTORS
3129 + .enc = __VECS(cast6_lrw_enc_tv_template),
3130 + .dec = __VECS(cast6_lrw_dec_tv_template)
3134 @@ -3544,14 +3390,8 @@ static const struct alg_test_desc alg_te
3135 .test = alg_test_skcipher,
3139 - .vecs = serpent_lrw_enc_tv_template,
3140 - .count = SERPENT_LRW_ENC_TEST_VECTORS
3143 - .vecs = serpent_lrw_dec_tv_template,
3144 - .count = SERPENT_LRW_DEC_TEST_VECTORS
3146 + .enc = __VECS(serpent_lrw_enc_tv_template),
3147 + .dec = __VECS(serpent_lrw_dec_tv_template)
3151 @@ -3559,14 +3399,8 @@ static const struct alg_test_desc alg_te
3152 .test = alg_test_skcipher,
3156 - .vecs = tf_lrw_enc_tv_template,
3157 - .count = TF_LRW_ENC_TEST_VECTORS
3160 - .vecs = tf_lrw_dec_tv_template,
3161 - .count = TF_LRW_DEC_TEST_VECTORS
3163 + .enc = __VECS(tf_lrw_enc_tv_template),
3164 + .dec = __VECS(tf_lrw_dec_tv_template)
3168 @@ -3575,14 +3409,8 @@ static const struct alg_test_desc alg_te
3173 - .vecs = lz4_comp_tv_template,
3174 - .count = LZ4_COMP_TEST_VECTORS
3177 - .vecs = lz4_decomp_tv_template,
3178 - .count = LZ4_DECOMP_TEST_VECTORS
3180 + .comp = __VECS(lz4_comp_tv_template),
3181 + .decomp = __VECS(lz4_decomp_tv_template)
3185 @@ -3591,14 +3419,8 @@ static const struct alg_test_desc alg_te
3190 - .vecs = lz4hc_comp_tv_template,
3191 - .count = LZ4HC_COMP_TEST_VECTORS
3194 - .vecs = lz4hc_decomp_tv_template,
3195 - .count = LZ4HC_DECOMP_TEST_VECTORS
3197 + .comp = __VECS(lz4hc_comp_tv_template),
3198 + .decomp = __VECS(lz4hc_decomp_tv_template)
3202 @@ -3607,42 +3429,27 @@ static const struct alg_test_desc alg_te
3207 - .vecs = lzo_comp_tv_template,
3208 - .count = LZO_COMP_TEST_VECTORS
3211 - .vecs = lzo_decomp_tv_template,
3212 - .count = LZO_DECOMP_TEST_VECTORS
3214 + .comp = __VECS(lzo_comp_tv_template),
3215 + .decomp = __VECS(lzo_decomp_tv_template)
3220 .test = alg_test_hash,
3223 - .vecs = md4_tv_template,
3224 - .count = MD4_TEST_VECTORS
3226 + .hash = __VECS(md4_tv_template)
3230 .test = alg_test_hash,
3233 - .vecs = md5_tv_template,
3234 - .count = MD5_TEST_VECTORS
3236 + .hash = __VECS(md5_tv_template)
3239 .alg = "michael_mic",
3240 .test = alg_test_hash,
3243 - .vecs = michael_mic_tv_template,
3244 - .count = MICHAEL_MIC_TEST_VECTORS
3246 + .hash = __VECS(michael_mic_tv_template)
3250 @@ -3650,14 +3457,8 @@ static const struct alg_test_desc alg_te
3255 - .vecs = aes_ofb_enc_tv_template,
3256 - .count = AES_OFB_ENC_TEST_VECTORS
3259 - .vecs = aes_ofb_dec_tv_template,
3260 - .count = AES_OFB_DEC_TEST_VECTORS
3262 + .enc = __VECS(aes_ofb_enc_tv_template),
3263 + .dec = __VECS(aes_ofb_dec_tv_template)
3267 @@ -3665,24 +3466,15 @@ static const struct alg_test_desc alg_te
3268 .test = alg_test_skcipher,
3272 - .vecs = fcrypt_pcbc_enc_tv_template,
3273 - .count = FCRYPT_ENC_TEST_VECTORS
3276 - .vecs = fcrypt_pcbc_dec_tv_template,
3277 - .count = FCRYPT_DEC_TEST_VECTORS
3279 + .enc = __VECS(fcrypt_pcbc_enc_tv_template),
3280 + .dec = __VECS(fcrypt_pcbc_dec_tv_template)
3285 .test = alg_test_hash,
3288 - .vecs = poly1305_tv_template,
3289 - .count = POLY1305_TEST_VECTORS
3291 + .hash = __VECS(poly1305_tv_template)
3294 .alg = "rfc3686(ctr(aes))",
3295 @@ -3690,14 +3482,8 @@ static const struct alg_test_desc alg_te
3300 - .vecs = aes_ctr_rfc3686_enc_tv_template,
3301 - .count = AES_CTR_3686_ENC_TEST_VECTORS
3304 - .vecs = aes_ctr_rfc3686_dec_tv_template,
3305 - .count = AES_CTR_3686_DEC_TEST_VECTORS
3307 + .enc = __VECS(aes_ctr_rfc3686_enc_tv_template),
3308 + .dec = __VECS(aes_ctr_rfc3686_dec_tv_template)
3312 @@ -3706,14 +3492,8 @@ static const struct alg_test_desc alg_te
3317 - .vecs = aes_gcm_rfc4106_enc_tv_template,
3318 - .count = AES_GCM_4106_ENC_TEST_VECTORS
3321 - .vecs = aes_gcm_rfc4106_dec_tv_template,
3322 - .count = AES_GCM_4106_DEC_TEST_VECTORS
3324 + .enc = __VECS(aes_gcm_rfc4106_enc_tv_template),
3325 + .dec = __VECS(aes_gcm_rfc4106_dec_tv_template)
3329 @@ -3722,14 +3502,8 @@ static const struct alg_test_desc alg_te
3334 - .vecs = aes_ccm_rfc4309_enc_tv_template,
3335 - .count = AES_CCM_4309_ENC_TEST_VECTORS
3338 - .vecs = aes_ccm_rfc4309_dec_tv_template,
3339 - .count = AES_CCM_4309_DEC_TEST_VECTORS
3341 + .enc = __VECS(aes_ccm_rfc4309_enc_tv_template),
3342 + .dec = __VECS(aes_ccm_rfc4309_dec_tv_template)
3346 @@ -3737,14 +3511,8 @@ static const struct alg_test_desc alg_te
3347 .test = alg_test_aead,
3351 - .vecs = aes_gcm_rfc4543_enc_tv_template,
3352 - .count = AES_GCM_4543_ENC_TEST_VECTORS
3355 - .vecs = aes_gcm_rfc4543_dec_tv_template,
3356 - .count = AES_GCM_4543_DEC_TEST_VECTORS
3358 + .enc = __VECS(aes_gcm_rfc4543_enc_tv_template),
3359 + .dec = __VECS(aes_gcm_rfc4543_dec_tv_template),
3363 @@ -3752,14 +3520,8 @@ static const struct alg_test_desc alg_te
3364 .test = alg_test_aead,
3368 - .vecs = rfc7539_enc_tv_template,
3369 - .count = RFC7539_ENC_TEST_VECTORS
3372 - .vecs = rfc7539_dec_tv_template,
3373 - .count = RFC7539_DEC_TEST_VECTORS
3375 + .enc = __VECS(rfc7539_enc_tv_template),
3376 + .dec = __VECS(rfc7539_dec_tv_template),
3380 @@ -3767,71 +3529,47 @@ static const struct alg_test_desc alg_te
3381 .test = alg_test_aead,
3385 - .vecs = rfc7539esp_enc_tv_template,
3386 - .count = RFC7539ESP_ENC_TEST_VECTORS
3389 - .vecs = rfc7539esp_dec_tv_template,
3390 - .count = RFC7539ESP_DEC_TEST_VECTORS
3392 + .enc = __VECS(rfc7539esp_enc_tv_template),
3393 + .dec = __VECS(rfc7539esp_dec_tv_template),
3398 .test = alg_test_hash,
3401 - .vecs = rmd128_tv_template,
3402 - .count = RMD128_TEST_VECTORS
3404 + .hash = __VECS(rmd128_tv_template)
3408 .test = alg_test_hash,
3411 - .vecs = rmd160_tv_template,
3412 - .count = RMD160_TEST_VECTORS
3414 + .hash = __VECS(rmd160_tv_template)
3418 .test = alg_test_hash,
3421 - .vecs = rmd256_tv_template,
3422 - .count = RMD256_TEST_VECTORS
3424 + .hash = __VECS(rmd256_tv_template)
3428 .test = alg_test_hash,
3431 - .vecs = rmd320_tv_template,
3432 - .count = RMD320_TEST_VECTORS
3434 + .hash = __VECS(rmd320_tv_template)
3438 .test = alg_test_akcipher,
3442 - .vecs = rsa_tv_template,
3443 - .count = RSA_TEST_VECTORS
3445 + .akcipher = __VECS(rsa_tv_template)
3449 .test = alg_test_skcipher,
3453 - .vecs = salsa20_stream_enc_tv_template,
3454 - .count = SALSA20_STREAM_ENC_TEST_VECTORS
3456 + .enc = __VECS(salsa20_stream_enc_tv_template)
3460 @@ -3839,162 +3577,120 @@ static const struct alg_test_desc alg_te
3461 .test = alg_test_hash,
3465 - .vecs = sha1_tv_template,
3466 - .count = SHA1_TEST_VECTORS
3468 + .hash = __VECS(sha1_tv_template)
3472 .test = alg_test_hash,
3476 - .vecs = sha224_tv_template,
3477 - .count = SHA224_TEST_VECTORS
3479 + .hash = __VECS(sha224_tv_template)
3483 .test = alg_test_hash,
3487 - .vecs = sha256_tv_template,
3488 - .count = SHA256_TEST_VECTORS
3490 + .hash = __VECS(sha256_tv_template)
3494 .test = alg_test_hash,
3498 - .vecs = sha3_224_tv_template,
3499 - .count = SHA3_224_TEST_VECTORS
3501 + .hash = __VECS(sha3_224_tv_template)
3505 .test = alg_test_hash,
3509 - .vecs = sha3_256_tv_template,
3510 - .count = SHA3_256_TEST_VECTORS
3512 + .hash = __VECS(sha3_256_tv_template)
3516 .test = alg_test_hash,
3520 - .vecs = sha3_384_tv_template,
3521 - .count = SHA3_384_TEST_VECTORS
3523 + .hash = __VECS(sha3_384_tv_template)
3527 .test = alg_test_hash,
3531 - .vecs = sha3_512_tv_template,
3532 - .count = SHA3_512_TEST_VECTORS
3534 + .hash = __VECS(sha3_512_tv_template)
3538 .test = alg_test_hash,
3542 - .vecs = sha384_tv_template,
3543 - .count = SHA384_TEST_VECTORS
3545 + .hash = __VECS(sha384_tv_template)
3549 .test = alg_test_hash,
3553 - .vecs = sha512_tv_template,
3554 - .count = SHA512_TEST_VECTORS
3556 + .hash = __VECS(sha512_tv_template)
3560 .test = alg_test_hash,
3563 - .vecs = tgr128_tv_template,
3564 - .count = TGR128_TEST_VECTORS
3566 + .hash = __VECS(tgr128_tv_template)
3570 .test = alg_test_hash,
3573 - .vecs = tgr160_tv_template,
3574 - .count = TGR160_TEST_VECTORS
3576 + .hash = __VECS(tgr160_tv_template)
3580 .test = alg_test_hash,
3583 - .vecs = tgr192_tv_template,
3584 - .count = TGR192_TEST_VECTORS
3585 + .hash = __VECS(tgr192_tv_template)
3588 + .alg = "tls10(hmac(sha1),cbc(aes))",
3589 + .test = alg_test_tls,
3592 + .enc = __VECS(tls_enc_tv_template),
3593 + .dec = __VECS(tls_dec_tv_template)
3598 .test = alg_test_hash,
3601 - .vecs = aes_vmac128_tv_template,
3602 - .count = VMAC_AES_TEST_VECTORS
3604 + .hash = __VECS(aes_vmac128_tv_template)
3608 .test = alg_test_hash,
3611 - .vecs = wp256_tv_template,
3612 - .count = WP256_TEST_VECTORS
3614 + .hash = __VECS(wp256_tv_template)
3618 .test = alg_test_hash,
3621 - .vecs = wp384_tv_template,
3622 - .count = WP384_TEST_VECTORS
3624 + .hash = __VECS(wp384_tv_template)
3628 .test = alg_test_hash,
3631 - .vecs = wp512_tv_template,
3632 - .count = WP512_TEST_VECTORS
3634 + .hash = __VECS(wp512_tv_template)
3638 .test = alg_test_hash,
3641 - .vecs = aes_xcbc128_tv_template,
3642 - .count = XCBC_AES_TEST_VECTORS
3644 + .hash = __VECS(aes_xcbc128_tv_template)
3648 @@ -4002,14 +3698,8 @@ static const struct alg_test_desc alg_te
3653 - .vecs = aes_xts_enc_tv_template,
3654 - .count = AES_XTS_ENC_TEST_VECTORS
3657 - .vecs = aes_xts_dec_tv_template,
3658 - .count = AES_XTS_DEC_TEST_VECTORS
3660 + .enc = __VECS(aes_xts_enc_tv_template),
3661 + .dec = __VECS(aes_xts_dec_tv_template)
3665 @@ -4017,14 +3707,8 @@ static const struct alg_test_desc alg_te
3666 .test = alg_test_skcipher,
3670 - .vecs = camellia_xts_enc_tv_template,
3671 - .count = CAMELLIA_XTS_ENC_TEST_VECTORS
3674 - .vecs = camellia_xts_dec_tv_template,
3675 - .count = CAMELLIA_XTS_DEC_TEST_VECTORS
3677 + .enc = __VECS(camellia_xts_enc_tv_template),
3678 + .dec = __VECS(camellia_xts_dec_tv_template)
3682 @@ -4032,14 +3716,8 @@ static const struct alg_test_desc alg_te
3683 .test = alg_test_skcipher,
3687 - .vecs = cast6_xts_enc_tv_template,
3688 - .count = CAST6_XTS_ENC_TEST_VECTORS
3691 - .vecs = cast6_xts_dec_tv_template,
3692 - .count = CAST6_XTS_DEC_TEST_VECTORS
3694 + .enc = __VECS(cast6_xts_enc_tv_template),
3695 + .dec = __VECS(cast6_xts_dec_tv_template)
3699 @@ -4047,14 +3725,8 @@ static const struct alg_test_desc alg_te
3700 .test = alg_test_skcipher,
3704 - .vecs = serpent_xts_enc_tv_template,
3705 - .count = SERPENT_XTS_ENC_TEST_VECTORS
3708 - .vecs = serpent_xts_dec_tv_template,
3709 - .count = SERPENT_XTS_DEC_TEST_VECTORS
3711 + .enc = __VECS(serpent_xts_enc_tv_template),
3712 + .dec = __VECS(serpent_xts_dec_tv_template)
3716 @@ -4062,14 +3734,8 @@ static const struct alg_test_desc alg_te
3717 .test = alg_test_skcipher,
3721 - .vecs = tf_xts_enc_tv_template,
3722 - .count = TF_XTS_ENC_TEST_VECTORS
3725 - .vecs = tf_xts_dec_tv_template,
3726 - .count = TF_XTS_DEC_TEST_VECTORS
3728 + .enc = __VECS(tf_xts_enc_tv_template),
3729 + .dec = __VECS(tf_xts_dec_tv_template)
3733 --- a/crypto/testmgr.h
3734 +++ b/crypto/testmgr.h
3737 struct hash_testvec {
3738 /* only used with keyed hash algorithms */
3743 + const char *plaintext;
3744 + const char *digest;
3745 unsigned char tap[MAX_TAP];
3746 unsigned short psize;
3748 @@ -63,11 +63,11 @@ struct hash_testvec {
3751 struct cipher_testvec {
3759 + const char *iv_out;
3760 + const char *input;
3761 + const char *result;
3762 unsigned short tap[MAX_TAP];
3764 unsigned char also_non_np;
3765 @@ -80,11 +80,11 @@ struct cipher_testvec {
3768 struct aead_testvec {
3776 + const char *input;
3777 + const char *assoc;
3778 + const char *result;
3779 unsigned char tap[MAX_TAP];
3780 unsigned char atap[MAX_TAP];
3782 @@ -99,10 +99,10 @@ struct aead_testvec {
3785 struct cprng_testvec {
3793 + const char *result;
3795 unsigned short dtlen;
3796 unsigned short vlen;
3797 @@ -111,24 +111,38 @@ struct cprng_testvec {
3800 struct drbg_testvec {
3801 - unsigned char *entropy;
3802 + const unsigned char *entropy;
3804 - unsigned char *entpra;
3805 - unsigned char *entprb;
3806 + const unsigned char *entpra;
3807 + const unsigned char *entprb;
3809 - unsigned char *addtla;
3810 - unsigned char *addtlb;
3811 + const unsigned char *addtla;
3812 + const unsigned char *addtlb;
3814 - unsigned char *pers;
3815 + const unsigned char *pers;
3817 - unsigned char *expected;
3818 + const unsigned char *expected;
3822 +struct tls_testvec {
3823 + char *key; /* wrapped keys for encryption and authentication */
3824 + char *iv; /* initialization vector */
3825 + char *input; /* input data */
3826 + char *assoc; /* associated data: seq num, type, version, input len */
3827 + char *result; /* result data */
3828 + unsigned char fail; /* the test failure is expected */
3829 + unsigned char novrfy; /* dec verification failure expected */
3830 + unsigned char klen; /* key length */
3831 + unsigned short ilen; /* input data length */
3832 + unsigned short alen; /* associated data length */
3833 + unsigned short rlen; /* result length */
3836 struct akcipher_testvec {
3837 - unsigned char *key;
3840 + const unsigned char *key;
3841 + const unsigned char *m;
3842 + const unsigned char *c;
3843 unsigned int key_len;
3844 unsigned int m_size;
3845 unsigned int c_size;
3846 @@ -136,27 +150,227 @@ struct akcipher_testvec {
3849 struct kpp_testvec {
3850 - unsigned char *secret;
3851 - unsigned char *b_public;
3852 - unsigned char *expected_a_public;
3853 - unsigned char *expected_ss;
3854 + const unsigned char *secret;
3855 + const unsigned char *b_public;
3856 + const unsigned char *expected_a_public;
3857 + const unsigned char *expected_ss;
3858 unsigned short secret_size;
3859 unsigned short b_public_size;
3860 unsigned short expected_a_public_size;
3861 unsigned short expected_ss_size;
3864 -static char zeroed_string[48];
3865 +static const char zeroed_string[48];
3868 - * RSA test vectors. Borrowed from openSSL.
3869 + * TLS1.0 synthetic test vectors
3871 -#ifdef CONFIG_CRYPTO_FIPS
3872 -#define RSA_TEST_VECTORS 2
3873 +static struct tls_testvec tls_enc_tv_template[] = {
3875 +#ifdef __LITTLE_ENDIAN
3876 + .key = "\x08\x00" /* rta length */
3877 + "\x01\x00" /* rta type */
3879 + .key = "\x00\x08" /* rta length */
3880 + "\x00\x01" /* rta type */
3882 + "\x00\x00\x00\x10" /* enc key length */
3883 + "authenticationkey20benckeyis16_bytes",
3884 + .klen = 8 + 20 + 16,
3885 + .iv = "iv0123456789abcd",
3886 + .input = "Single block msg",
3888 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3889 + "\x00\x03\x01\x00\x10",
3891 + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3892 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3893 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3894 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3895 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3896 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3897 + .rlen = 16 + 20 + 12,
3899 +#ifdef __LITTLE_ENDIAN
3900 + .key = "\x08\x00" /* rta length */
3901 + "\x01\x00" /* rta type */
3903 + .key = "\x00\x08" /* rta length */
3904 + "\x00\x01" /* rta type */
3906 + "\x00\x00\x00\x10" /* enc key length */
3907 + "authenticationkey20benckeyis16_bytes",
3908 + .klen = 8 + 20 + 16,
3909 + .iv = "iv0123456789abcd",
3912 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3913 + "\x00\x03\x01\x00\x00",
3915 + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
3916 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
3917 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
3918 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
3921 +#ifdef __LITTLE_ENDIAN
3922 + .key = "\x08\x00" /* rta length */
3923 + "\x01\x00" /* rta type */
3925 + .key = "\x00\x08" /* rta length */
3926 + "\x00\x01" /* rta type */
3928 + "\x00\x00\x00\x10" /* enc key length */
3929 + "authenticationkey20benckeyis16_bytes",
3930 + .klen = 8 + 20 + 16,
3931 + .iv = "iv0123456789abcd",
3932 + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
3933 + " plaintext285 bytes plaintext285 bytes plaintext285"
3934 + " bytes plaintext285 bytes plaintext285 bytes"
3935 + " plaintext285 bytes plaintext285 bytes plaintext285"
3936 + " bytes plaintext285 bytes plaintext285 bytes"
3937 + " plaintext285 bytes plaintext285 bytes plaintext285"
3938 + " bytes plaintext285 bytes plaintext",
3940 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3941 + "\x00\x03\x01\x01\x1d",
3943 + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
3944 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
3945 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
3946 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
3947 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
3948 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
3949 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
3950 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
3951 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
3952 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
3953 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
3954 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
3955 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
3956 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
3957 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
3958 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
3959 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
3960 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
3961 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
3962 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
3963 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
3964 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
3965 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
3966 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
3967 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
3968 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
3969 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
3970 + .rlen = 285 + 20 + 15,
3974 +static struct tls_testvec tls_dec_tv_template[] = {
3976 +#ifdef __LITTLE_ENDIAN
3977 + .key = "\x08\x00" /* rta length */
3978 + "\x01\x00" /* rta type */
3980 + .key = "\x00\x08" /* rta length */
3981 + "\x00\x01" /* rta type */
3983 + "\x00\x00\x00\x10" /* enc key length */
3984 + "authenticationkey20benckeyis16_bytes",
3985 + .klen = 8 + 20 + 16,
3986 + .iv = "iv0123456789abcd",
3987 + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3988 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3989 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3990 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3991 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3992 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3993 + .ilen = 16 + 20 + 12,
3994 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3995 + "\x00\x03\x01\x00\x30",
3997 + .result = "Single block msg",
4000 +#ifdef __LITTLE_ENDIAN
4001 + .key = "\x08\x00" /* rta length */
4002 + "\x01\x00" /* rta type */
4004 -#define RSA_TEST_VECTORS 5
4005 + .key = "\x00\x08" /* rta length */
4006 + "\x00\x01" /* rta type */
4008 -static struct akcipher_testvec rsa_tv_template[] = {
4009 + "\x00\x00\x00\x10" /* enc key length */
4010 + "authenticationkey20benckeyis16_bytes",
4011 + .klen = 8 + 20 + 16,
4012 + .iv = "iv0123456789abcd",
4013 + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
4014 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
4015 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
4016 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
4018 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
4019 + "\x00\x03\x01\x00\x20",
4024 +#ifdef __LITTLE_ENDIAN
4025 + .key = "\x08\x00" /* rta length */
4026 + "\x01\x00" /* rta type */
4028 + .key = "\x00\x08" /* rta length */
4029 + "\x00\x01" /* rta type */
4031 + "\x00\x00\x00\x10" /* enc key length */
4032 + "authenticationkey20benckeyis16_bytes",
4033 + .klen = 8 + 20 + 16,
4034 + .iv = "iv0123456789abcd",
4035 + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
4036 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
4037 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
4038 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
4039 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
4040 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
4041 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
4042 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
4043 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
4044 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
4045 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
4046 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
4047 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
4048 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
4049 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
4050 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
4051 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
4052 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
4053 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
4054 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
4055 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
4056 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
4057 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
4058 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
4059 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
4060 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
4061 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
4063 + .ilen = 285 + 20 + 15,
4064 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
4065 + "\x00\x03\x01\x01\x40",
4067 + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
4068 + " plaintext285 bytes plaintext285 bytes plaintext285"
4069 + " bytes plaintext285 bytes plaintext285 bytes"
4070 + " plaintext285 bytes plaintext285 bytes plaintext285"
4071 + " bytes plaintext285 bytes plaintext285 bytes"
4072 + " plaintext285 bytes plaintext285 bytes plaintext",
4078 + * RSA test vectors. Borrowed from openSSL.
4080 +static const struct akcipher_testvec rsa_tv_template[] = {
4082 #ifndef CONFIG_CRYPTO_FIPS
4084 @@ -340,6 +554,7 @@ static struct akcipher_testvec rsa_tv_te
4087 .public_key_vec = true,
4088 +#ifndef CONFIG_CRYPTO_FIPS
4091 "\x30\x82\x09\x29" /* sequence of 2345 bytes */
4092 @@ -538,12 +753,11 @@ static struct akcipher_testvec rsa_tv_te
4100 -#define DH_TEST_VECTORS 2
4102 -struct kpp_testvec dh_tv_template[] = {
4103 +static const struct kpp_testvec dh_tv_template[] = {
4106 #ifdef __LITTLE_ENDIAN
4107 @@ -760,12 +974,7 @@ struct kpp_testvec dh_tv_template[] = {
4111 -#ifdef CONFIG_CRYPTO_FIPS
4112 -#define ECDH_TEST_VECTORS 1
4114 -#define ECDH_TEST_VECTORS 2
4116 -struct kpp_testvec ecdh_tv_template[] = {
4117 +static const struct kpp_testvec ecdh_tv_template[] = {
4119 #ifndef CONFIG_CRYPTO_FIPS
4121 @@ -856,9 +1065,7 @@ struct kpp_testvec ecdh_tv_template[] =
4123 * MD4 test vectors from RFC1320
4125 -#define MD4_TEST_VECTORS 7
4127 -static struct hash_testvec md4_tv_template [] = {
4128 +static const struct hash_testvec md4_tv_template[] = {
4131 .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
4132 @@ -899,8 +1106,7 @@ static struct hash_testvec md4_tv_templa
4136 -#define SHA3_224_TEST_VECTORS 3
4137 -static struct hash_testvec sha3_224_tv_template[] = {
4138 +static const struct hash_testvec sha3_224_tv_template[] = {
4141 .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
4142 @@ -925,8 +1131,7 @@ static struct hash_testvec sha3_224_tv_t
4146 -#define SHA3_256_TEST_VECTORS 3
4147 -static struct hash_testvec sha3_256_tv_template[] = {
4148 +static const struct hash_testvec sha3_256_tv_template[] = {
4151 .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
4152 @@ -952,8 +1157,7 @@ static struct hash_testvec sha3_256_tv_t
4156 -#define SHA3_384_TEST_VECTORS 3
4157 -static struct hash_testvec sha3_384_tv_template[] = {
4158 +static const struct hash_testvec sha3_384_tv_template[] = {
4161 .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
4162 @@ -985,8 +1189,7 @@ static struct hash_testvec sha3_384_tv_t
4166 -#define SHA3_512_TEST_VECTORS 3
4167 -static struct hash_testvec sha3_512_tv_template[] = {
4168 +static const struct hash_testvec sha3_512_tv_template[] = {
4171 .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
4172 @@ -1027,9 +1230,7 @@ static struct hash_testvec sha3_512_tv_t
4174 * MD5 test vectors from RFC1321
4176 -#define MD5_TEST_VECTORS 7
4178 -static struct hash_testvec md5_tv_template[] = {
4179 +static const struct hash_testvec md5_tv_template[] = {
4181 .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
4182 "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
4183 @@ -1073,9 +1274,7 @@ static struct hash_testvec md5_tv_templa
4185 * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
4187 -#define RMD128_TEST_VECTORS 10
4189 -static struct hash_testvec rmd128_tv_template[] = {
4190 +static const struct hash_testvec rmd128_tv_template[] = {
4192 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
4193 "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
4194 @@ -1137,9 +1336,7 @@ static struct hash_testvec rmd128_tv_tem
4196 * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
4198 -#define RMD160_TEST_VECTORS 10
4200 -static struct hash_testvec rmd160_tv_template[] = {
4201 +static const struct hash_testvec rmd160_tv_template[] = {
4203 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
4204 "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
4205 @@ -1201,9 +1398,7 @@ static struct hash_testvec rmd160_tv_tem
4207 * RIPEMD-256 test vectors
4209 -#define RMD256_TEST_VECTORS 8
4211 -static struct hash_testvec rmd256_tv_template[] = {
4212 +static const struct hash_testvec rmd256_tv_template[] = {
4214 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
4215 "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
4216 @@ -1269,9 +1464,7 @@ static struct hash_testvec rmd256_tv_tem
4218 * RIPEMD-320 test vectors
4220 -#define RMD320_TEST_VECTORS 8
4222 -static struct hash_testvec rmd320_tv_template[] = {
4223 +static const struct hash_testvec rmd320_tv_template[] = {
4225 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
4226 "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
4227 @@ -1334,36 +1527,49 @@ static struct hash_testvec rmd320_tv_tem
4231 -#define CRCT10DIF_TEST_VECTORS 3
4232 -static struct hash_testvec crct10dif_tv_template[] = {
4233 +static const struct hash_testvec crct10dif_tv_template[] = {
4235 - .plaintext = "abc",
4237 -#ifdef __LITTLE_ENDIAN
4238 - .digest = "\x3b\x44",
4240 - .digest = "\x44\x3b",
4243 - .plaintext = "1234567890123456789012345678901234567890"
4244 - "123456789012345678901234567890123456789",
4246 -#ifdef __LITTLE_ENDIAN
4247 - .digest = "\x70\x4b",
4249 - .digest = "\x4b\x70",
4253 - "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
4255 -#ifdef __LITTLE_ENDIAN
4256 - .digest = "\xe3\x9c",
4258 - .digest = "\x9c\xe3",
4262 + .plaintext = "abc",
4264 + .digest = (u8 *)(u16 []){ 0x443b },
4266 + .plaintext = "1234567890123456789012345678901234567890"
4267 + "123456789012345678901234567890123456789",
4269 + .digest = (u8 *)(u16 []){ 0x4b70 },
4271 + .tap = { 63, 16 },
4273 + .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
4276 + .digest = (u8 *)(u16 []){ 0x9ce3 },
4278 + .tap = { 1, 2, 28, 7, 6, 5, 4, 3 },
4280 + .plaintext = "1234567890123456789012345678901234567890"
4281 + "1234567890123456789012345678901234567890"
4282 + "1234567890123456789012345678901234567890"
4283 + "1234567890123456789012345678901234567890"
4284 + "1234567890123456789012345678901234567890"
4285 + "1234567890123456789012345678901234567890"
4286 + "1234567890123456789012345678901234567890"
4287 + "123456789012345678901234567890123456789",
4289 + .digest = (u8 *)(u16 []){ 0x44c6 },
4291 + .plaintext = "1234567890123456789012345678901234567890"
4292 + "1234567890123456789012345678901234567890"
4293 + "1234567890123456789012345678901234567890"
4294 + "1234567890123456789012345678901234567890"
4295 + "1234567890123456789012345678901234567890"
4296 + "1234567890123456789012345678901234567890"
4297 + "1234567890123456789012345678901234567890"
4298 + "123456789012345678901234567890123456789",
4300 + .digest = (u8 *)(u16 []){ 0x44c6 },
4302 + .tap = { 1, 255, 57, 6 },
4306 @@ -1371,9 +1577,7 @@ static struct hash_testvec crct10dif_tv_
4307 * SHA1 test vectors from from FIPS PUB 180-1
4308 * Long vector from CAVS 5.0
4310 -#define SHA1_TEST_VECTORS 6
4312 -static struct hash_testvec sha1_tv_template[] = {
4313 +static const struct hash_testvec sha1_tv_template[] = {
4317 @@ -1563,9 +1767,7 @@ static struct hash_testvec sha1_tv_templ
4319 * SHA224 test vectors from from FIPS PUB 180-2
4321 -#define SHA224_TEST_VECTORS 5
4323 -static struct hash_testvec sha224_tv_template[] = {
4324 +static const struct hash_testvec sha224_tv_template[] = {
4328 @@ -1737,9 +1939,7 @@ static struct hash_testvec sha224_tv_tem
4330 * SHA256 test vectors from from NIST
4332 -#define SHA256_TEST_VECTORS 5
4334 -static struct hash_testvec sha256_tv_template[] = {
4335 +static const struct hash_testvec sha256_tv_template[] = {
4339 @@ -1910,9 +2110,7 @@ static struct hash_testvec sha256_tv_tem
4341 * SHA384 test vectors from from NIST and kerneli
4343 -#define SHA384_TEST_VECTORS 6
4345 -static struct hash_testvec sha384_tv_template[] = {
4346 +static const struct hash_testvec sha384_tv_template[] = {
4350 @@ -2104,9 +2302,7 @@ static struct hash_testvec sha384_tv_tem
4352 * SHA512 test vectors from from NIST and kerneli
4354 -#define SHA512_TEST_VECTORS 6
4356 -static struct hash_testvec sha512_tv_template[] = {
4357 +static const struct hash_testvec sha512_tv_template[] = {
4361 @@ -2313,9 +2509,7 @@ static struct hash_testvec sha512_tv_tem
4362 * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE
4365 -#define WP512_TEST_VECTORS 8
4367 -static struct hash_testvec wp512_tv_template[] = {
4368 +static const struct hash_testvec wp512_tv_template[] = {
4372 @@ -2411,9 +2605,7 @@ static struct hash_testvec wp512_tv_temp
4376 -#define WP384_TEST_VECTORS 8
4378 -static struct hash_testvec wp384_tv_template[] = {
4379 +static const struct hash_testvec wp384_tv_template[] = {
4383 @@ -2493,9 +2685,7 @@ static struct hash_testvec wp384_tv_temp
4387 -#define WP256_TEST_VECTORS 8
4389 -static struct hash_testvec wp256_tv_template[] = {
4390 +static const struct hash_testvec wp256_tv_template[] = {
4394 @@ -2562,9 +2752,7 @@ static struct hash_testvec wp256_tv_temp
4396 * TIGER test vectors from Tiger website
4398 -#define TGR192_TEST_VECTORS 6
4400 -static struct hash_testvec tgr192_tv_template[] = {
4401 +static const struct hash_testvec tgr192_tv_template[] = {
4405 @@ -2607,9 +2795,7 @@ static struct hash_testvec tgr192_tv_tem
4409 -#define TGR160_TEST_VECTORS 6
4411 -static struct hash_testvec tgr160_tv_template[] = {
4412 +static const struct hash_testvec tgr160_tv_template[] = {
4416 @@ -2652,9 +2838,7 @@ static struct hash_testvec tgr160_tv_tem
4420 -#define TGR128_TEST_VECTORS 6
4422 -static struct hash_testvec tgr128_tv_template[] = {
4423 +static const struct hash_testvec tgr128_tv_template[] = {
4427 @@ -2691,9 +2875,7 @@ static struct hash_testvec tgr128_tv_tem
4431 -#define GHASH_TEST_VECTORS 6
4433 -static struct hash_testvec ghash_tv_template[] =
4434 +static const struct hash_testvec ghash_tv_template[] =
4437 .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
4438 @@ -2808,9 +2990,7 @@ static struct hash_testvec ghash_tv_temp
4439 * HMAC-MD5 test vectors from RFC2202
4440 * (These need to be fixed to not use strlen).
4442 -#define HMAC_MD5_TEST_VECTORS 7
4444 -static struct hash_testvec hmac_md5_tv_template[] =
4445 +static const struct hash_testvec hmac_md5_tv_template[] =
4448 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4449 @@ -2890,9 +3070,7 @@ static struct hash_testvec hmac_md5_tv_t
4451 * HMAC-RIPEMD128 test vectors from RFC2286
4453 -#define HMAC_RMD128_TEST_VECTORS 7
4455 -static struct hash_testvec hmac_rmd128_tv_template[] = {
4456 +static const struct hash_testvec hmac_rmd128_tv_template[] = {
4458 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4460 @@ -2971,9 +3149,7 @@ static struct hash_testvec hmac_rmd128_t
4462 * HMAC-RIPEMD160 test vectors from RFC2286
4464 -#define HMAC_RMD160_TEST_VECTORS 7
4466 -static struct hash_testvec hmac_rmd160_tv_template[] = {
4467 +static const struct hash_testvec hmac_rmd160_tv_template[] = {
4469 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4471 @@ -3052,9 +3228,7 @@ static struct hash_testvec hmac_rmd160_t
4473 * HMAC-SHA1 test vectors from RFC2202
4475 -#define HMAC_SHA1_TEST_VECTORS 7
4477 -static struct hash_testvec hmac_sha1_tv_template[] = {
4478 +static const struct hash_testvec hmac_sha1_tv_template[] = {
4480 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4482 @@ -3135,9 +3309,7 @@ static struct hash_testvec hmac_sha1_tv_
4484 * SHA224 HMAC test vectors from RFC4231
4486 -#define HMAC_SHA224_TEST_VECTORS 4
4488 -static struct hash_testvec hmac_sha224_tv_template[] = {
4489 +static const struct hash_testvec hmac_sha224_tv_template[] = {
4491 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4492 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4493 @@ -3250,9 +3422,7 @@ static struct hash_testvec hmac_sha224_t
4494 * HMAC-SHA256 test vectors from
4495 * draft-ietf-ipsec-ciph-sha-256-01.txt
4497 -#define HMAC_SHA256_TEST_VECTORS 10
4499 -static struct hash_testvec hmac_sha256_tv_template[] = {
4500 +static const struct hash_testvec hmac_sha256_tv_template[] = {
4502 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
4503 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
4504 @@ -3387,9 +3557,7 @@ static struct hash_testvec hmac_sha256_t
4508 -#define CMAC_AES_TEST_VECTORS 6
4510 -static struct hash_testvec aes_cmac128_tv_template[] = {
4511 +static const struct hash_testvec aes_cmac128_tv_template[] = {
4512 { /* From NIST Special Publication 800-38B, AES-128 */
4513 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4514 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4515 @@ -3464,9 +3632,67 @@ static struct hash_testvec aes_cmac128_t
4519 -#define CMAC_DES3_EDE_TEST_VECTORS 4
4520 +static const struct hash_testvec aes_cbcmac_tv_template[] = {
4522 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4523 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4524 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4525 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
4526 + .digest = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
4527 + "\xa8\x9e\xca\xf3\x24\x66\xef\x97",
4531 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4532 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4533 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4534 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4535 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4536 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4538 + .digest = "\x9d\x0d\xd0\x63\xfb\xcb\x24\x43"
4539 + "\xf8\xf2\x76\x03\xac\x39\xb0\x9d",
4545 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4546 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4547 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4548 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4549 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4550 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4551 + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4552 + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4553 + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4554 + "\xad\x2b\x41\x7b\xe6\x6c\x37",
4555 + .digest = "\xc0\x71\x73\xb8\xa0\x2c\x11\x7c"
4556 + "\xaf\xdc\xb2\xf8\x89\x32\xa3\x3a",
4560 + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
4561 + "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
4562 + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
4563 + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
4564 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4565 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4566 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4567 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4568 + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4569 + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4570 + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4571 + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10"
4573 + .digest = "\x6a\x4e\xdb\x21\x47\x51\xdf\x4f"
4574 + "\xa8\x4d\x4c\x10\x3b\x72\x7d\xd6",
4580 -static struct hash_testvec des3_ede_cmac64_tv_template[] = {
4581 +static const struct hash_testvec des3_ede_cmac64_tv_template[] = {
4583 * From NIST Special Publication 800-38B, Three Key TDEA
4584 * Corrected test vectors from:
4585 @@ -3512,9 +3738,7 @@ static struct hash_testvec des3_ede_cmac
4589 -#define XCBC_AES_TEST_VECTORS 6
4591 -static struct hash_testvec aes_xcbc128_tv_template[] = {
4592 +static const struct hash_testvec aes_xcbc128_tv_template[] = {
4594 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4595 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4596 @@ -3580,36 +3804,35 @@ static struct hash_testvec aes_xcbc128_t
4600 -#define VMAC_AES_TEST_VECTORS 11
4601 -static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4602 - '\x02', '\x03', '\x02', '\x02',
4603 - '\x02', '\x04', '\x01', '\x07',
4604 - '\x04', '\x01', '\x04', '\x03',};
4605 -static char vmac_string2[128] = {'a', 'b', 'c',};
4606 -static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4607 - 'a', 'b', 'c', 'a', 'b', 'c',
4608 - 'a', 'b', 'c', 'a', 'b', 'c',
4609 - 'a', 'b', 'c', 'a', 'b', 'c',
4610 - 'a', 'b', 'c', 'a', 'b', 'c',
4611 - 'a', 'b', 'c', 'a', 'b', 'c',
4612 - 'a', 'b', 'c', 'a', 'b', 'c',
4613 - 'a', 'b', 'c', 'a', 'b', 'c',
4616 -static char vmac_string4[17] = {'b', 'c', 'e', 'f',
4617 - 'i', 'j', 'l', 'm',
4618 - 'o', 'p', 'r', 's',
4619 - 't', 'u', 'w', 'x', 'z'};
4621 -static char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4622 - 'o', 'l', 'k', ']', '%',
4623 - '9', '2', '7', '!', 'A'};
4625 -static char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4626 - 'i', '!', '#', 'w', '0',
4627 - 'z', '/', '4', 'A', 'n'};
4628 +static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4629 + '\x02', '\x03', '\x02', '\x02',
4630 + '\x02', '\x04', '\x01', '\x07',
4631 + '\x04', '\x01', '\x04', '\x03',};
4632 +static const char vmac_string2[128] = {'a', 'b', 'c',};
4633 +static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4634 + 'a', 'b', 'c', 'a', 'b', 'c',
4635 + 'a', 'b', 'c', 'a', 'b', 'c',
4636 + 'a', 'b', 'c', 'a', 'b', 'c',
4637 + 'a', 'b', 'c', 'a', 'b', 'c',
4638 + 'a', 'b', 'c', 'a', 'b', 'c',
4639 + 'a', 'b', 'c', 'a', 'b', 'c',
4640 + 'a', 'b', 'c', 'a', 'b', 'c',
4643 +static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
4644 + 'i', 'j', 'l', 'm',
4645 + 'o', 'p', 'r', 's',
4646 + 't', 'u', 'w', 'x', 'z'};
4648 +static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4649 + 'o', 'l', 'k', ']', '%',
4650 + '9', '2', '7', '!', 'A'};
4652 +static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4653 + 'i', '!', '#', 'w', '0',
4654 + 'z', '/', '4', 'A', 'n'};
4656 -static struct hash_testvec aes_vmac128_tv_template[] = {
4657 +static const struct hash_testvec aes_vmac128_tv_template[] = {
4659 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4660 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4661 @@ -3687,9 +3910,7 @@ static struct hash_testvec aes_vmac128_t
4662 * SHA384 HMAC test vectors from RFC4231
4665 -#define HMAC_SHA384_TEST_VECTORS 4
4667 -static struct hash_testvec hmac_sha384_tv_template[] = {
4668 +static const struct hash_testvec hmac_sha384_tv_template[] = {
4670 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4671 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4672 @@ -3787,9 +4008,7 @@ static struct hash_testvec hmac_sha384_t
4673 * SHA512 HMAC test vectors from RFC4231
4676 -#define HMAC_SHA512_TEST_VECTORS 4
4678 -static struct hash_testvec hmac_sha512_tv_template[] = {
4679 +static const struct hash_testvec hmac_sha512_tv_template[] = {
4681 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4682 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4683 @@ -3894,9 +4113,7 @@ static struct hash_testvec hmac_sha512_t
4687 -#define HMAC_SHA3_224_TEST_VECTORS 4
4689 -static struct hash_testvec hmac_sha3_224_tv_template[] = {
4690 +static const struct hash_testvec hmac_sha3_224_tv_template[] = {
4692 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4693 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4694 @@ -3985,9 +4202,7 @@ static struct hash_testvec hmac_sha3_224
4698 -#define HMAC_SHA3_256_TEST_VECTORS 4
4700 -static struct hash_testvec hmac_sha3_256_tv_template[] = {
4701 +static const struct hash_testvec hmac_sha3_256_tv_template[] = {
4703 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4704 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4705 @@ -4076,9 +4291,7 @@ static struct hash_testvec hmac_sha3_256
4709 -#define HMAC_SHA3_384_TEST_VECTORS 4
4711 -static struct hash_testvec hmac_sha3_384_tv_template[] = {
4712 +static const struct hash_testvec hmac_sha3_384_tv_template[] = {
4714 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4715 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4716 @@ -4175,9 +4388,7 @@ static struct hash_testvec hmac_sha3_384
4720 -#define HMAC_SHA3_512_TEST_VECTORS 4
4722 -static struct hash_testvec hmac_sha3_512_tv_template[] = {
4723 +static const struct hash_testvec hmac_sha3_512_tv_template[] = {
4725 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4726 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4727 @@ -4286,9 +4497,7 @@ static struct hash_testvec hmac_sha3_512
4728 * Poly1305 test vectors from RFC7539 A.3.
4731 -#define POLY1305_TEST_VECTORS 11
4733 -static struct hash_testvec poly1305_tv_template[] = {
4734 +static const struct hash_testvec poly1305_tv_template[] = {
4735 { /* Test Vector #1 */
4736 .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
4737 "\x00\x00\x00\x00\x00\x00\x00\x00"
4738 @@ -4575,20 +4784,7 @@ static struct hash_testvec poly1305_tv_t
4742 -#define DES_ENC_TEST_VECTORS 11
4743 -#define DES_DEC_TEST_VECTORS 5
4744 -#define DES_CBC_ENC_TEST_VECTORS 6
4745 -#define DES_CBC_DEC_TEST_VECTORS 5
4746 -#define DES_CTR_ENC_TEST_VECTORS 2
4747 -#define DES_CTR_DEC_TEST_VECTORS 2
4748 -#define DES3_EDE_ENC_TEST_VECTORS 4
4749 -#define DES3_EDE_DEC_TEST_VECTORS 4
4750 -#define DES3_EDE_CBC_ENC_TEST_VECTORS 2
4751 -#define DES3_EDE_CBC_DEC_TEST_VECTORS 2
4752 -#define DES3_EDE_CTR_ENC_TEST_VECTORS 2
4753 -#define DES3_EDE_CTR_DEC_TEST_VECTORS 2
4755 -static struct cipher_testvec des_enc_tv_template[] = {
4756 +static const struct cipher_testvec des_enc_tv_template[] = {
4757 { /* From Applied Cryptography */
4758 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4760 @@ -4762,7 +4958,7 @@ static struct cipher_testvec des_enc_tv_
4764 -static struct cipher_testvec des_dec_tv_template[] = {
4765 +static const struct cipher_testvec des_dec_tv_template[] = {
4766 { /* From Applied Cryptography */
4767 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4769 @@ -4872,7 +5068,7 @@ static struct cipher_testvec des_dec_tv_
4773 -static struct cipher_testvec des_cbc_enc_tv_template[] = {
4774 +static const struct cipher_testvec des_cbc_enc_tv_template[] = {
4775 { /* From OpenSSL */
4776 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4778 @@ -4998,7 +5194,7 @@ static struct cipher_testvec des_cbc_enc
4782 -static struct cipher_testvec des_cbc_dec_tv_template[] = {
4783 +static const struct cipher_testvec des_cbc_dec_tv_template[] = {
4785 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4787 @@ -5107,7 +5303,7 @@ static struct cipher_testvec des_cbc_dec
4791 -static struct cipher_testvec des_ctr_enc_tv_template[] = {
4792 +static const struct cipher_testvec des_ctr_enc_tv_template[] = {
4793 { /* Generated with Crypto++ */
4794 .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4796 @@ -5253,7 +5449,7 @@ static struct cipher_testvec des_ctr_enc
4800 -static struct cipher_testvec des_ctr_dec_tv_template[] = {
4801 +static const struct cipher_testvec des_ctr_dec_tv_template[] = {
4802 { /* Generated with Crypto++ */
4803 .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4805 @@ -5399,7 +5595,7 @@ static struct cipher_testvec des_ctr_dec
4809 -static struct cipher_testvec des3_ede_enc_tv_template[] = {
4810 +static const struct cipher_testvec des3_ede_enc_tv_template[] = {
4811 { /* These are from openssl */
4812 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4813 "\x55\x55\x55\x55\x55\x55\x55\x55"
4814 @@ -5564,7 +5760,7 @@ static struct cipher_testvec des3_ede_en
4818 -static struct cipher_testvec des3_ede_dec_tv_template[] = {
4819 +static const struct cipher_testvec des3_ede_dec_tv_template[] = {
4820 { /* These are from openssl */
4821 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4822 "\x55\x55\x55\x55\x55\x55\x55\x55"
4823 @@ -5729,7 +5925,7 @@ static struct cipher_testvec des3_ede_de
4827 -static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4828 +static const struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4829 { /* Generated from openssl */
4830 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4831 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4832 @@ -5909,7 +6105,7 @@ static struct cipher_testvec des3_ede_cb
4836 -static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4837 +static const struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4838 { /* Generated from openssl */
4839 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4840 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4841 @@ -6089,7 +6285,7 @@ static struct cipher_testvec des3_ede_cb
4845 -static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4846 +static const struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4847 { /* Generated with Crypto++ */
4848 .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4849 "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4850 @@ -6367,7 +6563,7 @@ static struct cipher_testvec des3_ede_ct
4854 -static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4855 +static const struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4856 { /* Generated with Crypto++ */
4857 .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4858 "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4859 @@ -6648,14 +6844,7 @@ static struct cipher_testvec des3_ede_ct
4861 * Blowfish test vectors.
4863 -#define BF_ENC_TEST_VECTORS 7
4864 -#define BF_DEC_TEST_VECTORS 7
4865 -#define BF_CBC_ENC_TEST_VECTORS 2
4866 -#define BF_CBC_DEC_TEST_VECTORS 2
4867 -#define BF_CTR_ENC_TEST_VECTORS 2
4868 -#define BF_CTR_DEC_TEST_VECTORS 2
4870 -static struct cipher_testvec bf_enc_tv_template[] = {
4871 +static const struct cipher_testvec bf_enc_tv_template[] = {
4872 { /* DES test vectors from OpenSSL */
4873 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
4875 @@ -6847,7 +7036,7 @@ static struct cipher_testvec bf_enc_tv_t
4879 -static struct cipher_testvec bf_dec_tv_template[] = {
4880 +static const struct cipher_testvec bf_dec_tv_template[] = {
4881 { /* DES test vectors from OpenSSL */
4882 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
4884 @@ -7039,7 +7228,7 @@ static struct cipher_testvec bf_dec_tv_t
4888 -static struct cipher_testvec bf_cbc_enc_tv_template[] = {
4889 +static const struct cipher_testvec bf_cbc_enc_tv_template[] = {
4890 { /* From OpenSSL */
4891 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4892 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4893 @@ -7196,7 +7385,7 @@ static struct cipher_testvec bf_cbc_enc_
4897 -static struct cipher_testvec bf_cbc_dec_tv_template[] = {
4898 +static const struct cipher_testvec bf_cbc_dec_tv_template[] = {
4899 { /* From OpenSSL */
4900 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4901 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4902 @@ -7353,7 +7542,7 @@ static struct cipher_testvec bf_cbc_dec_
4906 -static struct cipher_testvec bf_ctr_enc_tv_template[] = {
4907 +static const struct cipher_testvec bf_ctr_enc_tv_template[] = {
4908 { /* Generated with Crypto++ */
4909 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4910 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4911 @@ -7765,7 +7954,7 @@ static struct cipher_testvec bf_ctr_enc_
4915 -static struct cipher_testvec bf_ctr_dec_tv_template[] = {
4916 +static const struct cipher_testvec bf_ctr_dec_tv_template[] = {
4917 { /* Generated with Crypto++ */
4918 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4919 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4920 @@ -8180,18 +8369,7 @@ static struct cipher_testvec bf_ctr_dec_
4922 * Twofish test vectors.
4924 -#define TF_ENC_TEST_VECTORS 4
4925 -#define TF_DEC_TEST_VECTORS 4
4926 -#define TF_CBC_ENC_TEST_VECTORS 5
4927 -#define TF_CBC_DEC_TEST_VECTORS 5
4928 -#define TF_CTR_ENC_TEST_VECTORS 2
4929 -#define TF_CTR_DEC_TEST_VECTORS 2
4930 -#define TF_LRW_ENC_TEST_VECTORS 8
4931 -#define TF_LRW_DEC_TEST_VECTORS 8
4932 -#define TF_XTS_ENC_TEST_VECTORS 5
4933 -#define TF_XTS_DEC_TEST_VECTORS 5
4935 -static struct cipher_testvec tf_enc_tv_template[] = {
4936 +static const struct cipher_testvec tf_enc_tv_template[] = {
4938 .key = zeroed_string,
4940 @@ -8359,7 +8537,7 @@ static struct cipher_testvec tf_enc_tv_t
4944 -static struct cipher_testvec tf_dec_tv_template[] = {
4945 +static const struct cipher_testvec tf_dec_tv_template[] = {
4947 .key = zeroed_string,
4949 @@ -8527,7 +8705,7 @@ static struct cipher_testvec tf_dec_tv_t
4953 -static struct cipher_testvec tf_cbc_enc_tv_template[] = {
4954 +static const struct cipher_testvec tf_cbc_enc_tv_template[] = {
4955 { /* Generated with Nettle */
4956 .key = zeroed_string,
4958 @@ -8710,7 +8888,7 @@ static struct cipher_testvec tf_cbc_enc_
4962 -static struct cipher_testvec tf_cbc_dec_tv_template[] = {
4963 +static const struct cipher_testvec tf_cbc_dec_tv_template[] = {
4964 { /* Reverse of the first four above */
4965 .key = zeroed_string,
4967 @@ -8893,7 +9071,7 @@ static struct cipher_testvec tf_cbc_dec_
4971 -static struct cipher_testvec tf_ctr_enc_tv_template[] = {
4972 +static const struct cipher_testvec tf_ctr_enc_tv_template[] = {
4973 { /* Generated with Crypto++ */
4974 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4975 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4976 @@ -9304,7 +9482,7 @@ static struct cipher_testvec tf_ctr_enc_
4980 -static struct cipher_testvec tf_ctr_dec_tv_template[] = {
4981 +static const struct cipher_testvec tf_ctr_dec_tv_template[] = {
4982 { /* Generated with Crypto++ */
4983 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4984 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4985 @@ -9715,7 +9893,7 @@ static struct cipher_testvec tf_ctr_dec_
4989 -static struct cipher_testvec tf_lrw_enc_tv_template[] = {
4990 +static const struct cipher_testvec tf_lrw_enc_tv_template[] = {
4991 /* Generated from AES-LRW test vectors */
4993 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
4994 @@ -9967,7 +10145,7 @@ static struct cipher_testvec tf_lrw_enc_
4998 -static struct cipher_testvec tf_lrw_dec_tv_template[] = {
4999 +static const struct cipher_testvec tf_lrw_dec_tv_template[] = {
5000 /* Generated from AES-LRW test vectors */
5001 /* same as enc vectors with input and result reversed */
5003 @@ -10220,7 +10398,7 @@ static struct cipher_testvec tf_lrw_dec_
5007 -static struct cipher_testvec tf_xts_enc_tv_template[] = {
5008 +static const struct cipher_testvec tf_xts_enc_tv_template[] = {
5009 /* Generated from AES-XTS test vectors */
5011 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5012 @@ -10562,7 +10740,7 @@ static struct cipher_testvec tf_xts_enc_
5016 -static struct cipher_testvec tf_xts_dec_tv_template[] = {
5017 +static const struct cipher_testvec tf_xts_dec_tv_template[] = {
5018 /* Generated from AES-XTS test vectors */
5019 /* same as enc vectors with input and result reversed */
5021 @@ -10909,25 +11087,7 @@ static struct cipher_testvec tf_xts_dec_
5022 * Serpent test vectors. These are backwards because Serpent writes
5023 * octet sequences in right-to-left mode.
5025 -#define SERPENT_ENC_TEST_VECTORS 5
5026 -#define SERPENT_DEC_TEST_VECTORS 5
5028 -#define TNEPRES_ENC_TEST_VECTORS 4
5029 -#define TNEPRES_DEC_TEST_VECTORS 4
5031 -#define SERPENT_CBC_ENC_TEST_VECTORS 1
5032 -#define SERPENT_CBC_DEC_TEST_VECTORS 1
5034 -#define SERPENT_CTR_ENC_TEST_VECTORS 2
5035 -#define SERPENT_CTR_DEC_TEST_VECTORS 2
5037 -#define SERPENT_LRW_ENC_TEST_VECTORS 8
5038 -#define SERPENT_LRW_DEC_TEST_VECTORS 8
5040 -#define SERPENT_XTS_ENC_TEST_VECTORS 5
5041 -#define SERPENT_XTS_DEC_TEST_VECTORS 5
5043 -static struct cipher_testvec serpent_enc_tv_template[] = {
5044 +static const struct cipher_testvec serpent_enc_tv_template[] = {
5046 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
5047 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5048 @@ -11103,7 +11263,7 @@ static struct cipher_testvec serpent_enc
5052 -static struct cipher_testvec tnepres_enc_tv_template[] = {
5053 +static const struct cipher_testvec tnepres_enc_tv_template[] = {
5054 { /* KeySize=128, PT=0, I=1 */
5055 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
5056 "\x00\x00\x00\x00\x00\x00\x00\x00",
5057 @@ -11153,7 +11313,7 @@ static struct cipher_testvec tnepres_enc
5061 -static struct cipher_testvec serpent_dec_tv_template[] = {
5062 +static const struct cipher_testvec serpent_dec_tv_template[] = {
5064 .input = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
5065 "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
5066 @@ -11329,7 +11489,7 @@ static struct cipher_testvec serpent_dec
5070 -static struct cipher_testvec tnepres_dec_tv_template[] = {
5071 +static const struct cipher_testvec tnepres_dec_tv_template[] = {
5073 .input = "\x41\xcc\x6b\x31\x59\x31\x45\x97"
5074 "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
5075 @@ -11370,7 +11530,7 @@ static struct cipher_testvec tnepres_dec
5079 -static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5080 +static const struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5081 { /* Generated with Crypto++ */
5082 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5083 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5084 @@ -11511,7 +11671,7 @@ static struct cipher_testvec serpent_cbc
5088 -static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5089 +static const struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5090 { /* Generated with Crypto++ */
5091 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5092 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5093 @@ -11652,7 +11812,7 @@ static struct cipher_testvec serpent_cbc
5097 -static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5098 +static const struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5099 { /* Generated with Crypto++ */
5100 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5101 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5102 @@ -12063,7 +12223,7 @@ static struct cipher_testvec serpent_ctr
5106 -static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5107 +static const struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5108 { /* Generated with Crypto++ */
5109 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5110 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5111 @@ -12474,7 +12634,7 @@ static struct cipher_testvec serpent_ctr
5115 -static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5116 +static const struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5117 /* Generated from AES-LRW test vectors */
5119 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5120 @@ -12726,7 +12886,7 @@ static struct cipher_testvec serpent_lrw
5124 -static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5125 +static const struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5126 /* Generated from AES-LRW test vectors */
5127 /* same as enc vectors with input and result reversed */
5129 @@ -12979,7 +13139,7 @@ static struct cipher_testvec serpent_lrw
5133 -static struct cipher_testvec serpent_xts_enc_tv_template[] = {
5134 +static const struct cipher_testvec serpent_xts_enc_tv_template[] = {
5135 /* Generated from AES-XTS test vectors */
5137 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5138 @@ -13321,7 +13481,7 @@ static struct cipher_testvec serpent_xts
5142 -static struct cipher_testvec serpent_xts_dec_tv_template[] = {
5143 +static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
5144 /* Generated from AES-XTS test vectors */
5145 /* same as enc vectors with input and result reversed */
5147 @@ -13665,18 +13825,7 @@ static struct cipher_testvec serpent_xts
5150 /* Cast6 test vectors from RFC 2612 */
5151 -#define CAST6_ENC_TEST_VECTORS 4
5152 -#define CAST6_DEC_TEST_VECTORS 4
5153 -#define CAST6_CBC_ENC_TEST_VECTORS 1
5154 -#define CAST6_CBC_DEC_TEST_VECTORS 1
5155 -#define CAST6_CTR_ENC_TEST_VECTORS 2
5156 -#define CAST6_CTR_DEC_TEST_VECTORS 2
5157 -#define CAST6_LRW_ENC_TEST_VECTORS 1
5158 -#define CAST6_LRW_DEC_TEST_VECTORS 1
5159 -#define CAST6_XTS_ENC_TEST_VECTORS 1
5160 -#define CAST6_XTS_DEC_TEST_VECTORS 1
5162 -static struct cipher_testvec cast6_enc_tv_template[] = {
5163 +static const struct cipher_testvec cast6_enc_tv_template[] = {
5165 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5166 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5167 @@ -13847,7 +13996,7 @@ static struct cipher_testvec cast6_enc_t
5171 -static struct cipher_testvec cast6_dec_tv_template[] = {
5172 +static const struct cipher_testvec cast6_dec_tv_template[] = {
5174 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5175 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5176 @@ -14018,7 +14167,7 @@ static struct cipher_testvec cast6_dec_t
5180 -static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5181 +static const struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5182 { /* Generated from TF test vectors */
5183 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5184 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5185 @@ -14159,7 +14308,7 @@ static struct cipher_testvec cast6_cbc_e
5189 -static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5190 +static const struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5191 { /* Generated from TF test vectors */
5192 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5193 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5194 @@ -14300,7 +14449,7 @@ static struct cipher_testvec cast6_cbc_d
5198 -static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5199 +static const struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5200 { /* Generated from TF test vectors */
5201 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5202 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5203 @@ -14457,7 +14606,7 @@ static struct cipher_testvec cast6_ctr_e
5207 -static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5208 +static const struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5209 { /* Generated from TF test vectors */
5210 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5211 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5212 @@ -14614,7 +14763,7 @@ static struct cipher_testvec cast6_ctr_d
5216 -static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5217 +static const struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5218 { /* Generated from TF test vectors */
5219 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5220 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5221 @@ -14761,7 +14910,7 @@ static struct cipher_testvec cast6_lrw_e
5225 -static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5226 +static const struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5227 { /* Generated from TF test vectors */
5228 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5229 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5230 @@ -14908,7 +15057,7 @@ static struct cipher_testvec cast6_lrw_d
5234 -static struct cipher_testvec cast6_xts_enc_tv_template[] = {
5235 +static const struct cipher_testvec cast6_xts_enc_tv_template[] = {
5236 { /* Generated from TF test vectors */
5237 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
5238 "\x23\x53\x60\x28\x74\x71\x35\x26"
5239 @@ -15057,7 +15206,7 @@ static struct cipher_testvec cast6_xts_e
5243 -static struct cipher_testvec cast6_xts_dec_tv_template[] = {
5244 +static const struct cipher_testvec cast6_xts_dec_tv_template[] = {
5245 { /* Generated from TF test vectors */
5246 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
5247 "\x23\x53\x60\x28\x74\x71\x35\x26"
5248 @@ -15210,39 +15359,7 @@ static struct cipher_testvec cast6_xts_d
5252 -#define AES_ENC_TEST_VECTORS 4
5253 -#define AES_DEC_TEST_VECTORS 4
5254 -#define AES_CBC_ENC_TEST_VECTORS 5
5255 -#define AES_CBC_DEC_TEST_VECTORS 5
5256 -#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
5257 -#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
5258 -#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2
5259 -#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC 2
5260 -#define HMAC_SHA1_AES_CBC_ENC_TEST_VEC 7
5261 -#define HMAC_SHA256_AES_CBC_ENC_TEST_VEC 7
5262 -#define HMAC_SHA512_AES_CBC_ENC_TEST_VEC 7
5263 -#define AES_LRW_ENC_TEST_VECTORS 8
5264 -#define AES_LRW_DEC_TEST_VECTORS 8
5265 -#define AES_XTS_ENC_TEST_VECTORS 5
5266 -#define AES_XTS_DEC_TEST_VECTORS 5
5267 -#define AES_CTR_ENC_TEST_VECTORS 5
5268 -#define AES_CTR_DEC_TEST_VECTORS 5
5269 -#define AES_OFB_ENC_TEST_VECTORS 1
5270 -#define AES_OFB_DEC_TEST_VECTORS 1
5271 -#define AES_CTR_3686_ENC_TEST_VECTORS 7
5272 -#define AES_CTR_3686_DEC_TEST_VECTORS 6
5273 -#define AES_GCM_ENC_TEST_VECTORS 9
5274 -#define AES_GCM_DEC_TEST_VECTORS 8
5275 -#define AES_GCM_4106_ENC_TEST_VECTORS 23
5276 -#define AES_GCM_4106_DEC_TEST_VECTORS 23
5277 -#define AES_GCM_4543_ENC_TEST_VECTORS 1
5278 -#define AES_GCM_4543_DEC_TEST_VECTORS 2
5279 -#define AES_CCM_ENC_TEST_VECTORS 8
5280 -#define AES_CCM_DEC_TEST_VECTORS 7
5281 -#define AES_CCM_4309_ENC_TEST_VECTORS 7
5282 -#define AES_CCM_4309_DEC_TEST_VECTORS 10
5284 -static struct cipher_testvec aes_enc_tv_template[] = {
5285 +static const struct cipher_testvec aes_enc_tv_template[] = {
5286 { /* From FIPS-197 */
5287 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
5288 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5289 @@ -15414,7 +15531,7 @@ static struct cipher_testvec aes_enc_tv_
5293 -static struct cipher_testvec aes_dec_tv_template[] = {
5294 +static const struct cipher_testvec aes_dec_tv_template[] = {
5295 { /* From FIPS-197 */
5296 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
5297 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5298 @@ -15586,7 +15703,7 @@ static struct cipher_testvec aes_dec_tv_
5302 -static struct cipher_testvec aes_cbc_enc_tv_template[] = {
5303 +static const struct cipher_testvec aes_cbc_enc_tv_template[] = {
5304 { /* From RFC 3602 */
5305 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5306 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5307 @@ -15808,7 +15925,7 @@ static struct cipher_testvec aes_cbc_enc
5311 -static struct cipher_testvec aes_cbc_dec_tv_template[] = {
5312 +static const struct cipher_testvec aes_cbc_dec_tv_template[] = {
5313 { /* From RFC 3602 */
5314 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5315 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5316 @@ -16030,7 +16147,7 @@ static struct cipher_testvec aes_cbc_dec
5320 -static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5321 +static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5322 { /* Input data from RFC 2410 Case 1 */
5323 #ifdef __LITTLE_ENDIAN
5324 .key = "\x08\x00" /* rta length */
5325 @@ -16072,7 +16189,7 @@ static struct aead_testvec hmac_md5_ecb_
5329 -static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5330 +static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5332 #ifdef __LITTLE_ENDIAN
5333 .key = "\x08\x00" /* rta length */
5334 @@ -16114,7 +16231,7 @@ static struct aead_testvec hmac_md5_ecb_
5338 -static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5339 +static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5340 { /* RFC 3602 Case 1 */
5341 #ifdef __LITTLE_ENDIAN
5342 .key = "\x08\x00" /* rta length */
5343 @@ -16383,7 +16500,7 @@ static struct aead_testvec hmac_sha1_aes
5347 -static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5348 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5349 { /* Input data from RFC 2410 Case 1 */
5350 #ifdef __LITTLE_ENDIAN
5351 .key = "\x08\x00" /* rta length */
5352 @@ -16429,7 +16546,7 @@ static struct aead_testvec hmac_sha1_ecb
5356 -static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5357 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5359 #ifdef __LITTLE_ENDIAN
5360 .key = "\x08\x00" /* rta length */
5361 @@ -16475,7 +16592,7 @@ static struct aead_testvec hmac_sha1_ecb
5365 -static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5366 +static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5367 { /* RFC 3602 Case 1 */
5368 #ifdef __LITTLE_ENDIAN
5369 .key = "\x08\x00" /* rta length */
5370 @@ -16758,7 +16875,7 @@ static struct aead_testvec hmac_sha256_a
5374 -static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5375 +static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5376 { /* RFC 3602 Case 1 */
5377 #ifdef __LITTLE_ENDIAN
5378 .key = "\x08\x00" /* rta length */
5379 @@ -17097,9 +17214,7 @@ static struct aead_testvec hmac_sha512_a
5383 -#define HMAC_SHA1_DES_CBC_ENC_TEST_VEC 1
5385 -static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5386 +static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5387 { /*Generated with cryptopp*/
5388 #ifdef __LITTLE_ENDIAN
5389 .key = "\x08\x00" /* rta length */
5390 @@ -17158,9 +17273,7 @@ static struct aead_testvec hmac_sha1_des
5394 -#define HMAC_SHA224_DES_CBC_ENC_TEST_VEC 1
5396 -static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5397 +static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5398 { /*Generated with cryptopp*/
5399 #ifdef __LITTLE_ENDIAN
5400 .key = "\x08\x00" /* rta length */
5401 @@ -17219,9 +17332,7 @@ static struct aead_testvec hmac_sha224_d
5405 -#define HMAC_SHA256_DES_CBC_ENC_TEST_VEC 1
5407 -static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5408 +static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5409 { /*Generated with cryptopp*/
5410 #ifdef __LITTLE_ENDIAN
5411 .key = "\x08\x00" /* rta length */
5412 @@ -17282,9 +17393,7 @@ static struct aead_testvec hmac_sha256_d
5416 -#define HMAC_SHA384_DES_CBC_ENC_TEST_VEC 1
5418 -static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5419 +static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5420 { /*Generated with cryptopp*/
5421 #ifdef __LITTLE_ENDIAN
5422 .key = "\x08\x00" /* rta length */
5423 @@ -17349,9 +17458,7 @@ static struct aead_testvec hmac_sha384_d
5427 -#define HMAC_SHA512_DES_CBC_ENC_TEST_VEC 1
5429 -static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5430 +static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5431 { /*Generated with cryptopp*/
5432 #ifdef __LITTLE_ENDIAN
5433 .key = "\x08\x00" /* rta length */
5434 @@ -17420,9 +17527,7 @@ static struct aead_testvec hmac_sha512_d
5438 -#define HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC 1
5440 -static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5441 +static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5442 { /*Generated with cryptopp*/
5443 #ifdef __LITTLE_ENDIAN
5444 .key = "\x08\x00" /* rta length */
5445 @@ -17483,9 +17588,7 @@ static struct aead_testvec hmac_sha1_des
5449 -#define HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC 1
5451 -static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5452 +static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5453 { /*Generated with cryptopp*/
5454 #ifdef __LITTLE_ENDIAN
5455 .key = "\x08\x00" /* rta length */
5456 @@ -17546,9 +17649,7 @@ static struct aead_testvec hmac_sha224_d
5460 -#define HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC 1
5462 -static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5463 +static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5464 { /*Generated with cryptopp*/
5465 #ifdef __LITTLE_ENDIAN
5466 .key = "\x08\x00" /* rta length */
5467 @@ -17611,9 +17712,7 @@ static struct aead_testvec hmac_sha256_d
5471 -#define HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC 1
5473 -static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5474 +static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5475 { /*Generated with cryptopp*/
5476 #ifdef __LITTLE_ENDIAN
5477 .key = "\x08\x00" /* rta length */
5478 @@ -17680,9 +17779,7 @@ static struct aead_testvec hmac_sha384_d
5482 -#define HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC 1
5484 -static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5485 +static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5486 { /*Generated with cryptopp*/
5487 #ifdef __LITTLE_ENDIAN
5488 .key = "\x08\x00" /* rta length */
5489 @@ -17753,7 +17850,7 @@ static struct aead_testvec hmac_sha512_d
5493 -static struct cipher_testvec aes_lrw_enc_tv_template[] = {
5494 +static const struct cipher_testvec aes_lrw_enc_tv_template[] = {
5495 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5496 { /* LRW-32-AES 1 */
5497 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5498 @@ -18006,7 +18103,7 @@ static struct cipher_testvec aes_lrw_enc
5502 -static struct cipher_testvec aes_lrw_dec_tv_template[] = {
5503 +static const struct cipher_testvec aes_lrw_dec_tv_template[] = {
5504 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5505 /* same as enc vectors with input and result reversed */
5506 { /* LRW-32-AES 1 */
5507 @@ -18260,7 +18357,7 @@ static struct cipher_testvec aes_lrw_dec
5511 -static struct cipher_testvec aes_xts_enc_tv_template[] = {
5512 +static const struct cipher_testvec aes_xts_enc_tv_template[] = {
5513 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5515 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5516 @@ -18603,7 +18700,7 @@ static struct cipher_testvec aes_xts_enc
5520 -static struct cipher_testvec aes_xts_dec_tv_template[] = {
5521 +static const struct cipher_testvec aes_xts_dec_tv_template[] = {
5522 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5524 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5525 @@ -18947,7 +19044,7 @@ static struct cipher_testvec aes_xts_dec
5529 -static struct cipher_testvec aes_ctr_enc_tv_template[] = {
5530 +static const struct cipher_testvec aes_ctr_enc_tv_template[] = {
5531 { /* From NIST Special Publication 800-38A, Appendix F.5 */
5532 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5533 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5534 @@ -19302,7 +19399,7 @@ static struct cipher_testvec aes_ctr_enc
5538 -static struct cipher_testvec aes_ctr_dec_tv_template[] = {
5539 +static const struct cipher_testvec aes_ctr_dec_tv_template[] = {
5540 { /* From NIST Special Publication 800-38A, Appendix F.5 */
5541 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5542 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5543 @@ -19657,7 +19754,7 @@ static struct cipher_testvec aes_ctr_dec
5547 -static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5548 +static const struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5549 { /* From RFC 3686 */
5550 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5551 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5552 @@ -20789,7 +20886,7 @@ static struct cipher_testvec aes_ctr_rfc
5556 -static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5557 +static const struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5558 { /* From RFC 3686 */
5559 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5560 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5561 @@ -20880,7 +20977,7 @@ static struct cipher_testvec aes_ctr_rfc
5565 -static struct cipher_testvec aes_ofb_enc_tv_template[] = {
5566 +static const struct cipher_testvec aes_ofb_enc_tv_template[] = {
5567 /* From NIST Special Publication 800-38A, Appendix F.5 */
5569 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5570 @@ -20909,7 +21006,7 @@ static struct cipher_testvec aes_ofb_enc
5574 -static struct cipher_testvec aes_ofb_dec_tv_template[] = {
5575 +static const struct cipher_testvec aes_ofb_dec_tv_template[] = {
5576 /* From NIST Special Publication 800-38A, Appendix F.5 */
5578 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5579 @@ -20938,7 +21035,7 @@ static struct cipher_testvec aes_ofb_dec
5583 -static struct aead_testvec aes_gcm_enc_tv_template[] = {
5584 +static const struct aead_testvec aes_gcm_enc_tv_template[] = {
5585 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5586 .key = zeroed_string,
5588 @@ -21098,7 +21195,7 @@ static struct aead_testvec aes_gcm_enc_t
5592 -static struct aead_testvec aes_gcm_dec_tv_template[] = {
5593 +static const struct aead_testvec aes_gcm_dec_tv_template[] = {
5594 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5595 .key = zeroed_string,
5597 @@ -21300,7 +21397,7 @@ static struct aead_testvec aes_gcm_dec_t
5601 -static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5602 +static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5603 { /* Generated using Crypto++ */
5604 .key = zeroed_string,
5606 @@ -21913,7 +22010,7 @@ static struct aead_testvec aes_gcm_rfc41
5610 -static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5611 +static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5612 { /* Generated using Crypto++ */
5613 .key = zeroed_string,
5615 @@ -22527,7 +22624,7 @@ static struct aead_testvec aes_gcm_rfc41
5619 -static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5620 +static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5621 { /* From draft-mcgrew-gcm-test-01 */
5622 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5623 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5624 @@ -22558,7 +22655,7 @@ static struct aead_testvec aes_gcm_rfc45
5628 -static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5629 +static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5630 { /* From draft-mcgrew-gcm-test-01 */
5631 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5632 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5633 @@ -22617,7 +22714,7 @@ static struct aead_testvec aes_gcm_rfc45
5637 -static struct aead_testvec aes_ccm_enc_tv_template[] = {
5638 +static const struct aead_testvec aes_ccm_enc_tv_template[] = {
5639 { /* From RFC 3610 */
5640 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5641 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5642 @@ -22901,7 +22998,7 @@ static struct aead_testvec aes_ccm_enc_t
5646 -static struct aead_testvec aes_ccm_dec_tv_template[] = {
5647 +static const struct aead_testvec aes_ccm_dec_tv_template[] = {
5648 { /* From RFC 3610 */
5649 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5650 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5651 @@ -23233,7 +23330,7 @@ static struct aead_testvec aes_ccm_dec_t
5652 * These vectors are copied/generated from the ones for rfc4106 with
5653 * the key truncated by one byte..
5655 -static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5656 +static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5657 { /* Generated using Crypto++ */
5658 .key = zeroed_string,
5660 @@ -23846,7 +23943,7 @@ static struct aead_testvec aes_ccm_rfc43
5664 -static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
5665 +static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
5666 { /* Generated using Crypto++ */
5667 .key = zeroed_string,
5669 @@ -24462,9 +24559,7 @@ static struct aead_testvec aes_ccm_rfc43
5671 * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5.
5673 -#define RFC7539_ENC_TEST_VECTORS 2
5674 -#define RFC7539_DEC_TEST_VECTORS 2
5675 -static struct aead_testvec rfc7539_enc_tv_template[] = {
5676 +static const struct aead_testvec rfc7539_enc_tv_template[] = {
5678 .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
5679 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5680 @@ -24596,7 +24691,7 @@ static struct aead_testvec rfc7539_enc_t
5684 -static struct aead_testvec rfc7539_dec_tv_template[] = {
5685 +static const struct aead_testvec rfc7539_dec_tv_template[] = {
5687 .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
5688 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5689 @@ -24731,9 +24826,7 @@ static struct aead_testvec rfc7539_dec_t
5691 * draft-irtf-cfrg-chacha20-poly1305
5693 -#define RFC7539ESP_DEC_TEST_VECTORS 1
5694 -#define RFC7539ESP_ENC_TEST_VECTORS 1
5695 -static struct aead_testvec rfc7539esp_enc_tv_template[] = {
5696 +static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
5698 .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5699 "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5700 @@ -24821,7 +24914,7 @@ static struct aead_testvec rfc7539esp_en
5704 -static struct aead_testvec rfc7539esp_dec_tv_template[] = {
5705 +static const struct aead_testvec rfc7539esp_dec_tv_template[] = {
5707 .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5708 "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5709 @@ -24917,7 +25010,7 @@ static struct aead_testvec rfc7539esp_de
5710 * semiblock of the ciphertext from the test vector. For decryption, iv is
5711 * the first semiblock of the ciphertext.
5713 -static struct cipher_testvec aes_kw_enc_tv_template[] = {
5714 +static const struct cipher_testvec aes_kw_enc_tv_template[] = {
5716 .key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
5717 "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
5718 @@ -24932,7 +25025,7 @@ static struct cipher_testvec aes_kw_enc_
5722 -static struct cipher_testvec aes_kw_dec_tv_template[] = {
5723 +static const struct cipher_testvec aes_kw_dec_tv_template[] = {
5725 .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
5726 "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
5727 @@ -24955,9 +25048,7 @@ static struct cipher_testvec aes_kw_dec_
5728 * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
5729 * Only AES-128 is supported at this time.
5731 -#define ANSI_CPRNG_AES_TEST_VECTORS 6
5733 -static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5734 +static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5736 .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
5737 "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
5738 @@ -25053,7 +25144,7 @@ static struct cprng_testvec ansi_cprng_a
5739 * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5740 * w/o personalization string, w/ and w/o additional input string).
5742 -static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5743 +static const struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5745 .entropy = (unsigned char *)
5746 "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
5747 @@ -25211,7 +25302,7 @@ static struct drbg_testvec drbg_pr_sha25
5751 -static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5752 +static const struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5754 .entropy = (unsigned char *)
5755 "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
5756 @@ -25369,7 +25460,7 @@ static struct drbg_testvec drbg_pr_hmac_
5760 -static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5761 +static const struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5763 .entropy = (unsigned char *)
5764 "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
5765 @@ -25493,7 +25584,7 @@ static struct drbg_testvec drbg_pr_ctr_a
5766 * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5767 * w/o personalization string, w/ and w/o additional input string).
5769 -static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5770 +static const struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5772 .entropy = (unsigned char *)
5773 "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
5774 @@ -25615,7 +25706,7 @@ static struct drbg_testvec drbg_nopr_sha
5778 -static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5779 +static const struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5781 .entropy = (unsigned char *)
5782 "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
5783 @@ -25737,7 +25828,7 @@ static struct drbg_testvec drbg_nopr_hma
5787 -static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5788 +static const struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5790 .entropy = (unsigned char *)
5791 "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
5792 @@ -25761,7 +25852,7 @@ static struct drbg_testvec drbg_nopr_ctr
5796 -static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5797 +static const struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5799 .entropy = (unsigned char *)
5800 "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
5801 @@ -25785,7 +25876,7 @@ static struct drbg_testvec drbg_nopr_ctr
5805 -static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5806 +static const struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5808 .entropy = (unsigned char *)
5809 "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
5810 @@ -25874,14 +25965,7 @@ static struct drbg_testvec drbg_nopr_ctr
5813 /* Cast5 test vectors from RFC 2144 */
5814 -#define CAST5_ENC_TEST_VECTORS 4
5815 -#define CAST5_DEC_TEST_VECTORS 4
5816 -#define CAST5_CBC_ENC_TEST_VECTORS 1
5817 -#define CAST5_CBC_DEC_TEST_VECTORS 1
5818 -#define CAST5_CTR_ENC_TEST_VECTORS 2
5819 -#define CAST5_CTR_DEC_TEST_VECTORS 2
5821 -static struct cipher_testvec cast5_enc_tv_template[] = {
5822 +static const struct cipher_testvec cast5_enc_tv_template[] = {
5824 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5825 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5826 @@ -26042,7 +26126,7 @@ static struct cipher_testvec cast5_enc_t
5830 -static struct cipher_testvec cast5_dec_tv_template[] = {
5831 +static const struct cipher_testvec cast5_dec_tv_template[] = {
5833 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5834 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5835 @@ -26203,7 +26287,7 @@ static struct cipher_testvec cast5_dec_t
5839 -static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5840 +static const struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5841 { /* Generated from TF test vectors */
5842 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5843 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5844 @@ -26341,7 +26425,7 @@ static struct cipher_testvec cast5_cbc_e
5848 -static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5849 +static const struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5850 { /* Generated from TF test vectors */
5851 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5852 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5853 @@ -26479,7 +26563,7 @@ static struct cipher_testvec cast5_cbc_d
5857 -static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5858 +static const struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5859 { /* Generated from TF test vectors */
5860 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5861 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5862 @@ -26630,7 +26714,7 @@ static struct cipher_testvec cast5_ctr_e
5866 -static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5867 +static const struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5868 { /* Generated from TF test vectors */
5869 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5870 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5871 @@ -26784,10 +26868,7 @@ static struct cipher_testvec cast5_ctr_d
5873 * ARC4 test vectors from OpenSSL
5875 -#define ARC4_ENC_TEST_VECTORS 7
5876 -#define ARC4_DEC_TEST_VECTORS 7
5878 -static struct cipher_testvec arc4_enc_tv_template[] = {
5879 +static const struct cipher_testvec arc4_enc_tv_template[] = {
5881 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5883 @@ -26853,7 +26934,7 @@ static struct cipher_testvec arc4_enc_tv
5887 -static struct cipher_testvec arc4_dec_tv_template[] = {
5888 +static const struct cipher_testvec arc4_dec_tv_template[] = {
5890 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5892 @@ -26922,10 +27003,7 @@ static struct cipher_testvec arc4_dec_tv
5896 -#define TEA_ENC_TEST_VECTORS 4
5897 -#define TEA_DEC_TEST_VECTORS 4
5899 -static struct cipher_testvec tea_enc_tv_template[] = {
5900 +static const struct cipher_testvec tea_enc_tv_template[] = {
5902 .key = zeroed_string,
5904 @@ -26968,7 +27046,7 @@ static struct cipher_testvec tea_enc_tv_
5908 -static struct cipher_testvec tea_dec_tv_template[] = {
5909 +static const struct cipher_testvec tea_dec_tv_template[] = {
5911 .key = zeroed_string,
5913 @@ -27014,10 +27092,7 @@ static struct cipher_testvec tea_dec_tv_
5917 -#define XTEA_ENC_TEST_VECTORS 4
5918 -#define XTEA_DEC_TEST_VECTORS 4
5920 -static struct cipher_testvec xtea_enc_tv_template[] = {
5921 +static const struct cipher_testvec xtea_enc_tv_template[] = {
5923 .key = zeroed_string,
5925 @@ -27060,7 +27135,7 @@ static struct cipher_testvec xtea_enc_tv
5929 -static struct cipher_testvec xtea_dec_tv_template[] = {
5930 +static const struct cipher_testvec xtea_dec_tv_template[] = {
5932 .key = zeroed_string,
5934 @@ -27106,10 +27181,7 @@ static struct cipher_testvec xtea_dec_tv
5936 * KHAZAD test vectors.
5938 -#define KHAZAD_ENC_TEST_VECTORS 5
5939 -#define KHAZAD_DEC_TEST_VECTORS 5
5941 -static struct cipher_testvec khazad_enc_tv_template[] = {
5942 +static const struct cipher_testvec khazad_enc_tv_template[] = {
5944 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
5945 "\x00\x00\x00\x00\x00\x00\x00\x00",
5946 @@ -27155,7 +27227,7 @@ static struct cipher_testvec khazad_enc_
5950 -static struct cipher_testvec khazad_dec_tv_template[] = {
5951 +static const struct cipher_testvec khazad_dec_tv_template[] = {
5953 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
5954 "\x00\x00\x00\x00\x00\x00\x00\x00",
5955 @@ -27205,12 +27277,7 @@ static struct cipher_testvec khazad_dec_
5956 * Anubis test vectors.
5959 -#define ANUBIS_ENC_TEST_VECTORS 5
5960 -#define ANUBIS_DEC_TEST_VECTORS 5
5961 -#define ANUBIS_CBC_ENC_TEST_VECTORS 2
5962 -#define ANUBIS_CBC_DEC_TEST_VECTORS 2
5964 -static struct cipher_testvec anubis_enc_tv_template[] = {
5965 +static const struct cipher_testvec anubis_enc_tv_template[] = {
5967 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5968 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5969 @@ -27273,7 +27340,7 @@ static struct cipher_testvec anubis_enc_
5973 -static struct cipher_testvec anubis_dec_tv_template[] = {
5974 +static const struct cipher_testvec anubis_dec_tv_template[] = {
5976 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5977 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5978 @@ -27336,7 +27403,7 @@ static struct cipher_testvec anubis_dec_
5982 -static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5983 +static const struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5985 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5986 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5987 @@ -27371,7 +27438,7 @@ static struct cipher_testvec anubis_cbc_
5991 -static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5992 +static const struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5994 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5995 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5996 @@ -27409,10 +27476,7 @@ static struct cipher_testvec anubis_cbc_
6000 -#define XETA_ENC_TEST_VECTORS 4
6001 -#define XETA_DEC_TEST_VECTORS 4
6003 -static struct cipher_testvec xeta_enc_tv_template[] = {
6004 +static const struct cipher_testvec xeta_enc_tv_template[] = {
6006 .key = zeroed_string,
6008 @@ -27455,7 +27519,7 @@ static struct cipher_testvec xeta_enc_tv
6012 -static struct cipher_testvec xeta_dec_tv_template[] = {
6013 +static const struct cipher_testvec xeta_dec_tv_template[] = {
6015 .key = zeroed_string,
6017 @@ -27501,10 +27565,7 @@ static struct cipher_testvec xeta_dec_tv
6019 * FCrypt test vectors
6021 -#define FCRYPT_ENC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
6022 -#define FCRYPT_DEC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
6024 -static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6025 +static const struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6026 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6027 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6029 @@ -27565,7 +27626,7 @@ static struct cipher_testvec fcrypt_pcbc
6033 -static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6034 +static const struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6035 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6036 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6038 @@ -27629,18 +27690,7 @@ static struct cipher_testvec fcrypt_pcbc
6040 * CAMELLIA test vectors.
6042 -#define CAMELLIA_ENC_TEST_VECTORS 4
6043 -#define CAMELLIA_DEC_TEST_VECTORS 4
6044 -#define CAMELLIA_CBC_ENC_TEST_VECTORS 3
6045 -#define CAMELLIA_CBC_DEC_TEST_VECTORS 3
6046 -#define CAMELLIA_CTR_ENC_TEST_VECTORS 2
6047 -#define CAMELLIA_CTR_DEC_TEST_VECTORS 2
6048 -#define CAMELLIA_LRW_ENC_TEST_VECTORS 8
6049 -#define CAMELLIA_LRW_DEC_TEST_VECTORS 8
6050 -#define CAMELLIA_XTS_ENC_TEST_VECTORS 5
6051 -#define CAMELLIA_XTS_DEC_TEST_VECTORS 5
6053 -static struct cipher_testvec camellia_enc_tv_template[] = {
6054 +static const struct cipher_testvec camellia_enc_tv_template[] = {
6056 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6057 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6058 @@ -27940,7 +27990,7 @@ static struct cipher_testvec camellia_en
6062 -static struct cipher_testvec camellia_dec_tv_template[] = {
6063 +static const struct cipher_testvec camellia_dec_tv_template[] = {
6065 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6066 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6067 @@ -28240,7 +28290,7 @@ static struct cipher_testvec camellia_de
6071 -static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6072 +static const struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6074 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6075 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6076 @@ -28536,7 +28586,7 @@ static struct cipher_testvec camellia_cb
6080 -static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6081 +static const struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6083 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6084 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6085 @@ -28832,7 +28882,7 @@ static struct cipher_testvec camellia_cb
6089 -static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6090 +static const struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6091 { /* Generated with Crypto++ */
6092 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6093 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6094 @@ -29499,7 +29549,7 @@ static struct cipher_testvec camellia_ct
6098 -static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6099 +static const struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6100 { /* Generated with Crypto++ */
6101 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6102 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6103 @@ -30166,7 +30216,7 @@ static struct cipher_testvec camellia_ct
6107 -static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6108 +static const struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6109 /* Generated from AES-LRW test vectors */
6111 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
6112 @@ -30418,7 +30468,7 @@ static struct cipher_testvec camellia_lr
6116 -static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6117 +static const struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6118 /* Generated from AES-LRW test vectors */
6119 /* same as enc vectors with input and result reversed */
6121 @@ -30671,7 +30721,7 @@ static struct cipher_testvec camellia_lr
6125 -static struct cipher_testvec camellia_xts_enc_tv_template[] = {
6126 +static const struct cipher_testvec camellia_xts_enc_tv_template[] = {
6127 /* Generated from AES-XTS test vectors */
6129 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6130 @@ -31013,7 +31063,7 @@ static struct cipher_testvec camellia_xt
6134 -static struct cipher_testvec camellia_xts_dec_tv_template[] = {
6135 +static const struct cipher_testvec camellia_xts_dec_tv_template[] = {
6136 /* Generated from AES-XTS test vectors */
6137 /* same as enc vectors with input and result reversed */
6139 @@ -31359,10 +31409,7 @@ static struct cipher_testvec camellia_xt
6143 -#define SEED_ENC_TEST_VECTORS 4
6144 -#define SEED_DEC_TEST_VECTORS 4
6146 -static struct cipher_testvec seed_enc_tv_template[] = {
6147 +static const struct cipher_testvec seed_enc_tv_template[] = {
6149 .key = zeroed_string,
6151 @@ -31404,7 +31451,7 @@ static struct cipher_testvec seed_enc_tv
6155 -static struct cipher_testvec seed_dec_tv_template[] = {
6156 +static const struct cipher_testvec seed_dec_tv_template[] = {
6158 .key = zeroed_string,
6160 @@ -31446,8 +31493,7 @@ static struct cipher_testvec seed_dec_tv
6164 -#define SALSA20_STREAM_ENC_TEST_VECTORS 5
6165 -static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6166 +static const struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6168 * Testvectors from verified.test-vectors submitted to ECRYPT.
6169 * They are truncated to size 39, 64, 111, 129 to test a variety
6170 @@ -32616,8 +32662,7 @@ static struct cipher_testvec salsa20_str
6174 -#define CHACHA20_ENC_TEST_VECTORS 4
6175 -static struct cipher_testvec chacha20_enc_tv_template[] = {
6176 +static const struct cipher_testvec chacha20_enc_tv_template[] = {
6177 { /* RFC7539 A.2. Test Vector #1 */
6178 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6179 "\x00\x00\x00\x00\x00\x00\x00\x00"
6180 @@ -33128,9 +33173,7 @@ static struct cipher_testvec chacha20_en
6182 * CTS (Cipher Text Stealing) mode tests
6184 -#define CTS_MODE_ENC_TEST_VECTORS 6
6185 -#define CTS_MODE_DEC_TEST_VECTORS 6
6186 -static struct cipher_testvec cts_mode_enc_tv_template[] = {
6187 +static const struct cipher_testvec cts_mode_enc_tv_template[] = {
6188 { /* from rfc3962 */
6190 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6191 @@ -33232,7 +33275,7 @@ static struct cipher_testvec cts_mode_en
6195 -static struct cipher_testvec cts_mode_dec_tv_template[] = {
6196 +static const struct cipher_testvec cts_mode_dec_tv_template[] = {
6197 { /* from rfc3962 */
6199 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6200 @@ -33350,10 +33393,7 @@ struct comp_testvec {
6201 * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
6204 -#define DEFLATE_COMP_TEST_VECTORS 2
6205 -#define DEFLATE_DECOMP_TEST_VECTORS 2
6207 -static struct comp_testvec deflate_comp_tv_template[] = {
6208 +static const struct comp_testvec deflate_comp_tv_template[] = {
6212 @@ -33389,7 +33429,7 @@ static struct comp_testvec deflate_comp_
6216 -static struct comp_testvec deflate_decomp_tv_template[] = {
6217 +static const struct comp_testvec deflate_decomp_tv_template[] = {
6221 @@ -33428,10 +33468,7 @@ static struct comp_testvec deflate_decom
6223 * LZO test vectors (null-terminated strings).
6225 -#define LZO_COMP_TEST_VECTORS 2
6226 -#define LZO_DECOMP_TEST_VECTORS 2
6228 -static struct comp_testvec lzo_comp_tv_template[] = {
6229 +static const struct comp_testvec lzo_comp_tv_template[] = {
6233 @@ -33471,7 +33508,7 @@ static struct comp_testvec lzo_comp_tv_t
6237 -static struct comp_testvec lzo_decomp_tv_template[] = {
6238 +static const struct comp_testvec lzo_decomp_tv_template[] = {
6242 @@ -33514,7 +33551,7 @@ static struct comp_testvec lzo_decomp_tv
6244 #define MICHAEL_MIC_TEST_VECTORS 6
6246 -static struct hash_testvec michael_mic_tv_template[] = {
6247 +static const struct hash_testvec michael_mic_tv_template[] = {
6249 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6251 @@ -33562,9 +33599,7 @@ static struct hash_testvec michael_mic_t
6253 * CRC32 test vectors
6255 -#define CRC32_TEST_VECTORS 14
6257 -static struct hash_testvec crc32_tv_template[] = {
6258 +static const struct hash_testvec crc32_tv_template[] = {
6260 .key = "\x87\xa9\xcb\xed",
6262 @@ -33996,9 +34031,7 @@ static struct hash_testvec crc32_tv_temp
6264 * CRC32C test vectors
6266 -#define CRC32C_TEST_VECTORS 15
6268 -static struct hash_testvec crc32c_tv_template[] = {
6269 +static const struct hash_testvec crc32c_tv_template[] = {
6272 .digest = "\x00\x00\x00\x00",
6273 @@ -34434,9 +34467,7 @@ static struct hash_testvec crc32c_tv_tem
6275 * Blakcifn CRC test vectors
6277 -#define BFIN_CRC_TEST_VECTORS 6
6279 -static struct hash_testvec bfin_crc_tv_template[] = {
6280 +static const struct hash_testvec bfin_crc_tv_template[] = {
6283 .digest = "\x00\x00\x00\x00",
6284 @@ -34521,9 +34552,6 @@ static struct hash_testvec bfin_crc_tv_t
6288 -#define LZ4_COMP_TEST_VECTORS 1
6289 -#define LZ4_DECOMP_TEST_VECTORS 1
6291 static struct comp_testvec lz4_comp_tv_template[] = {
6294 @@ -34554,9 +34582,6 @@ static struct comp_testvec lz4_decomp_tv
6298 -#define LZ4HC_COMP_TEST_VECTORS 1
6299 -#define LZ4HC_DECOMP_TEST_VECTORS 1
6301 static struct comp_testvec lz4hc_comp_tv_template[] = {
6308 + * Copyright 2013 Freescale Semiconductor, Inc.
6309 + * Copyright 2017 NXP Semiconductor, Inc.
6311 + * This program is free software; you can redistribute it and/or modify it
6312 + * under the terms of the GNU General Public License as published by the Free
6313 + * Software Foundation; either version 2 of the License, or (at your option)
6314 + * any later version.
6318 +#include <crypto/internal/aead.h>
6319 +#include <crypto/internal/hash.h>
6320 +#include <crypto/internal/skcipher.h>
6321 +#include <crypto/authenc.h>
6322 +#include <crypto/null.h>
6323 +#include <crypto/scatterwalk.h>
6324 +#include <linux/err.h>
6325 +#include <linux/init.h>
6326 +#include <linux/module.h>
6327 +#include <linux/rtnetlink.h>
6329 +struct tls_instance_ctx {
6330 + struct crypto_ahash_spawn auth;
6331 + struct crypto_skcipher_spawn enc;
6334 +struct crypto_tls_ctx {
6335 + unsigned int reqoff;
6336 + struct crypto_ahash *auth;
6337 + struct crypto_skcipher *enc;
6338 + struct crypto_skcipher *null;
6341 +struct tls_request_ctx {
6343 + * cryptlen holds the payload length in the case of encryption or
6344 + * payload_len + icv_len + padding_len in case of decryption
6346 + unsigned int cryptlen;
6347 + /* working space for partial results */
6348 + struct scatterlist tmp[2];
6349 + struct scatterlist cipher[2];
6350 + struct scatterlist dst[2];
6355 + struct completion completion;
6359 +static void tls_async_op_done(struct crypto_async_request *req, int err)
6361 + struct async_op *areq = req->data;
6363 + if (err == -EINPROGRESS)
6367 + complete(&areq->completion);
6370 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
6371 + unsigned int keylen)
6373 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6374 + struct crypto_ahash *auth = ctx->auth;
6375 + struct crypto_skcipher *enc = ctx->enc;
6376 + struct crypto_authenc_keys keys;
6377 + int err = -EINVAL;
6379 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
6382 + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
6383 + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
6384 + CRYPTO_TFM_REQ_MASK);
6385 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
6386 + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
6387 + CRYPTO_TFM_RES_MASK);
6392 + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
6393 + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
6394 + CRYPTO_TFM_REQ_MASK);
6395 + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
6396 + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
6397 + CRYPTO_TFM_RES_MASK);
6403 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
6408 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
6409 + * @hash: (output) buffer to save the digest into
6410 + * @src: (input) scatterlist with the assoc and payload data
6411 + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
6412 + * @req: (input) aead request
6414 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
6415 + unsigned int srclen, struct aead_request *req)
6417 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6418 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6419 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6420 + struct async_op ahash_op;
6421 + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
6422 + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
6423 + int err = -EBADMSG;
6425 + /* Bail out if the request assoc len is 0 */
6426 + if (!req->assoclen)
6429 + init_completion(&ahash_op.completion);
6431 + /* the hash transform to be executed comes from the original request */
6432 + ahash_request_set_tfm(ahreq, ctx->auth);
6433 + /* prepare the hash request with input data and result pointer */
6434 + ahash_request_set_crypt(ahreq, src, hash, srclen);
6435 + /* set the notifier for when the async hash function returns */
6436 + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
6437 + tls_async_op_done, &ahash_op);
6439 + /* Calculate the digest on the given data. The result is put in hash */
6440 + err = crypto_ahash_digest(ahreq);
6441 + if (err == -EINPROGRESS) {
6442 + err = wait_for_completion_interruptible(&ahash_op.completion);
6444 + err = ahash_op.err;
6451 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
6452 + * @hash: (output) buffer to save the digest and padding into
6453 + * @phashlen: (output) the size of digest + padding
6454 + * @req: (input) aead request
6456 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
6457 + struct aead_request *req)
6459 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6460 + unsigned int hash_size = crypto_aead_authsize(tls);
6461 + unsigned int block_size = crypto_aead_blocksize(tls);
6462 + unsigned int srclen = req->cryptlen + hash_size;
6463 + unsigned int icvlen = req->cryptlen + req->assoclen;
6464 + unsigned int padlen;
6467 + err = crypto_tls_genicv(hash, req->src, icvlen, req);
6471 + /* add padding after digest */
6472 + padlen = block_size - (srclen % block_size);
6473 + memset(hash + hash_size, padlen - 1, padlen);
6475 + *phashlen = hash_size + padlen;
6480 +static int crypto_tls_copy_data(struct aead_request *req,
6481 + struct scatterlist *src,
6482 + struct scatterlist *dst,
6485 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6486 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6487 + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
6489 + skcipher_request_set_tfm(skreq, ctx->null);
6490 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6492 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
6494 + return crypto_skcipher_encrypt(skreq);
6497 +static int crypto_tls_encrypt(struct aead_request *req)
6499 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6500 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6501 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6502 + struct skcipher_request *skreq;
6503 + struct scatterlist *cipher = treq_ctx->cipher;
6504 + struct scatterlist *tmp = treq_ctx->tmp;
6505 + struct scatterlist *sg, *src, *dst;
6506 + unsigned int cryptlen, phashlen;
6507 + u8 *hash = treq_ctx->tail;
6511 + * The hash result is saved at the beginning of the tls request ctx
6512 + * and is aligned as required by the hash transform. Enough space was
6513 + * allocated in crypto_tls_init_tfm to accommodate the difference. The
6514 + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
6515 + * the result is not overwritten by the second (cipher) request.
6517 + hash = (u8 *)ALIGN((unsigned long)hash +
6518 + crypto_ahash_alignmask(ctx->auth),
6519 + crypto_ahash_alignmask(ctx->auth) + 1);
6522 + * STEP 1: create ICV together with necessary padding
6524 + err = crypto_tls_gen_padicv(hash, &phashlen, req);
6529 + * STEP 2: Hash and padding are combined with the payload
6530 + * depending on the form it arrives. Scatter tables must have at least
6531 + * one page of data before chaining with another table and can't have
6532 + * an empty data page. The following code addresses these requirements.
6534 + * If the payload is empty, only the hash is encrypted, otherwise the
6535 + * payload scatterlist is merged with the hash. A special merging case
6536 + * is when the payload has only one page of data. In that case the
6537 + * payload page is moved to another scatterlist and prepared there for
6540 + if (req->cryptlen) {
6541 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6543 + sg_init_table(cipher, 2);
6544 + sg_set_buf(cipher + 1, hash, phashlen);
6546 + if (sg_is_last(src)) {
6547 + sg_set_page(cipher, sg_page(src), req->cryptlen,
6551 + unsigned int rem_len = req->cryptlen;
6553 + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
6554 + rem_len -= min(rem_len, sg->length);
6556 + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
6557 + sg_chain(sg, 1, cipher);
6560 + sg_init_one(cipher, hash, phashlen);
6565 + * If src != dst copy the associated data from source to destination.
6566 + * In both cases fast-forward past the associated data in the dest.
6568 + if (req->src != req->dst) {
6569 + err = crypto_tls_copy_data(req, req->src, req->dst,
6574 + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
6577 + * STEP 3: encrypt the frame and return the result
6579 + cryptlen = req->cryptlen + phashlen;
6582 + * The hash and the cipher are applied at different times and their
6583 + * requests can use the same memory space without interference
6585 + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6586 + skcipher_request_set_tfm(skreq, ctx->enc);
6587 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6588 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6589 + req->base.complete, req->base.data);
6591 + * Apply the cipher transform. The result will be in req->dst when the
6592 + * asynchronous call terminates
6594 + err = crypto_skcipher_encrypt(skreq);
6599 +static int crypto_tls_decrypt(struct aead_request *req)
6601 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6602 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6603 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6604 + unsigned int cryptlen = req->cryptlen;
6605 + unsigned int hash_size = crypto_aead_authsize(tls);
6606 + unsigned int block_size = crypto_aead_blocksize(tls);
6607 + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6608 + struct scatterlist *tmp = treq_ctx->tmp;
6609 + struct scatterlist *src, *dst;
6611 + u8 padding[255]; /* padding can be 0-255 bytes */
6614 + u8 *ihash, *hash = treq_ctx->tail;
6617 + int err = -EINVAL;
6619 + struct async_op ciph_op;
6622 + * Rule out bad packets. The input packet length must be at least one
6623 + * byte more than the hash_size
6625 + if (cryptlen <= hash_size || cryptlen % block_size)
6629 + * Step 1 - Decrypt the source. Fast-forward past the associated data
6630 + * to the encrypted data. The result will be overwritten in place so
6631 + * that the decrypted data will be adjacent to the associated data. The
6632 + * last step (computing the hash) will have its input data already
6633 + * prepared and ready to be accessed at req->src.
6635 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6638 + init_completion(&ciph_op.completion);
6639 + skcipher_request_set_tfm(skreq, ctx->enc);
6640 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6641 + tls_async_op_done, &ciph_op);
6642 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6643 + err = crypto_skcipher_decrypt(skreq);
6644 + if (err == -EINPROGRESS) {
6645 + err = wait_for_completion_interruptible(&ciph_op.completion);
6647 + err = ciph_op.err;
6653 + * Step 2 - Verify padding
6654 + * Retrieve the last byte of the payload; this is the padding size.
6657 + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
6659 + /* RFC recommendation for invalid padding size. */
6660 + if (cryptlen < pad_size + hash_size) {
6662 + paderr = -EBADMSG;
6664 + cryptlen -= pad_size;
6665 + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
6667 + /* Padding content must be equal to pad_size. We verify it all */
6668 + for (i = 0; i < pad_size; i++)
6669 + if (padding[i] != pad_size)
6670 + paderr = -EBADMSG;
6673 + * Step 3 - Verify hash
6674 + * Align the digest result as required by the hash transform. Enough
6675 + * space was allocated in crypto_tls_init_tfm
6677 + hash = (u8 *)ALIGN((unsigned long)hash +
6678 + crypto_ahash_alignmask(ctx->auth),
6679 + crypto_ahash_alignmask(ctx->auth) + 1);
6681 + * Two bytes at the end of the associated data make the length field.
6682 + * It must be updated with the length of the cleartext message before
6683 + * the hash is calculated.
6685 + len_field = sg_virt(req->src) + req->assoclen - 2;
6686 + cryptlen -= hash_size;
6687 + *len_field = htons(cryptlen);
6689 + /* This is the hash from the decrypted packet. Save it for later */
6690 + ihash = hash + hash_size;
6691 + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
6693 + /* Now compute and compare our ICV with the one from the packet */
6694 + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
6696 + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
6698 + if (req->src != req->dst) {
6699 + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
6705 + /* return the first found error */
6710 + aead_request_complete(req, err);
6714 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
6716 + struct aead_instance *inst = aead_alg_instance(tfm);
6717 + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
6718 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6719 + struct crypto_ahash *auth;
6720 + struct crypto_skcipher *enc;
6721 + struct crypto_skcipher *null;
6724 + auth = crypto_spawn_ahash(&ictx->auth);
6726 + return PTR_ERR(auth);
6728 + enc = crypto_spawn_skcipher(&ictx->enc);
6729 + err = PTR_ERR(enc);
6731 + goto err_free_ahash;
6733 + null = crypto_get_default_null_skcipher2();
6734 + err = PTR_ERR(null);
6736 + goto err_free_skcipher;
6743 + * Allow enough space for two digests. The two digests will be compared
6744 + * during the decryption phase. One will come from the decrypted packet
6745 + * and the other will be calculated. For encryption, one digest is
6746 + * padded (up to a cipher blocksize) and chained with the payload
6748 + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
6749 + crypto_ahash_alignmask(auth),
6750 + crypto_ahash_alignmask(auth) + 1) +
6751 + max(crypto_ahash_digestsize(auth),
6752 + crypto_skcipher_blocksize(enc));
6754 + crypto_aead_set_reqsize(tfm,
6755 + sizeof(struct tls_request_ctx) +
6757 + max_t(unsigned int,
6758 + crypto_ahash_reqsize(auth) +
6759 + sizeof(struct ahash_request),
6760 + crypto_skcipher_reqsize(enc) +
6761 + sizeof(struct skcipher_request)));
6766 + crypto_free_skcipher(enc);
6768 + crypto_free_ahash(auth);
6772 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
6774 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6776 + crypto_free_ahash(ctx->auth);
6777 + crypto_free_skcipher(ctx->enc);
6778 + crypto_put_default_null_skcipher2();
6781 +static void crypto_tls_free(struct aead_instance *inst)
6783 + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
6785 + crypto_drop_skcipher(&ctx->enc);
6786 + crypto_drop_ahash(&ctx->auth);
6790 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
6792 + struct crypto_attr_type *algt;
6793 + struct aead_instance *inst;
6794 + struct hash_alg_common *auth;
6795 + struct crypto_alg *auth_base;
6796 + struct skcipher_alg *enc;
6797 + struct tls_instance_ctx *ctx;
6798 + const char *enc_name;
6801 + algt = crypto_get_attr_type(tb);
6803 + return PTR_ERR(algt);
6805 + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
6808 + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
6809 + CRYPTO_ALG_TYPE_AHASH_MASK |
6810 + crypto_requires_sync(algt->type, algt->mask));
6812 + return PTR_ERR(auth);
6814 + auth_base = &auth->base;
6816 + enc_name = crypto_attr_alg_name(tb[2]);
6817 + err = PTR_ERR(enc_name);
6818 + if (IS_ERR(enc_name))
6819 + goto out_put_auth;
6821 + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
6824 + goto out_put_auth;
6826 + ctx = aead_instance_ctx(inst);
6828 + err = crypto_init_ahash_spawn(&ctx->auth, auth,
6829 + aead_crypto_instance(inst));
6831 + goto err_free_inst;
6833 + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
6834 + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
6835 + crypto_requires_sync(algt->type,
6838 + goto err_drop_auth;
6840 + enc = crypto_spawn_skcipher_alg(&ctx->enc);
6842 + err = -ENAMETOOLONG;
6843 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
6844 + "tls10(%s,%s)", auth_base->cra_name,
6845 + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
6846 + goto err_drop_enc;
6848 + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
6849 + "tls10(%s,%s)", auth_base->cra_driver_name,
6850 + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
6851 + goto err_drop_enc;
6853 + inst->alg.base.cra_flags = (auth_base->cra_flags |
6854 + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
6855 + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
6856 + auth_base->cra_priority;
6857 + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
6858 + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
6859 + enc->base.cra_alignmask;
6860 + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
6862 + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
6863 + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
6864 + inst->alg.maxauthsize = auth->digestsize;
6866 + inst->alg.init = crypto_tls_init_tfm;
6867 + inst->alg.exit = crypto_tls_exit_tfm;
6869 + inst->alg.setkey = crypto_tls_setkey;
6870 + inst->alg.encrypt = crypto_tls_encrypt;
6871 + inst->alg.decrypt = crypto_tls_decrypt;
6873 + inst->free = crypto_tls_free;
6875 + err = aead_register_instance(tmpl, inst);
6877 + goto err_drop_enc;
6880 + crypto_mod_put(auth_base);
6884 + crypto_drop_skcipher(&ctx->enc);
6886 + crypto_drop_ahash(&ctx->auth);
6893 +static struct crypto_template crypto_tls_tmpl = {
6895 + .create = crypto_tls_create,
6896 + .module = THIS_MODULE,
6899 +static int __init crypto_tls_module_init(void)
6901 + return crypto_register_template(&crypto_tls_tmpl);
6904 +static void __exit crypto_tls_module_exit(void)
6906 + crypto_unregister_template(&crypto_tls_tmpl);
6909 +module_init(crypto_tls_module_init);
6910 +module_exit(crypto_tls_module_exit);
6912 +MODULE_LICENSE("GPL");
6913 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
6914 --- a/drivers/crypto/caam/Kconfig
6915 +++ b/drivers/crypto/caam/Kconfig
6917 +config CRYPTO_DEV_FSL_CAAM_COMMON
6920 config CRYPTO_DEV_FSL_CAAM
6921 - tristate "Freescale CAAM-Multicore driver backend"
6922 + tristate "Freescale CAAM-Multicore platform driver backend"
6923 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
6924 + select CRYPTO_DEV_FSL_CAAM_COMMON
6927 Enables the driver module for Freescale's Cryptographic Accelerator
6928 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
6929 @@ -11,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
6930 To compile this driver as a module, choose M here: the module
6931 will be called caam.
6933 +if CRYPTO_DEV_FSL_CAAM
6935 +config CRYPTO_DEV_FSL_CAAM_DEBUG
6936 + bool "Enable debug output in CAAM driver"
6938 + Selecting this will enable printing of various debug
6939 + information in the CAAM driver.
6941 config CRYPTO_DEV_FSL_CAAM_JR
6942 tristate "Freescale CAAM Job Ring driver backend"
6943 - depends on CRYPTO_DEV_FSL_CAAM
6946 Enables the driver module for Job Rings which are part of
6947 @@ -24,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
6948 To compile this driver as a module, choose M here: the module
6949 will be called caam_jr.
6951 +if CRYPTO_DEV_FSL_CAAM_JR
6953 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6955 - depends on CRYPTO_DEV_FSL_CAAM_JR
6959 @@ -44,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6961 config CRYPTO_DEV_FSL_CAAM_INTC
6962 bool "Job Ring interrupt coalescing"
6963 - depends on CRYPTO_DEV_FSL_CAAM_JR
6965 Enable the Job Ring's interrupt coalescing feature.
6967 @@ -74,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
6969 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6970 tristate "Register algorithm implementations with the Crypto API"
6971 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6974 select CRYPTO_AUTHENC
6975 @@ -87,9 +98,25 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6976 To compile this as a module, choose M here: the module
6977 will be called caamalg.
6979 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
6980 + tristate "Queue Interface as Crypto API backend"
6981 + depends on FSL_SDK_DPA && NET
6983 + select CRYPTO_AUTHENC
6984 + select CRYPTO_BLKCIPHER
6986 + Selecting this will use CAAM Queue Interface (QI) for sending
6987 + & receiving crypto jobs to/from CAAM. This gives better performance
6988 + than job ring interface when the number of cores is more than the
6989 + number of job rings assigned to the kernel. The number of portals
6990 + assigned to the kernel should also be more than the number of
6993 + To compile this as a module, choose M here: the module
6994 + will be called caamalg_qi.
6996 config CRYPTO_DEV_FSL_CAAM_AHASH_API
6997 tristate "Register hash algorithm implementations with Crypto API"
6998 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7002 @@ -101,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
7004 config CRYPTO_DEV_FSL_CAAM_PKC_API
7005 tristate "Register public key cryptography implementations with Crypto API"
7006 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7010 @@ -113,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
7012 config CRYPTO_DEV_FSL_CAAM_RNG_API
7013 tristate "Register caam device for hwrng API"
7014 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7018 @@ -124,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
7019 To compile this as a module, choose M here: the module
7020 will be called caamrng.
7022 -config CRYPTO_DEV_FSL_CAAM_IMX
7023 - def_bool SOC_IMX6 || SOC_IMX7D
7024 - depends on CRYPTO_DEV_FSL_CAAM
7025 +endif # CRYPTO_DEV_FSL_CAAM_JR
7027 -config CRYPTO_DEV_FSL_CAAM_DEBUG
7028 - bool "Enable debug output in CAAM driver"
7029 - depends on CRYPTO_DEV_FSL_CAAM
7031 - Selecting this will enable printing of various debug
7032 - information in the CAAM driver.
7033 +endif # CRYPTO_DEV_FSL_CAAM
7035 +config CRYPTO_DEV_FSL_DPAA2_CAAM
7036 + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
7037 + depends on FSL_MC_DPIO
7038 + select CRYPTO_DEV_FSL_CAAM_COMMON
7039 + select CRYPTO_BLKCIPHER
7040 + select CRYPTO_AUTHENC
7041 + select CRYPTO_AEAD
7042 + select CRYPTO_HASH
7044 + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
7045 + It handles DPSECI DPAA2 objects that sit on the Management Complex
7048 + To compile this as a module, choose M here: the module
7049 + will be called dpaa2_caam.
7051 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
7052 + def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
7053 + CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
7054 + CRYPTO_DEV_FSL_DPAA2_CAAM)
7056 +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
7057 + def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
7058 + CRYPTO_DEV_FSL_DPAA2_CAAM)
7059 --- a/drivers/crypto/caam/Makefile
7060 +++ b/drivers/crypto/caam/Makefile
7061 @@ -5,13 +5,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
7062 ccflags-y := -DDEBUG
7065 +ccflags-y += -DVERSION=\"\"
7067 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
7068 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
7069 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
7070 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
7071 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
7072 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
7073 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
7074 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
7075 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
7076 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
7079 -caam_jr-objs := jr.o key_gen.o error.o
7080 +caam_jr-objs := jr.o key_gen.o
7081 caam_pkc-y := caampkc.o pkc_desc.o
7082 +ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
7083 + ccflags-y += -DCONFIG_CAAM_QI
7087 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
7089 +dpaa2_caam-y := caamalg_qi2.o dpseci.o
7090 --- a/drivers/crypto/caam/caamalg.c
7091 +++ b/drivers/crypto/caam/caamalg.c
7093 * caam - Freescale FSL CAAM support for crypto API
7095 * Copyright 2008-2011 Freescale Semiconductor, Inc.
7096 + * Copyright 2016 NXP
7098 * Based on talitos crypto API driver.
7102 #include "sg_sw_sec4.h"
7103 #include "key_gen.h"
7104 +#include "caamalg_desc.h"
7109 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
7110 CTR_RFC3686_NONCE_SIZE + \
7111 SHA512_DIGEST_SIZE * 2)
7112 -/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
7113 -#define CAAM_MAX_IV_LENGTH 16
7115 #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
7116 #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
7118 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
7121 -/* length of descriptors text */
7122 -#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
7123 -#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
7124 -#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
7125 -#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 10 * CAAM_CMD_SZ)
7127 -/* Note: Nonce is counted in enckeylen */
7128 -#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
7130 -#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
7131 -#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
7132 -#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
7134 -#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
7135 -#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
7136 -#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
7138 -#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
7139 -#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7140 -#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7142 -#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
7143 -#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
7144 -#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
7146 -#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
7147 -#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
7149 -#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
7152 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
7153 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
7155 @@ -112,47 +81,11 @@
7156 #define debug(format, arg...)
7160 -#include <linux/highmem.h>
7162 -static void dbg_dump_sg(const char *level, const char *prefix_str,
7163 - int prefix_type, int rowsize, int groupsize,
7164 - struct scatterlist *sg, size_t tlen, bool ascii,
7167 - struct scatterlist *it;
7172 - for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
7174 - * make sure the scatterlist's page
7175 - * has a valid virtual memory mapping
7177 - it_page = kmap_atomic(sg_page(it));
7178 - if (unlikely(!it_page)) {
7179 - printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
7183 - buf = it_page + it->offset;
7184 - len = min_t(size_t, tlen, it->length);
7185 - print_hex_dump(level, prefix_str, prefix_type, rowsize,
7186 - groupsize, buf, len, ascii);
7189 - kunmap_atomic(it_page);
7194 static struct list_head alg_list;
7196 struct caam_alg_entry {
7197 int class1_alg_type;
7198 int class2_alg_type;
7203 @@ -163,302 +96,70 @@ struct caam_aead_alg {
7207 -/* Set DK bit in class 1 operation if shared */
7208 -static inline void append_dec_op1(u32 *desc, u32 type)
7210 - u32 *jump_cmd, *uncond_jump_cmd;
7212 - /* DK bit is valid only for AES */
7213 - if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
7214 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7219 - jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
7220 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7222 - uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7223 - set_jump_tgt_here(desc, jump_cmd);
7224 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7225 - OP_ALG_DECRYPT | OP_ALG_AAI_DK);
7226 - set_jump_tgt_here(desc, uncond_jump_cmd);
7230 - * For aead functions, read payload and write payload,
7231 - * both of which are specified in req->src and req->dst
7233 -static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
7235 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7236 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
7237 - KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
7241 - * For ablkcipher encrypt and decrypt, read from req->src and
7242 - * write to req->dst
7244 -static inline void ablkcipher_append_src_dst(u32 *desc)
7246 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7247 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7248 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
7249 - KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7250 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7254 * per-session context
7257 - struct device *jrdev;
7258 u32 sh_desc_enc[DESC_MAX_USED_LEN];
7259 u32 sh_desc_dec[DESC_MAX_USED_LEN];
7260 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
7261 + u8 key[CAAM_MAX_KEY_SIZE];
7262 dma_addr_t sh_desc_enc_dma;
7263 dma_addr_t sh_desc_dec_dma;
7264 dma_addr_t sh_desc_givenc_dma;
7265 - u32 class1_alg_type;
7266 - u32 class2_alg_type;
7268 - u8 key[CAAM_MAX_KEY_SIZE];
7270 - unsigned int enckeylen;
7271 - unsigned int split_key_len;
7272 - unsigned int split_key_pad_len;
7273 + struct device *jrdev;
7274 + struct alginfo adata;
7275 + struct alginfo cdata;
7276 unsigned int authsize;
7279 -static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
7280 - int keys_fit_inline, bool is_rfc3686)
7283 - unsigned int enckeylen = ctx->enckeylen;
7286 - * RFC3686 specific:
7287 - * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
7288 - * | enckeylen = encryption key size + nonce size
7291 - enckeylen -= CTR_RFC3686_NONCE_SIZE;
7293 - if (keys_fit_inline) {
7294 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7295 - ctx->split_key_len, CLASS_2 |
7296 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7297 - append_key_as_imm(desc, (void *)ctx->key +
7298 - ctx->split_key_pad_len, enckeylen,
7299 - enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7301 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7302 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7303 - append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
7304 - enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7307 - /* Load Counter into CONTEXT1 reg */
7309 - nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
7311 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
7312 - LDST_CLASS_IND_CCB |
7313 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
7315 - MOVE_SRC_OUTFIFO |
7316 - MOVE_DEST_CLASS1CTX |
7317 - (16 << MOVE_OFFSET_SHIFT) |
7318 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
7322 -static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
7323 - int keys_fit_inline, bool is_rfc3686)
7325 - u32 *key_jump_cmd;
7327 - /* Note: Context registers are saved. */
7328 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
7330 - /* Skip if already shared */
7331 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7334 - append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7336 - set_jump_tgt_here(desc, key_jump_cmd);
7339 static int aead_null_set_sh_desc(struct crypto_aead *aead)
7341 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7342 struct device *jrdev = ctx->jrdev;
7343 - bool keys_fit_inline = false;
7344 - u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
7345 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
7347 + int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
7348 + ctx->adata.keylen_pad;
7351 * Job Descriptor and Shared Descriptors
7352 * must all fit into the 64-word Descriptor h/w Buffer
7354 - if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
7355 - ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7356 - keys_fit_inline = true;
7357 + if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
7358 + ctx->adata.key_inline = true;
7359 + ctx->adata.key_virt = ctx->key;
7361 + ctx->adata.key_inline = false;
7362 + ctx->adata.key_dma = ctx->key_dma;
7365 /* aead_encrypt shared descriptor */
7366 desc = ctx->sh_desc_enc;
7368 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7370 - /* Skip if already shared */
7371 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7373 - if (keys_fit_inline)
7374 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7375 - ctx->split_key_len, CLASS_2 |
7376 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7378 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7379 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7380 - set_jump_tgt_here(desc, key_jump_cmd);
7382 - /* assoclen + cryptlen = seqinlen */
7383 - append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
7385 - /* Prepare to read and write cryptlen + assoclen bytes */
7386 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7387 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7390 - * MOVE_LEN opcode is not available in all SEC HW revisions,
7391 - * thus need to do some magic, i.e. self-patch the descriptor
7394 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7396 - (0x6 << MOVE_LEN_SHIFT));
7397 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
7398 - MOVE_DEST_DESCBUF |
7400 - (0x8 << MOVE_LEN_SHIFT));
7402 - /* Class 2 operation */
7403 - append_operation(desc, ctx->class2_alg_type |
7404 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7406 - /* Read and write cryptlen bytes */
7407 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7409 - set_move_tgt_here(desc, read_move_cmd);
7410 - set_move_tgt_here(desc, write_move_cmd);
7411 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7412 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7416 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7417 - LDST_SRCDST_BYTE_CONTEXT);
7419 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7422 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7423 - dev_err(jrdev, "unable to map shared descriptor\n");
7427 - print_hex_dump(KERN_ERR,
7428 - "aead null enc shdesc@"__stringify(__LINE__)": ",
7429 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7430 - desc_bytes(desc), 1);
7432 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
7434 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7435 + desc_bytes(desc), DMA_TO_DEVICE);
7438 * Job Descriptor and Shared Descriptors
7439 * must all fit into the 64-word Descriptor h/w Buffer
7441 - keys_fit_inline = false;
7442 - if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
7443 - ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7444 - keys_fit_inline = true;
7446 - desc = ctx->sh_desc_dec;
7447 + if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
7448 + ctx->adata.key_inline = true;
7449 + ctx->adata.key_virt = ctx->key;
7451 + ctx->adata.key_inline = false;
7452 + ctx->adata.key_dma = ctx->key_dma;
7455 /* aead_decrypt shared descriptor */
7456 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7458 - /* Skip if already shared */
7459 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7461 - if (keys_fit_inline)
7462 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7463 - ctx->split_key_len, CLASS_2 |
7464 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7466 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7467 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7468 - set_jump_tgt_here(desc, key_jump_cmd);
7470 - /* Class 2 operation */
7471 - append_operation(desc, ctx->class2_alg_type |
7472 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7474 - /* assoclen + cryptlen = seqoutlen */
7475 - append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7477 - /* Prepare to read and write cryptlen + assoclen bytes */
7478 - append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
7479 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
7482 - * MOVE_LEN opcode is not available in all SEC HW revisions,
7483 - * thus need to do some magic, i.e. self-patch the descriptor
7486 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7488 - (0x6 << MOVE_LEN_SHIFT));
7489 - write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
7490 - MOVE_DEST_DESCBUF |
7492 - (0x8 << MOVE_LEN_SHIFT));
7494 - /* Read and write cryptlen bytes */
7495 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7498 - * Insert a NOP here, since we need at least 4 instructions between
7499 - * code patching the descriptor buffer and the location being patched.
7501 - jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7502 - set_jump_tgt_here(desc, jump_cmd);
7504 - set_move_tgt_here(desc, read_move_cmd);
7505 - set_move_tgt_here(desc, write_move_cmd);
7506 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7507 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7509 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7512 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7513 - FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7515 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7518 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7519 - dev_err(jrdev, "unable to map shared descriptor\n");
7523 - print_hex_dump(KERN_ERR,
7524 - "aead null dec shdesc@"__stringify(__LINE__)": ",
7525 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7526 - desc_bytes(desc), 1);
7528 + desc = ctx->sh_desc_dec;
7529 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
7531 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7532 + desc_bytes(desc), DMA_TO_DEVICE);
7536 @@ -470,12 +171,12 @@ static int aead_set_sh_desc(struct crypt
7537 unsigned int ivsize = crypto_aead_ivsize(aead);
7538 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7539 struct device *jrdev = ctx->jrdev;
7540 - bool keys_fit_inline;
7541 - u32 geniv, moveiv;
7542 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
7543 u32 ctx1_iv_off = 0;
7546 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
7547 + u32 *desc, *nonce = NULL;
7549 + unsigned int data_len[2];
7550 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
7551 OP_ALG_AAI_CTR_MOD128);
7552 const bool is_rfc3686 = alg->caam.rfc3686;
7554 @@ -483,7 +184,7 @@ static int aead_set_sh_desc(struct crypt
7557 /* NULL encryption / decryption */
7558 - if (!ctx->enckeylen)
7559 + if (!ctx->cdata.keylen)
7560 return aead_null_set_sh_desc(aead);
7563 @@ -498,8 +199,14 @@ static int aead_set_sh_desc(struct crypt
7565 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
7569 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
7570 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
7571 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
7574 + data_len[0] = ctx->adata.keylen_pad;
7575 + data_len[1] = ctx->cdata.keylen;
7577 if (alg->caam.geniv)
7579 @@ -508,146 +215,64 @@ static int aead_set_sh_desc(struct crypt
7580 * Job Descriptor and Shared Descriptors
7581 * must all fit into the 64-word Descriptor h/w Buffer
7583 - keys_fit_inline = false;
7584 - if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7585 - ctx->split_key_pad_len + ctx->enckeylen +
7586 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7587 - CAAM_DESC_BYTES_MAX)
7588 - keys_fit_inline = true;
7590 - /* aead_encrypt shared descriptor */
7591 - desc = ctx->sh_desc_enc;
7593 - /* Note: Context registers are saved. */
7594 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7596 - /* Class 2 operation */
7597 - append_operation(desc, ctx->class2_alg_type |
7598 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7600 - /* Read and write assoclen bytes */
7601 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7602 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7603 + if (desc_inline_query(DESC_AEAD_ENC_LEN +
7604 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7605 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7606 + ARRAY_SIZE(data_len)) < 0)
7609 - /* Skip assoc data */
7610 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7612 + ctx->adata.key_virt = ctx->key;
7614 + ctx->adata.key_dma = ctx->key_dma;
7616 - /* read assoc before reading payload */
7617 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7620 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7622 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7624 - /* Load Counter into CONTEXT1 reg */
7626 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7627 - LDST_SRCDST_BYTE_CONTEXT |
7628 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7629 - LDST_OFFSET_SHIFT));
7631 - /* Class 1 operation */
7632 - append_operation(desc, ctx->class1_alg_type |
7633 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7635 - /* Read and write cryptlen bytes */
7636 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7637 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7638 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
7641 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7642 - LDST_SRCDST_BYTE_CONTEXT);
7643 + ctx->adata.key_inline = !!(inl_mask & 1);
7644 + ctx->cdata.key_inline = !!(inl_mask & 2);
7646 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7649 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7650 - dev_err(jrdev, "unable to map shared descriptor\n");
7654 - print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
7655 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7656 - desc_bytes(desc), 1);
7658 + /* aead_encrypt shared descriptor */
7659 + desc = ctx->sh_desc_enc;
7660 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
7661 + ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
7662 + false, ctrlpriv->era);
7663 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7664 + desc_bytes(desc), DMA_TO_DEVICE);
7668 * Job Descriptor and Shared Descriptors
7669 * must all fit into the 64-word Descriptor h/w Buffer
7671 - keys_fit_inline = false;
7672 - if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7673 - ctx->split_key_pad_len + ctx->enckeylen +
7674 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7675 - CAAM_DESC_BYTES_MAX)
7676 - keys_fit_inline = true;
7678 - /* aead_decrypt shared descriptor */
7679 - desc = ctx->sh_desc_dec;
7681 - /* Note: Context registers are saved. */
7682 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7684 - /* Class 2 operation */
7685 - append_operation(desc, ctx->class2_alg_type |
7686 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7687 + if (desc_inline_query(DESC_AEAD_DEC_LEN +
7688 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7689 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7690 + ARRAY_SIZE(data_len)) < 0)
7693 - /* Read and write assoclen bytes */
7694 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7695 - if (alg->caam.geniv)
7696 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
7698 + ctx->adata.key_virt = ctx->key;
7700 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7701 + ctx->adata.key_dma = ctx->key_dma;
7703 - /* Skip assoc data */
7704 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7706 - /* read assoc before reading payload */
7707 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7710 - if (alg->caam.geniv) {
7711 - append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
7712 - LDST_SRCDST_BYTE_CONTEXT |
7713 - (ctx1_iv_off << LDST_OFFSET_SHIFT));
7714 - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
7715 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
7718 - /* Load Counter into CONTEXT1 reg */
7720 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7721 - LDST_SRCDST_BYTE_CONTEXT |
7722 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7723 - LDST_OFFSET_SHIFT));
7725 - /* Choose operation */
7727 - append_operation(desc, ctx->class1_alg_type |
7728 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
7730 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7732 - append_dec_op1(desc, ctx->class1_alg_type);
7733 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7735 - /* Read and write cryptlen bytes */
7736 - append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7737 - append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7738 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
7741 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7742 - FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7743 + ctx->adata.key_inline = !!(inl_mask & 1);
7744 + ctx->cdata.key_inline = !!(inl_mask & 2);
7746 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7749 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7750 - dev_err(jrdev, "unable to map shared descriptor\n");
7754 - print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
7755 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7756 - desc_bytes(desc), 1);
7758 + /* aead_decrypt shared descriptor */
7759 + desc = ctx->sh_desc_dec;
7760 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
7761 + ctx->authsize, alg->caam.geniv, is_rfc3686,
7762 + nonce, ctx1_iv_off, false, ctrlpriv->era);
7763 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7764 + desc_bytes(desc), DMA_TO_DEVICE);
7766 if (!alg->caam.geniv)
7768 @@ -656,115 +281,32 @@ skip_enc:
7769 * Job Descriptor and Shared Descriptors
7770 * must all fit into the 64-word Descriptor h/w Buffer
7772 - keys_fit_inline = false;
7773 - if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7774 - ctx->split_key_pad_len + ctx->enckeylen +
7775 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7776 - CAAM_DESC_BYTES_MAX)
7777 - keys_fit_inline = true;
7779 - /* aead_givencrypt shared descriptor */
7780 - desc = ctx->sh_desc_enc;
7782 - /* Note: Context registers are saved. */
7783 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7789 - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
7790 - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
7791 - NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7792 - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
7793 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7794 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7795 - append_move(desc, MOVE_WAITCOMP |
7796 - MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
7797 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7798 - (ivsize << MOVE_LEN_SHIFT));
7799 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7802 - /* Copy IV to class 1 context */
7803 - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
7804 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7805 - (ivsize << MOVE_LEN_SHIFT));
7807 - /* Return to encryption */
7808 - append_operation(desc, ctx->class2_alg_type |
7809 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7811 - /* Read and write assoclen bytes */
7812 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7813 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7815 - /* ivsize + cryptlen = seqoutlen - authsize */
7816 - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
7818 - /* Skip assoc data */
7819 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7821 - /* read assoc before reading payload */
7822 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7825 - /* Copy iv from outfifo to class 2 fifo */
7826 - moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
7827 - NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7828 - append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
7829 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7830 - append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
7831 - LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
7833 - /* Load Counter into CONTEXT1 reg */
7835 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7836 - LDST_SRCDST_BYTE_CONTEXT |
7837 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7838 - LDST_OFFSET_SHIFT));
7840 - /* Class 1 operation */
7841 - append_operation(desc, ctx->class1_alg_type |
7842 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7844 - /* Will write ivsize + cryptlen */
7845 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7847 - /* Not need to reload iv */
7848 - append_seq_fifo_load(desc, ivsize,
7849 - FIFOLD_CLASS_SKIP);
7850 + if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
7851 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7852 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7853 + ARRAY_SIZE(data_len)) < 0)
7856 - /* Will read cryptlen */
7857 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7859 + ctx->adata.key_virt = ctx->key;
7861 + ctx->adata.key_dma = ctx->key_dma;
7864 - * Wait for IV transfer (ofifo -> class2) to finish before starting
7865 - * ciphertext transfer (ofifo -> external memory).
7867 - wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
7868 - set_jump_tgt_here(desc, wait_cmd);
7870 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7872 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7874 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
7875 - FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
7876 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7879 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7880 - LDST_SRCDST_BYTE_CONTEXT);
7881 + ctx->adata.key_inline = !!(inl_mask & 1);
7882 + ctx->cdata.key_inline = !!(inl_mask & 2);
7884 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7887 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7888 - dev_err(jrdev, "unable to map shared descriptor\n");
7892 - print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
7893 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7894 - desc_bytes(desc), 1);
7896 + /* aead_givencrypt shared descriptor */
7897 + desc = ctx->sh_desc_enc;
7898 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
7899 + ctx->authsize, is_rfc3686, nonce,
7900 + ctx1_iv_off, false, ctrlpriv->era);
7901 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7902 + desc_bytes(desc), DMA_TO_DEVICE);
7906 @@ -785,12 +327,12 @@ static int gcm_set_sh_desc(struct crypto
7908 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7909 struct device *jrdev = ctx->jrdev;
7910 - bool keys_fit_inline = false;
7911 - u32 *key_jump_cmd, *zero_payload_jump_cmd,
7912 - *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
7913 + unsigned int ivsize = crypto_aead_ivsize(aead);
7915 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
7916 + ctx->cdata.keylen;
7918 - if (!ctx->enckeylen || !ctx->authsize)
7919 + if (!ctx->cdata.keylen || !ctx->authsize)
7923 @@ -798,175 +340,35 @@ static int gcm_set_sh_desc(struct crypto
7924 * Job Descriptor and Shared Descriptor
7925 * must fit into the 64-word Descriptor h/w Buffer
7927 - if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
7928 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
7929 - keys_fit_inline = true;
7930 + if (rem_bytes >= DESC_GCM_ENC_LEN) {
7931 + ctx->cdata.key_inline = true;
7932 + ctx->cdata.key_virt = ctx->key;
7934 + ctx->cdata.key_inline = false;
7935 + ctx->cdata.key_dma = ctx->key_dma;
7938 desc = ctx->sh_desc_enc;
7940 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7942 - /* skip key loading if they are loaded due to sharing */
7943 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7944 - JUMP_COND_SHRD | JUMP_COND_SELF);
7945 - if (keys_fit_inline)
7946 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
7947 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7949 - append_key(desc, ctx->key_dma, ctx->enckeylen,
7950 - CLASS_1 | KEY_DEST_CLASS_REG);
7951 - set_jump_tgt_here(desc, key_jump_cmd);
7953 - /* class 1 operation */
7954 - append_operation(desc, ctx->class1_alg_type |
7955 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7957 - /* if assoclen + cryptlen is ZERO, skip to ICV write */
7958 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7959 - zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
7960 - JUMP_COND_MATH_Z);
7962 - /* if assoclen is ZERO, skip reading the assoc data */
7963 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7964 - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
7965 - JUMP_COND_MATH_Z);
7967 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7969 - /* skip assoc data */
7970 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7972 - /* cryptlen = seqinlen - assoclen */
7973 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
7975 - /* if cryptlen is ZERO jump to zero-payload commands */
7976 - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
7977 - JUMP_COND_MATH_Z);
7979 - /* read assoc data */
7980 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7981 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
7982 - set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
7984 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7986 - /* write encrypted data */
7987 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
7989 - /* read payload data */
7990 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7991 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7993 - /* jump the zero-payload commands */
7994 - append_jump(desc, JUMP_TEST_ALL | 2);
7996 - /* zero-payload commands */
7997 - set_jump_tgt_here(desc, zero_payload_jump_cmd);
7999 - /* read assoc data */
8000 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8001 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
8003 - /* There is no input data */
8004 - set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
8007 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8008 - LDST_SRCDST_BYTE_CONTEXT);
8010 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8013 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8014 - dev_err(jrdev, "unable to map shared descriptor\n");
8018 - print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
8019 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8020 - desc_bytes(desc), 1);
8022 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8023 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8024 + desc_bytes(desc), DMA_TO_DEVICE);
8027 * Job Descriptor and Shared Descriptors
8028 * must all fit into the 64-word Descriptor h/w Buffer
8030 - keys_fit_inline = false;
8031 - if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8032 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8033 - keys_fit_inline = true;
8034 + if (rem_bytes >= DESC_GCM_DEC_LEN) {
8035 + ctx->cdata.key_inline = true;
8036 + ctx->cdata.key_virt = ctx->key;
8038 + ctx->cdata.key_inline = false;
8039 + ctx->cdata.key_dma = ctx->key_dma;
8042 desc = ctx->sh_desc_dec;
8044 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8046 - /* skip key loading if they are loaded due to sharing */
8047 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8048 - JUMP_TEST_ALL | JUMP_COND_SHRD |
8050 - if (keys_fit_inline)
8051 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8052 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8054 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8055 - CLASS_1 | KEY_DEST_CLASS_REG);
8056 - set_jump_tgt_here(desc, key_jump_cmd);
8058 - /* class 1 operation */
8059 - append_operation(desc, ctx->class1_alg_type |
8060 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8062 - /* if assoclen is ZERO, skip reading the assoc data */
8063 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
8064 - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
8065 - JUMP_COND_MATH_Z);
8067 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8069 - /* skip assoc data */
8070 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8072 - /* read assoc data */
8073 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8074 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8076 - set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
8078 - /* cryptlen = seqoutlen - assoclen */
8079 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8081 - /* jump to zero-payload command if cryptlen is zero */
8082 - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
8083 - JUMP_COND_MATH_Z);
8085 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8087 - /* store encrypted data */
8088 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8090 - /* read payload data */
8091 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8092 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8094 - /* zero-payload command */
8095 - set_jump_tgt_here(desc, zero_payload_jump_cmd);
8098 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8099 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8101 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8104 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8105 - dev_err(jrdev, "unable to map shared descriptor\n");
8109 - print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
8110 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8111 - desc_bytes(desc), 1);
8113 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8114 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8115 + desc_bytes(desc), DMA_TO_DEVICE);
8119 @@ -985,11 +387,12 @@ static int rfc4106_set_sh_desc(struct cr
8121 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8122 struct device *jrdev = ctx->jrdev;
8123 - bool keys_fit_inline = false;
8124 - u32 *key_jump_cmd;
8125 + unsigned int ivsize = crypto_aead_ivsize(aead);
8127 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8128 + ctx->cdata.keylen;
8130 - if (!ctx->enckeylen || !ctx->authsize)
8131 + if (!ctx->cdata.keylen || !ctx->authsize)
8135 @@ -997,148 +400,37 @@ static int rfc4106_set_sh_desc(struct cr
8136 * Job Descriptor and Shared Descriptor
8137 * must fit into the 64-word Descriptor h/w Buffer
8139 - if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8140 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8141 - keys_fit_inline = true;
8142 + if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
8143 + ctx->cdata.key_inline = true;
8144 + ctx->cdata.key_virt = ctx->key;
8146 + ctx->cdata.key_inline = false;
8147 + ctx->cdata.key_dma = ctx->key_dma;
8150 desc = ctx->sh_desc_enc;
8152 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8154 - /* Skip key loading if it is loaded due to sharing */
8155 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8157 - if (keys_fit_inline)
8158 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8159 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8161 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8162 - CLASS_1 | KEY_DEST_CLASS_REG);
8163 - set_jump_tgt_here(desc, key_jump_cmd);
8165 - /* Class 1 operation */
8166 - append_operation(desc, ctx->class1_alg_type |
8167 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8169 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8170 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8172 - /* Read assoc data */
8173 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8174 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8177 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8179 - /* Will read cryptlen bytes */
8180 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8182 - /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8183 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8185 - /* Skip assoc data */
8186 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8188 - /* cryptlen = seqoutlen - assoclen */
8189 - append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
8191 - /* Write encrypted data */
8192 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8194 - /* Read payload data */
8195 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8196 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
8199 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8200 - LDST_SRCDST_BYTE_CONTEXT);
8202 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8205 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8206 - dev_err(jrdev, "unable to map shared descriptor\n");
8210 - print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
8211 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8212 - desc_bytes(desc), 1);
8214 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8216 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8217 + desc_bytes(desc), DMA_TO_DEVICE);
8220 * Job Descriptor and Shared Descriptors
8221 * must all fit into the 64-word Descriptor h/w Buffer
8223 - keys_fit_inline = false;
8224 - if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
8225 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8226 - keys_fit_inline = true;
8227 + if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
8228 + ctx->cdata.key_inline = true;
8229 + ctx->cdata.key_virt = ctx->key;
8231 + ctx->cdata.key_inline = false;
8232 + ctx->cdata.key_dma = ctx->key_dma;
8235 desc = ctx->sh_desc_dec;
8237 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8239 - /* Skip key loading if it is loaded due to sharing */
8240 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8241 - JUMP_TEST_ALL | JUMP_COND_SHRD);
8242 - if (keys_fit_inline)
8243 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8244 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8246 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8247 - CLASS_1 | KEY_DEST_CLASS_REG);
8248 - set_jump_tgt_here(desc, key_jump_cmd);
8250 - /* Class 1 operation */
8251 - append_operation(desc, ctx->class1_alg_type |
8252 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8254 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8255 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8257 - /* Read assoc data */
8258 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8259 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8262 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8264 - /* Will read cryptlen bytes */
8265 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
8267 - /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8268 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8270 - /* Skip assoc data */
8271 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8273 - /* Will write cryptlen bytes */
8274 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8276 - /* Store payload data */
8277 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8279 - /* Read encrypted data */
8280 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8281 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8284 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8285 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8287 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8290 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8291 - dev_err(jrdev, "unable to map shared descriptor\n");
8295 - print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
8296 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8297 - desc_bytes(desc), 1);
8299 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8301 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8302 + desc_bytes(desc), DMA_TO_DEVICE);
8306 @@ -1158,12 +450,12 @@ static int rfc4543_set_sh_desc(struct cr
8308 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8309 struct device *jrdev = ctx->jrdev;
8310 - bool keys_fit_inline = false;
8311 - u32 *key_jump_cmd;
8312 - u32 *read_move_cmd, *write_move_cmd;
8313 + unsigned int ivsize = crypto_aead_ivsize(aead);
8315 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8316 + ctx->cdata.keylen;
8318 - if (!ctx->enckeylen || !ctx->authsize)
8319 + if (!ctx->cdata.keylen || !ctx->authsize)
8323 @@ -1171,151 +463,37 @@ static int rfc4543_set_sh_desc(struct cr
8324 * Job Descriptor and Shared Descriptor
8325 * must fit into the 64-word Descriptor h/w Buffer
8327 - if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8328 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8329 - keys_fit_inline = true;
8330 + if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
8331 + ctx->cdata.key_inline = true;
8332 + ctx->cdata.key_virt = ctx->key;
8334 + ctx->cdata.key_inline = false;
8335 + ctx->cdata.key_dma = ctx->key_dma;
8338 desc = ctx->sh_desc_enc;
8340 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8342 - /* Skip key loading if it is loaded due to sharing */
8343 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8345 - if (keys_fit_inline)
8346 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8347 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8349 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8350 - CLASS_1 | KEY_DEST_CLASS_REG);
8351 - set_jump_tgt_here(desc, key_jump_cmd);
8353 - /* Class 1 operation */
8354 - append_operation(desc, ctx->class1_alg_type |
8355 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8357 - /* assoclen + cryptlen = seqinlen */
8358 - append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
8361 - * MOVE_LEN opcode is not available in all SEC HW revisions,
8362 - * thus need to do some magic, i.e. self-patch the descriptor
8365 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8366 - (0x6 << MOVE_LEN_SHIFT));
8367 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8368 - (0x8 << MOVE_LEN_SHIFT));
8370 - /* Will read assoclen + cryptlen bytes */
8371 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8373 - /* Will write assoclen + cryptlen bytes */
8374 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8376 - /* Read and write assoclen + cryptlen bytes */
8377 - aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
8379 - set_move_tgt_here(desc, read_move_cmd);
8380 - set_move_tgt_here(desc, write_move_cmd);
8381 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8382 - /* Move payload data to OFIFO */
8383 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8386 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8387 - LDST_SRCDST_BYTE_CONTEXT);
8389 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8392 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8393 - dev_err(jrdev, "unable to map shared descriptor\n");
8397 - print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
8398 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8399 - desc_bytes(desc), 1);
8401 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8403 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8404 + desc_bytes(desc), DMA_TO_DEVICE);
8407 * Job Descriptor and Shared Descriptors
8408 * must all fit into the 64-word Descriptor h/w Buffer
8410 - keys_fit_inline = false;
8411 - if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8412 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8413 - keys_fit_inline = true;
8414 + if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
8415 + ctx->cdata.key_inline = true;
8416 + ctx->cdata.key_virt = ctx->key;
8418 + ctx->cdata.key_inline = false;
8419 + ctx->cdata.key_dma = ctx->key_dma;
8422 desc = ctx->sh_desc_dec;
8424 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8426 - /* Skip key loading if it is loaded due to sharing */
8427 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8428 - JUMP_TEST_ALL | JUMP_COND_SHRD);
8429 - if (keys_fit_inline)
8430 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8431 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8433 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8434 - CLASS_1 | KEY_DEST_CLASS_REG);
8435 - set_jump_tgt_here(desc, key_jump_cmd);
8437 - /* Class 1 operation */
8438 - append_operation(desc, ctx->class1_alg_type |
8439 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8441 - /* assoclen + cryptlen = seqoutlen */
8442 - append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8445 - * MOVE_LEN opcode is not available in all SEC HW revisions,
8446 - * thus need to do some magic, i.e. self-patch the descriptor
8449 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8450 - (0x6 << MOVE_LEN_SHIFT));
8451 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8452 - (0x8 << MOVE_LEN_SHIFT));
8454 - /* Will read assoclen + cryptlen bytes */
8455 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8457 - /* Will write assoclen + cryptlen bytes */
8458 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8460 - /* Store payload data */
8461 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8463 - /* In-snoop assoclen + cryptlen data */
8464 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
8465 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
8467 - set_move_tgt_here(desc, read_move_cmd);
8468 - set_move_tgt_here(desc, write_move_cmd);
8469 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8470 - /* Move payload data to OFIFO */
8471 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8472 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8475 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8476 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8478 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8481 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8482 - dev_err(jrdev, "unable to map shared descriptor\n");
8486 - print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
8487 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8488 - desc_bytes(desc), 1);
8490 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8492 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8493 + desc_bytes(desc), DMA_TO_DEVICE);
8497 @@ -1331,74 +509,67 @@ static int rfc4543_setauthsize(struct cr
8501 -static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
8504 - return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
8505 - ctx->split_key_pad_len, key_in, authkeylen,
8509 static int aead_setkey(struct crypto_aead *aead,
8510 const u8 *key, unsigned int keylen)
8512 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
8513 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
8514 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8515 struct device *jrdev = ctx->jrdev;
8516 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
8517 struct crypto_authenc_keys keys;
8520 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
8523 - /* Pick class 2 key length from algorithm submask */
8524 - ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
8525 - OP_ALG_ALGSEL_SHIFT] * 2;
8526 - ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
8528 - if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
8532 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
8533 keys.authkeylen + keys.enckeylen, keys.enckeylen,
8535 - printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
8536 - ctx->split_key_len, ctx->split_key_pad_len);
8537 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8538 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8541 - ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
8543 + * If DKP is supported, use it in the shared descriptor to generate
8546 + if (ctrlpriv->era >= 6) {
8547 + ctx->adata.keylen = keys.authkeylen;
8548 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
8549 + OP_ALG_ALGSEL_MASK);
8551 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
8554 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
8555 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
8557 + dma_sync_single_for_device(jrdev, ctx->key_dma,
8558 + ctx->adata.keylen_pad +
8559 + keys.enckeylen, DMA_TO_DEVICE);
8560 + goto skip_split_key;
8563 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
8564 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
8570 /* postpend encryption key to auth split key */
8571 - memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
8573 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
8574 - keys.enckeylen, DMA_TO_DEVICE);
8575 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8576 - dev_err(jrdev, "unable to map key i/o memory\n");
8579 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
8580 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
8581 + keys.enckeylen, DMA_TO_DEVICE);
8583 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
8584 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
8585 - ctx->split_key_pad_len + keys.enckeylen, 1);
8586 + ctx->adata.keylen_pad + keys.enckeylen, 1);
8589 - ctx->enckeylen = keys.enckeylen;
8591 - ret = aead_set_sh_desc(aead);
8593 - dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
8594 - keys.enckeylen, DMA_TO_DEVICE);
8599 + ctx->cdata.keylen = keys.enckeylen;
8600 + return aead_set_sh_desc(aead);
8602 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
8604 @@ -1409,7 +580,6 @@ static int gcm_setkey(struct crypto_aead
8606 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8607 struct device *jrdev = ctx->jrdev;
8611 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8612 @@ -1417,21 +587,10 @@ static int gcm_setkey(struct crypto_aead
8615 memcpy(ctx->key, key, keylen);
8616 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8618 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8619 - dev_err(jrdev, "unable to map key i/o memory\n");
8622 - ctx->enckeylen = keylen;
8624 - ret = gcm_set_sh_desc(aead);
8626 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8629 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8630 + ctx->cdata.keylen = keylen;
8633 + return gcm_set_sh_desc(aead);
8636 static int rfc4106_setkey(struct crypto_aead *aead,
8637 @@ -1439,7 +598,6 @@ static int rfc4106_setkey(struct crypto_
8639 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8640 struct device *jrdev = ctx->jrdev;
8645 @@ -1455,22 +613,10 @@ static int rfc4106_setkey(struct crypto_
8646 * The last four bytes of the key material are used as the salt value
8647 * in the nonce. Update the AES key length.
8649 - ctx->enckeylen = keylen - 4;
8651 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8653 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8654 - dev_err(jrdev, "unable to map key i/o memory\n");
8658 - ret = rfc4106_set_sh_desc(aead);
8660 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8665 + ctx->cdata.keylen = keylen - 4;
8666 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8668 + return rfc4106_set_sh_desc(aead);
8671 static int rfc4543_setkey(struct crypto_aead *aead,
8672 @@ -1478,7 +624,6 @@ static int rfc4543_setkey(struct crypto_
8674 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8675 struct device *jrdev = ctx->jrdev;
8680 @@ -1494,43 +639,28 @@ static int rfc4543_setkey(struct crypto_
8681 * The last four bytes of the key material are used as the salt value
8682 * in the nonce. Update the AES key length.
8684 - ctx->enckeylen = keylen - 4;
8686 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8688 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8689 - dev_err(jrdev, "unable to map key i/o memory\n");
8693 - ret = rfc4543_set_sh_desc(aead);
8695 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8700 + ctx->cdata.keylen = keylen - 4;
8701 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8703 + return rfc4543_set_sh_desc(aead);
8706 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8707 const u8 *key, unsigned int keylen)
8709 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8710 - struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
8711 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
8712 const char *alg_name = crypto_tfm_alg_name(tfm);
8713 struct device *jrdev = ctx->jrdev;
8715 - u32 *key_jump_cmd;
8716 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
8720 u32 ctx1_iv_off = 0;
8721 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
8722 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
8723 OP_ALG_AAI_CTR_MOD128);
8724 const bool is_rfc3686 = (ctr_mode &&
8725 (strstr(alg_name, "rfc3686") != NULL));
8727 + memcpy(ctx->key, key, keylen);
8729 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8730 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8731 @@ -1553,215 +683,33 @@ static int ablkcipher_setkey(struct cryp
8732 keylen -= CTR_RFC3686_NONCE_SIZE;
8735 - memcpy(ctx->key, key, keylen);
8736 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8738 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8739 - dev_err(jrdev, "unable to map key i/o memory\n");
8742 - ctx->enckeylen = keylen;
8743 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8744 + ctx->cdata.keylen = keylen;
8745 + ctx->cdata.key_virt = ctx->key;
8746 + ctx->cdata.key_inline = true;
8748 /* ablkcipher_encrypt shared descriptor */
8749 desc = ctx->sh_desc_enc;
8750 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8751 - /* Skip if already shared */
8752 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8755 - /* Load class1 key only */
8756 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8757 - ctx->enckeylen, CLASS_1 |
8758 - KEY_DEST_CLASS_REG);
8759 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
8761 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8762 + desc_bytes(desc), DMA_TO_DEVICE);
8764 - /* Load nonce into CONTEXT1 reg */
8766 - nonce = (u8 *)key + keylen;
8767 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8768 - LDST_CLASS_IND_CCB |
8769 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8770 - append_move(desc, MOVE_WAITCOMP |
8771 - MOVE_SRC_OUTFIFO |
8772 - MOVE_DEST_CLASS1CTX |
8773 - (16 << MOVE_OFFSET_SHIFT) |
8774 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8777 - set_jump_tgt_here(desc, key_jump_cmd);
8780 - append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8781 - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8783 - /* Load counter into CONTEXT1 reg */
8785 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8786 - LDST_SRCDST_BYTE_CONTEXT |
8787 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8788 - LDST_OFFSET_SHIFT));
8790 - /* Load operation */
8791 - append_operation(desc, ctx->class1_alg_type |
8792 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8794 - /* Perform operation */
8795 - ablkcipher_append_src_dst(desc);
8797 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8800 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8801 - dev_err(jrdev, "unable to map shared descriptor\n");
8805 - print_hex_dump(KERN_ERR,
8806 - "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
8807 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8808 - desc_bytes(desc), 1);
8810 /* ablkcipher_decrypt shared descriptor */
8811 desc = ctx->sh_desc_dec;
8812 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
8814 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8815 + desc_bytes(desc), DMA_TO_DEVICE);
8817 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8818 - /* Skip if already shared */
8819 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8822 - /* Load class1 key only */
8823 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8824 - ctx->enckeylen, CLASS_1 |
8825 - KEY_DEST_CLASS_REG);
8827 - /* Load nonce into CONTEXT1 reg */
8829 - nonce = (u8 *)key + keylen;
8830 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8831 - LDST_CLASS_IND_CCB |
8832 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8833 - append_move(desc, MOVE_WAITCOMP |
8834 - MOVE_SRC_OUTFIFO |
8835 - MOVE_DEST_CLASS1CTX |
8836 - (16 << MOVE_OFFSET_SHIFT) |
8837 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8840 - set_jump_tgt_here(desc, key_jump_cmd);
8843 - append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8844 - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8846 - /* Load counter into CONTEXT1 reg */
8848 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8849 - LDST_SRCDST_BYTE_CONTEXT |
8850 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8851 - LDST_OFFSET_SHIFT));
8853 - /* Choose operation */
8855 - append_operation(desc, ctx->class1_alg_type |
8856 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
8858 - append_dec_op1(desc, ctx->class1_alg_type);
8860 - /* Perform operation */
8861 - ablkcipher_append_src_dst(desc);
8863 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8866 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8867 - dev_err(jrdev, "unable to map shared descriptor\n");
8872 - print_hex_dump(KERN_ERR,
8873 - "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
8874 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8875 - desc_bytes(desc), 1);
8877 /* ablkcipher_givencrypt shared descriptor */
8878 desc = ctx->sh_desc_givenc;
8879 + cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
8881 + dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
8882 + desc_bytes(desc), DMA_TO_DEVICE);
8884 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8885 - /* Skip if already shared */
8886 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8889 - /* Load class1 key only */
8890 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8891 - ctx->enckeylen, CLASS_1 |
8892 - KEY_DEST_CLASS_REG);
8894 - /* Load Nonce into CONTEXT1 reg */
8896 - nonce = (u8 *)key + keylen;
8897 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8898 - LDST_CLASS_IND_CCB |
8899 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8900 - append_move(desc, MOVE_WAITCOMP |
8901 - MOVE_SRC_OUTFIFO |
8902 - MOVE_DEST_CLASS1CTX |
8903 - (16 << MOVE_OFFSET_SHIFT) |
8904 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8906 - set_jump_tgt_here(desc, key_jump_cmd);
8909 - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
8910 - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
8911 - NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
8912 - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
8913 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
8914 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8915 - append_move(desc, MOVE_WAITCOMP |
8917 - MOVE_DEST_CLASS1CTX |
8918 - (crt->ivsize << MOVE_LEN_SHIFT) |
8919 - (ctx1_iv_off << MOVE_OFFSET_SHIFT));
8920 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8922 - /* Copy generated IV to memory */
8923 - append_seq_store(desc, crt->ivsize,
8924 - LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
8925 - (ctx1_iv_off << LDST_OFFSET_SHIFT));
8927 - /* Load Counter into CONTEXT1 reg */
8929 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8930 - LDST_SRCDST_BYTE_CONTEXT |
8931 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8932 - LDST_OFFSET_SHIFT));
8935 - append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
8936 - (1 << JUMP_OFFSET_SHIFT));
8938 - /* Load operation */
8939 - append_operation(desc, ctx->class1_alg_type |
8940 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8942 - /* Perform operation */
8943 - ablkcipher_append_src_dst(desc);
8945 - ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
8948 - if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
8949 - dev_err(jrdev, "unable to map shared descriptor\n");
8953 - print_hex_dump(KERN_ERR,
8954 - "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
8955 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8956 - desc_bytes(desc), 1);
8963 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8964 @@ -1769,8 +717,7 @@ static int xts_ablkcipher_setkey(struct
8966 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8967 struct device *jrdev = ctx->jrdev;
8968 - u32 *key_jump_cmd, *desc;
8969 - __be64 sector_size = cpu_to_be64(512);
8972 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
8973 crypto_ablkcipher_set_flags(ablkcipher,
8974 @@ -1780,126 +727,38 @@ static int xts_ablkcipher_setkey(struct
8977 memcpy(ctx->key, key, keylen);
8978 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
8979 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8980 - dev_err(jrdev, "unable to map key i/o memory\n");
8983 - ctx->enckeylen = keylen;
8984 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8985 + ctx->cdata.keylen = keylen;
8986 + ctx->cdata.key_virt = ctx->key;
8987 + ctx->cdata.key_inline = true;
8989 /* xts_ablkcipher_encrypt shared descriptor */
8990 desc = ctx->sh_desc_enc;
8991 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8992 - /* Skip if already shared */
8993 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8996 - /* Load class1 keys only */
8997 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8998 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
9000 - /* Load sector size with index 40 bytes (0x28) */
9001 - append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
9002 - LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
9003 - append_data(desc, (void *)§or_size, 8);
9005 - set_jump_tgt_here(desc, key_jump_cmd);
9008 - * create sequence for loading the sector index
9009 - * Upper 8B of IV - will be used as sector index
9010 - * Lower 8B of IV - will be discarded
9012 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
9013 - LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
9014 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
9016 - /* Load operation */
9017 - append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
9020 - /* Perform operation */
9021 - ablkcipher_append_src_dst(desc);
9023 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
9025 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
9026 - dev_err(jrdev, "unable to map shared descriptor\n");
9030 - print_hex_dump(KERN_ERR,
9031 - "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
9032 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
9034 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
9035 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
9036 + desc_bytes(desc), DMA_TO_DEVICE);
9038 /* xts_ablkcipher_decrypt shared descriptor */
9039 desc = ctx->sh_desc_dec;
9041 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
9042 - /* Skip if already shared */
9043 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
9046 - /* Load class1 key only */
9047 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
9048 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
9050 - /* Load sector size with index 40 bytes (0x28) */
9051 - append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
9052 - LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
9053 - append_data(desc, (void *)§or_size, 8);
9055 - set_jump_tgt_here(desc, key_jump_cmd);
9058 - * create sequence for loading the sector index
9059 - * Upper 8B of IV - will be used as sector index
9060 - * Lower 8B of IV - will be discarded
9062 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
9063 - LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
9064 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
9066 - /* Load operation */
9067 - append_dec_op1(desc, ctx->class1_alg_type);
9069 - /* Perform operation */
9070 - ablkcipher_append_src_dst(desc);
9072 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
9074 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
9075 - dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
9076 - desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
9077 - dev_err(jrdev, "unable to map shared descriptor\n");
9081 - print_hex_dump(KERN_ERR,
9082 - "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
9083 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
9085 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
9086 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
9087 + desc_bytes(desc), DMA_TO_DEVICE);
9093 * aead_edesc - s/w-extended aead descriptor
9094 - * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
9095 - * @src_nents: number of segments in input scatterlist
9096 - * @dst_nents: number of segments in output scatterlist
9097 - * @iv_dma: dma address of iv for checking continuity and link table
9098 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9099 + * @src_nents: number of segments in input s/w scatterlist
9100 + * @dst_nents: number of segments in output s/w scatterlist
9101 * @sec4_sg_bytes: length of dma mapped sec4_sg space
9102 * @sec4_sg_dma: bus physical mapped address of h/w link table
9103 + * @sec4_sg: pointer to h/w link table
9104 * @hw_desc: the h/w job descriptor followed by any referenced link tables
9110 - dma_addr_t iv_dma;
9112 dma_addr_t sec4_sg_dma;
9113 struct sec4_sg_entry *sec4_sg;
9114 @@ -1908,12 +767,12 @@ struct aead_edesc {
9117 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
9118 - * @src_nents: number of segments in input scatterlist
9119 - * @dst_nents: number of segments in output scatterlist
9120 + * @src_nents: number of segments in input s/w scatterlist
9121 + * @dst_nents: number of segments in output s/w scatterlist
9122 * @iv_dma: dma address of iv for checking continuity and link table
9123 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9124 * @sec4_sg_bytes: length of dma mapped sec4_sg space
9125 * @sec4_sg_dma: bus physical mapped address of h/w link table
9126 + * @sec4_sg: pointer to h/w link table
9127 * @hw_desc: the h/w job descriptor followed by any referenced link tables
9129 struct ablkcipher_edesc {
9130 @@ -1933,10 +792,11 @@ static void caam_unmap(struct device *de
9134 - dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
9135 - dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
9137 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
9138 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
9140 - dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
9141 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
9145 @@ -2031,8 +891,7 @@ static void ablkcipher_encrypt_done(stru
9146 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9149 - edesc = (struct ablkcipher_edesc *)((char *)desc -
9150 - offsetof(struct ablkcipher_edesc, hw_desc));
9151 + edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9154 caam_jr_strstatus(jrdev, err);
9155 @@ -2041,10 +900,10 @@ static void ablkcipher_encrypt_done(stru
9156 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
9157 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9158 edesc->src_nents > 1 ? 100 : ivsize, 1);
9159 - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
9160 - DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9161 - edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9163 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
9164 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9165 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9167 ablkcipher_unmap(jrdev, edesc, req);
9169 @@ -2074,8 +933,7 @@ static void ablkcipher_decrypt_done(stru
9170 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9173 - edesc = (struct ablkcipher_edesc *)((char *)desc -
9174 - offsetof(struct ablkcipher_edesc, hw_desc));
9175 + edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9177 caam_jr_strstatus(jrdev, err);
9179 @@ -2083,10 +941,10 @@ static void ablkcipher_decrypt_done(stru
9180 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
9181 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9183 - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
9184 - DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9185 - edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9187 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
9188 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9189 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9191 ablkcipher_unmap(jrdev, edesc, req);
9193 @@ -2127,7 +985,7 @@ static void init_aead_job(struct aead_re
9194 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9197 - src_dma = sg_dma_address(req->src);
9198 + src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
9201 src_dma = edesc->sec4_sg_dma;
9202 @@ -2142,7 +1000,7 @@ static void init_aead_job(struct aead_re
9203 out_options = in_options;
9205 if (unlikely(req->src != req->dst)) {
9206 - if (!edesc->dst_nents) {
9207 + if (edesc->dst_nents == 1) {
9208 dst_dma = sg_dma_address(req->dst);
9211 @@ -2161,9 +1019,6 @@ static void init_aead_job(struct aead_re
9212 append_seq_out_ptr(desc, dst_dma,
9213 req->assoclen + req->cryptlen - authsize,
9216 - /* REG3 = assoclen */
9217 - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
9220 static void init_gcm_job(struct aead_request *req,
9221 @@ -2178,6 +1033,7 @@ static void init_gcm_job(struct aead_req
9224 init_aead_job(req, edesc, all_contig, encrypt);
9225 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
9227 /* BUG This should not be specific to generic GCM. */
9229 @@ -2189,7 +1045,7 @@ static void init_gcm_job(struct aead_req
9230 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
9233 - append_data(desc, ctx->key + ctx->enckeylen, 4);
9234 + append_data(desc, ctx->key + ctx->cdata.keylen, 4);
9236 append_data(desc, req->iv, ivsize);
9237 /* End of blank commands */
9238 @@ -2204,7 +1060,8 @@ static void init_authenc_job(struct aead
9239 struct caam_aead_alg, aead);
9240 unsigned int ivsize = crypto_aead_ivsize(aead);
9241 struct caam_ctx *ctx = crypto_aead_ctx(aead);
9242 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
9243 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
9244 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
9245 OP_ALG_AAI_CTR_MOD128);
9246 const bool is_rfc3686 = alg->caam.rfc3686;
9247 u32 *desc = edesc->hw_desc;
9248 @@ -2227,6 +1084,15 @@ static void init_authenc_job(struct aead
9250 init_aead_job(req, edesc, all_contig, encrypt);
9253 + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
9254 + * having DPOVRD as destination.
9256 + if (ctrlpriv->era < 3)
9257 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
9259 + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
9261 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
9262 append_load_as_imm(desc, req->iv, ivsize,
9264 @@ -2250,16 +1116,15 @@ static void init_ablkcipher_job(u32 *sh_
9265 int len, sec4_sg_index = 0;
9268 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9269 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9270 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
9271 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9273 - printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
9274 - dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
9275 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9276 - edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9277 + pr_err("asked=%d, nbytes%d\n",
9278 + (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
9280 + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
9281 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9282 + edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9284 len = desc_len(sh_desc);
9285 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9286 @@ -2275,7 +1140,7 @@ static void init_ablkcipher_job(u32 *sh_
9287 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
9289 if (likely(req->src == req->dst)) {
9290 - if (!edesc->src_nents && iv_contig) {
9291 + if (edesc->src_nents == 1 && iv_contig) {
9292 dst_dma = sg_dma_address(req->src);
9294 dst_dma = edesc->sec4_sg_dma +
9295 @@ -2283,7 +1148,7 @@ static void init_ablkcipher_job(u32 *sh_
9296 out_options = LDST_SGF;
9299 - if (!edesc->dst_nents) {
9300 + if (edesc->dst_nents == 1) {
9301 dst_dma = sg_dma_address(req->dst);
9303 dst_dma = edesc->sec4_sg_dma +
9304 @@ -2310,20 +1175,18 @@ static void init_ablkcipher_giv_job(u32
9305 int len, sec4_sg_index = 0;
9308 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9309 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9310 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
9311 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9313 - dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
9314 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9315 - edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9317 + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
9318 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9319 + edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9321 len = desc_len(sh_desc);
9322 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9324 - if (!edesc->src_nents) {
9325 + if (edesc->src_nents == 1) {
9326 src_dma = sg_dma_address(req->src);
9329 @@ -2354,87 +1217,100 @@ static struct aead_edesc *aead_edesc_all
9330 struct crypto_aead *aead = crypto_aead_reqtfm(req);
9331 struct caam_ctx *ctx = crypto_aead_ctx(aead);
9332 struct device *jrdev = ctx->jrdev;
9333 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9334 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
9335 - int src_nents, dst_nents = 0;
9336 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9337 + GFP_KERNEL : GFP_ATOMIC;
9338 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9339 struct aead_edesc *edesc;
9341 - bool all_contig = true;
9342 - int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
9343 + int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
9344 unsigned int authsize = ctx->authsize;
9346 if (unlikely(req->dst != req->src)) {
9347 - src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
9348 - dst_nents = sg_count(req->dst,
9349 - req->assoclen + req->cryptlen +
9350 - (encrypt ? authsize : (-authsize)));
9352 - src_nents = sg_count(req->src,
9353 - req->assoclen + req->cryptlen +
9354 - (encrypt ? authsize : 0));
9357 - /* Check if data are contiguous. */
9358 - all_contig = !src_nents;
9359 - if (!all_contig) {
9360 - src_nents = src_nents ? : 1;
9361 - sec4_sg_len = src_nents;
9364 - sec4_sg_len += dst_nents;
9366 - sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9367 + src_nents = sg_nents_for_len(req->src, req->assoclen +
9369 + if (unlikely(src_nents < 0)) {
9370 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9371 + req->assoclen + req->cryptlen);
9372 + return ERR_PTR(src_nents);
9375 - /* allocate space for base edesc and hw desc commands, link tables */
9376 - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9379 - dev_err(jrdev, "could not allocate extended descriptor\n");
9380 - return ERR_PTR(-ENOMEM);
9381 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
9383 + (encrypt ? authsize :
9385 + if (unlikely(dst_nents < 0)) {
9386 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9387 + req->assoclen + req->cryptlen +
9388 + (encrypt ? authsize : (-authsize)));
9389 + return ERR_PTR(dst_nents);
9392 + src_nents = sg_nents_for_len(req->src, req->assoclen +
9394 + (encrypt ? authsize : 0));
9395 + if (unlikely(src_nents < 0)) {
9396 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9397 + req->assoclen + req->cryptlen +
9398 + (encrypt ? authsize : 0));
9399 + return ERR_PTR(src_nents);
9403 if (likely(req->src == req->dst)) {
9404 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9405 - DMA_BIDIRECTIONAL);
9406 - if (unlikely(!sgc)) {
9407 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9408 + DMA_BIDIRECTIONAL);
9409 + if (unlikely(!mapped_src_nents)) {
9410 dev_err(jrdev, "unable to map source\n");
9412 return ERR_PTR(-ENOMEM);
9415 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9417 - if (unlikely(!sgc)) {
9418 - dev_err(jrdev, "unable to map source\n");
9420 - return ERR_PTR(-ENOMEM);
9421 + /* Cover also the case of null (zero length) input data */
9423 + mapped_src_nents = dma_map_sg(jrdev, req->src,
9424 + src_nents, DMA_TO_DEVICE);
9425 + if (unlikely(!mapped_src_nents)) {
9426 + dev_err(jrdev, "unable to map source\n");
9427 + return ERR_PTR(-ENOMEM);
9430 + mapped_src_nents = 0;
9433 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9435 - if (unlikely(!sgc)) {
9436 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9438 + if (unlikely(!mapped_dst_nents)) {
9439 dev_err(jrdev, "unable to map destination\n");
9440 - dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
9443 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9444 return ERR_PTR(-ENOMEM);
9448 + sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
9449 + sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9450 + sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9452 + /* allocate space for base edesc and hw desc commands, link tables */
9453 + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9456 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9458 + return ERR_PTR(-ENOMEM);
9461 edesc->src_nents = src_nents;
9462 edesc->dst_nents = dst_nents;
9463 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
9465 - *all_contig_ptr = all_contig;
9466 + *all_contig_ptr = !(mapped_src_nents > 1);
9469 - if (!all_contig) {
9470 - sg_to_sec4_sg_last(req->src, src_nents,
9471 - edesc->sec4_sg + sec4_sg_index, 0);
9472 - sec4_sg_index += src_nents;
9473 + if (mapped_src_nents > 1) {
9474 + sg_to_sec4_sg_last(req->src, mapped_src_nents,
9475 + edesc->sec4_sg + sec4_sg_index, 0);
9476 + sec4_sg_index += mapped_src_nents;
9479 - sg_to_sec4_sg_last(req->dst, dst_nents,
9480 + if (mapped_dst_nents > 1) {
9481 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9482 edesc->sec4_sg + sec4_sg_index, 0);
9485 @@ -2587,13 +1463,9 @@ static int aead_decrypt(struct aead_requ
9490 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9491 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9492 - dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
9493 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9494 - req->assoclen + req->cryptlen, 1, may_sleep);
9496 + caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
9497 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9498 + req->assoclen + req->cryptlen, 1);
9500 /* allocate extended descriptor */
9501 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
9502 @@ -2633,51 +1505,80 @@ static struct ablkcipher_edesc *ablkciph
9503 struct device *jrdev = ctx->jrdev;
9504 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9505 GFP_KERNEL : GFP_ATOMIC;
9506 - int src_nents, dst_nents = 0, sec4_sg_bytes;
9507 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9508 struct ablkcipher_edesc *edesc;
9509 dma_addr_t iv_dma = 0;
9510 - bool iv_contig = false;
9513 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9514 - int sec4_sg_index;
9515 + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9517 - src_nents = sg_count(req->src, req->nbytes);
9518 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9519 + if (unlikely(src_nents < 0)) {
9520 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9522 + return ERR_PTR(src_nents);
9525 - if (req->dst != req->src)
9526 - dst_nents = sg_count(req->dst, req->nbytes);
9527 + if (req->dst != req->src) {
9528 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9529 + if (unlikely(dst_nents < 0)) {
9530 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9532 + return ERR_PTR(dst_nents);
9536 if (likely(req->src == req->dst)) {
9537 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9538 - DMA_BIDIRECTIONAL);
9539 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9540 + DMA_BIDIRECTIONAL);
9541 + if (unlikely(!mapped_src_nents)) {
9542 + dev_err(jrdev, "unable to map source\n");
9543 + return ERR_PTR(-ENOMEM);
9546 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9548 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9550 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9552 + if (unlikely(!mapped_src_nents)) {
9553 + dev_err(jrdev, "unable to map source\n");
9554 + return ERR_PTR(-ENOMEM);
9557 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9559 + if (unlikely(!mapped_dst_nents)) {
9560 + dev_err(jrdev, "unable to map destination\n");
9561 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9562 + return ERR_PTR(-ENOMEM);
9566 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
9567 if (dma_mapping_error(jrdev, iv_dma)) {
9568 dev_err(jrdev, "unable to map IV\n");
9569 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9571 return ERR_PTR(-ENOMEM);
9575 - * Check if iv can be contiguous with source and destination.
9576 - * If so, include it. If not, create scatterlist.
9578 - if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
9581 - src_nents = src_nents ? : 1;
9582 - sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9583 - sizeof(struct sec4_sg_entry);
9584 + if (mapped_src_nents == 1 &&
9585 + iv_dma + ivsize == sg_dma_address(req->src)) {
9589 + in_contig = false;
9590 + sec4_sg_ents = 1 + mapped_src_nents;
9592 + dst_sg_idx = sec4_sg_ents;
9593 + sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9594 + sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9596 /* allocate space for base edesc and hw desc commands, link tables */
9597 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9600 dev_err(jrdev, "could not allocate extended descriptor\n");
9601 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9602 + iv_dma, ivsize, 0, 0);
9603 return ERR_PTR(-ENOMEM);
9606 @@ -2687,23 +1588,24 @@ static struct ablkcipher_edesc *ablkciph
9607 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9610 - sec4_sg_index = 0;
9613 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
9614 - sg_to_sec4_sg_last(req->src, src_nents,
9615 + sg_to_sec4_sg_last(req->src, mapped_src_nents,
9616 edesc->sec4_sg + 1, 0);
9617 - sec4_sg_index += 1 + src_nents;
9621 - sg_to_sec4_sg_last(req->dst, dst_nents,
9622 - edesc->sec4_sg + sec4_sg_index, 0);
9623 + if (mapped_dst_nents > 1) {
9624 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9625 + edesc->sec4_sg + dst_sg_idx, 0);
9628 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9629 sec4_sg_bytes, DMA_TO_DEVICE);
9630 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9631 dev_err(jrdev, "unable to map S/G table\n");
9632 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9633 + iv_dma, ivsize, 0, 0);
9635 return ERR_PTR(-ENOMEM);
9638 @@ -2715,7 +1617,7 @@ static struct ablkcipher_edesc *ablkciph
9642 - *iv_contig_out = iv_contig;
9643 + *iv_contig_out = in_contig;
9647 @@ -2806,30 +1708,54 @@ static struct ablkcipher_edesc *ablkciph
9648 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9649 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9650 struct device *jrdev = ctx->jrdev;
9651 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9652 - CRYPTO_TFM_REQ_MAY_SLEEP)) ?
9653 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9654 GFP_KERNEL : GFP_ATOMIC;
9655 - int src_nents, dst_nents = 0, sec4_sg_bytes;
9656 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
9657 struct ablkcipher_edesc *edesc;
9658 dma_addr_t iv_dma = 0;
9659 - bool iv_contig = false;
9662 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9663 - int sec4_sg_index;
9665 - src_nents = sg_count(req->src, req->nbytes);
9666 + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9668 - if (unlikely(req->dst != req->src))
9669 - dst_nents = sg_count(req->dst, req->nbytes);
9670 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9671 + if (unlikely(src_nents < 0)) {
9672 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9674 + return ERR_PTR(src_nents);
9677 if (likely(req->src == req->dst)) {
9678 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9679 - DMA_BIDIRECTIONAL);
9680 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9681 + DMA_BIDIRECTIONAL);
9682 + if (unlikely(!mapped_src_nents)) {
9683 + dev_err(jrdev, "unable to map source\n");
9684 + return ERR_PTR(-ENOMEM);
9687 + dst_nents = src_nents;
9688 + mapped_dst_nents = src_nents;
9690 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9692 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9694 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9696 + if (unlikely(!mapped_src_nents)) {
9697 + dev_err(jrdev, "unable to map source\n");
9698 + return ERR_PTR(-ENOMEM);
9701 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9702 + if (unlikely(dst_nents < 0)) {
9703 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9705 + return ERR_PTR(dst_nents);
9708 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9710 + if (unlikely(!mapped_dst_nents)) {
9711 + dev_err(jrdev, "unable to map destination\n");
9712 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9713 + return ERR_PTR(-ENOMEM);
9718 @@ -2839,21 +1765,29 @@ static struct ablkcipher_edesc *ablkciph
9719 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
9720 if (dma_mapping_error(jrdev, iv_dma)) {
9721 dev_err(jrdev, "unable to map IV\n");
9722 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9724 return ERR_PTR(-ENOMEM);
9727 - if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
9730 - dst_nents = dst_nents ? : 1;
9731 - sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9732 - sizeof(struct sec4_sg_entry);
9733 + sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
9734 + dst_sg_idx = sec4_sg_ents;
9735 + if (mapped_dst_nents == 1 &&
9736 + iv_dma + ivsize == sg_dma_address(req->dst)) {
9737 + out_contig = true;
9739 + out_contig = false;
9740 + sec4_sg_ents += 1 + mapped_dst_nents;
9743 /* allocate space for base edesc and hw desc commands, link tables */
9744 + sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9745 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9748 dev_err(jrdev, "could not allocate extended descriptor\n");
9749 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9750 + iv_dma, ivsize, 0, 0);
9751 return ERR_PTR(-ENOMEM);
9754 @@ -2863,24 +1797,24 @@ static struct ablkcipher_edesc *ablkciph
9755 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9758 - sec4_sg_index = 0;
9760 - sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
9761 - sec4_sg_index += src_nents;
9763 + if (mapped_src_nents > 1)
9764 + sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
9768 - dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
9769 + if (!out_contig) {
9770 + dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
9772 - sec4_sg_index += 1;
9773 - sg_to_sec4_sg_last(req->dst, dst_nents,
9774 - edesc->sec4_sg + sec4_sg_index, 0);
9775 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9776 + edesc->sec4_sg + dst_sg_idx + 1, 0);
9779 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9780 sec4_sg_bytes, DMA_TO_DEVICE);
9781 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9782 dev_err(jrdev, "unable to map S/G table\n");
9783 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9784 + iv_dma, ivsize, 0, 0);
9786 return ERR_PTR(-ENOMEM);
9788 edesc->iv_dma = iv_dma;
9789 @@ -2892,7 +1826,7 @@ static struct ablkcipher_edesc *ablkciph
9793 - *iv_contig_out = iv_contig;
9794 + *iv_contig_out = out_contig;
9798 @@ -2903,7 +1837,7 @@ static int ablkcipher_givencrypt(struct
9799 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9800 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9801 struct device *jrdev = ctx->jrdev;
9803 + bool iv_contig = false;
9807 @@ -2947,7 +1881,6 @@ struct caam_alg_template {
9809 u32 class1_alg_type;
9810 u32 class2_alg_type;
9814 static struct caam_alg_template driver_algs[] = {
9815 @@ -3132,7 +2065,6 @@ static struct caam_aead_alg driver_aeads
9817 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9818 OP_ALG_AAI_HMAC_PRECOMP,
9819 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9823 @@ -3154,7 +2086,6 @@ static struct caam_aead_alg driver_aeads
9825 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9826 OP_ALG_AAI_HMAC_PRECOMP,
9827 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9831 @@ -3176,7 +2107,6 @@ static struct caam_aead_alg driver_aeads
9833 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9834 OP_ALG_AAI_HMAC_PRECOMP,
9835 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9839 @@ -3198,7 +2128,6 @@ static struct caam_aead_alg driver_aeads
9841 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9842 OP_ALG_AAI_HMAC_PRECOMP,
9843 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9847 @@ -3220,7 +2149,6 @@ static struct caam_aead_alg driver_aeads
9849 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9850 OP_ALG_AAI_HMAC_PRECOMP,
9851 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9855 @@ -3242,7 +2170,6 @@ static struct caam_aead_alg driver_aeads
9857 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9858 OP_ALG_AAI_HMAC_PRECOMP,
9859 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9863 @@ -3264,7 +2191,6 @@ static struct caam_aead_alg driver_aeads
9864 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9865 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9866 OP_ALG_AAI_HMAC_PRECOMP,
9867 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9871 @@ -3287,7 +2213,6 @@ static struct caam_aead_alg driver_aeads
9872 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9873 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9874 OP_ALG_AAI_HMAC_PRECOMP,
9875 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9879 @@ -3310,7 +2235,6 @@ static struct caam_aead_alg driver_aeads
9880 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9881 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9882 OP_ALG_AAI_HMAC_PRECOMP,
9883 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9887 @@ -3333,7 +2257,6 @@ static struct caam_aead_alg driver_aeads
9888 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9889 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9890 OP_ALG_AAI_HMAC_PRECOMP,
9891 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9895 @@ -3356,7 +2279,6 @@ static struct caam_aead_alg driver_aeads
9896 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9897 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9898 OP_ALG_AAI_HMAC_PRECOMP,
9899 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9903 @@ -3379,7 +2301,6 @@ static struct caam_aead_alg driver_aeads
9904 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9905 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9906 OP_ALG_AAI_HMAC_PRECOMP,
9907 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9911 @@ -3402,7 +2323,6 @@ static struct caam_aead_alg driver_aeads
9912 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9913 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9914 OP_ALG_AAI_HMAC_PRECOMP,
9915 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9919 @@ -3425,7 +2345,6 @@ static struct caam_aead_alg driver_aeads
9920 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9921 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9922 OP_ALG_AAI_HMAC_PRECOMP,
9923 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9927 @@ -3448,7 +2367,6 @@ static struct caam_aead_alg driver_aeads
9928 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9929 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9930 OP_ALG_AAI_HMAC_PRECOMP,
9931 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9935 @@ -3471,7 +2389,6 @@ static struct caam_aead_alg driver_aeads
9936 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9937 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9938 OP_ALG_AAI_HMAC_PRECOMP,
9939 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9943 @@ -3494,7 +2411,6 @@ static struct caam_aead_alg driver_aeads
9944 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9945 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9946 OP_ALG_AAI_HMAC_PRECOMP,
9947 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9951 @@ -3517,7 +2433,6 @@ static struct caam_aead_alg driver_aeads
9952 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9953 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9954 OP_ALG_AAI_HMAC_PRECOMP,
9955 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9959 @@ -3540,7 +2455,6 @@ static struct caam_aead_alg driver_aeads
9960 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9961 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9962 OP_ALG_AAI_HMAC_PRECOMP,
9963 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9967 @@ -3563,7 +2477,6 @@ static struct caam_aead_alg driver_aeads
9968 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9969 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9970 OP_ALG_AAI_HMAC_PRECOMP,
9971 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9975 @@ -3587,7 +2500,6 @@ static struct caam_aead_alg driver_aeads
9976 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9977 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9978 OP_ALG_AAI_HMAC_PRECOMP,
9979 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9983 @@ -3611,7 +2523,6 @@ static struct caam_aead_alg driver_aeads
9984 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9985 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9986 OP_ALG_AAI_HMAC_PRECOMP,
9987 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9991 @@ -3635,7 +2546,6 @@ static struct caam_aead_alg driver_aeads
9992 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9993 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9994 OP_ALG_AAI_HMAC_PRECOMP,
9995 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9999 @@ -3659,7 +2569,6 @@ static struct caam_aead_alg driver_aeads
10000 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10001 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10002 OP_ALG_AAI_HMAC_PRECOMP,
10003 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10007 @@ -3683,7 +2592,6 @@ static struct caam_aead_alg driver_aeads
10008 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10009 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10010 OP_ALG_AAI_HMAC_PRECOMP,
10011 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10015 @@ -3707,7 +2615,6 @@ static struct caam_aead_alg driver_aeads
10016 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10017 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10018 OP_ALG_AAI_HMAC_PRECOMP,
10019 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10023 @@ -3731,7 +2638,6 @@ static struct caam_aead_alg driver_aeads
10024 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10025 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10026 OP_ALG_AAI_HMAC_PRECOMP,
10027 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10031 @@ -3755,7 +2661,6 @@ static struct caam_aead_alg driver_aeads
10032 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10033 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10034 OP_ALG_AAI_HMAC_PRECOMP,
10035 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10039 @@ -3779,7 +2684,6 @@ static struct caam_aead_alg driver_aeads
10040 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10041 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10042 OP_ALG_AAI_HMAC_PRECOMP,
10043 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10047 @@ -3803,7 +2707,6 @@ static struct caam_aead_alg driver_aeads
10048 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10049 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10050 OP_ALG_AAI_HMAC_PRECOMP,
10051 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10055 @@ -3826,7 +2729,6 @@ static struct caam_aead_alg driver_aeads
10056 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10057 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10058 OP_ALG_AAI_HMAC_PRECOMP,
10059 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10063 @@ -3849,7 +2751,6 @@ static struct caam_aead_alg driver_aeads
10064 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10065 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10066 OP_ALG_AAI_HMAC_PRECOMP,
10067 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10071 @@ -3872,7 +2773,6 @@ static struct caam_aead_alg driver_aeads
10072 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10073 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10074 OP_ALG_AAI_HMAC_PRECOMP,
10075 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10079 @@ -3895,7 +2795,6 @@ static struct caam_aead_alg driver_aeads
10080 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10081 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10082 OP_ALG_AAI_HMAC_PRECOMP,
10083 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10087 @@ -3918,7 +2817,6 @@ static struct caam_aead_alg driver_aeads
10088 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10089 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10090 OP_ALG_AAI_HMAC_PRECOMP,
10091 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10095 @@ -3941,7 +2839,6 @@ static struct caam_aead_alg driver_aeads
10096 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10097 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10098 OP_ALG_AAI_HMAC_PRECOMP,
10099 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10103 @@ -3964,7 +2861,6 @@ static struct caam_aead_alg driver_aeads
10104 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10105 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10106 OP_ALG_AAI_HMAC_PRECOMP,
10107 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10111 @@ -3987,7 +2883,6 @@ static struct caam_aead_alg driver_aeads
10112 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10113 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10114 OP_ALG_AAI_HMAC_PRECOMP,
10115 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10119 @@ -4010,7 +2905,6 @@ static struct caam_aead_alg driver_aeads
10120 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10121 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10122 OP_ALG_AAI_HMAC_PRECOMP,
10123 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10127 @@ -4033,7 +2927,6 @@ static struct caam_aead_alg driver_aeads
10128 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10129 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10130 OP_ALG_AAI_HMAC_PRECOMP,
10131 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10135 @@ -4056,7 +2949,6 @@ static struct caam_aead_alg driver_aeads
10136 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10137 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10138 OP_ALG_AAI_HMAC_PRECOMP,
10139 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10143 @@ -4079,7 +2971,6 @@ static struct caam_aead_alg driver_aeads
10144 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10145 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10146 OP_ALG_AAI_HMAC_PRECOMP,
10147 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10151 @@ -4104,7 +2995,6 @@ static struct caam_aead_alg driver_aeads
10152 OP_ALG_AAI_CTR_MOD128,
10153 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10154 OP_ALG_AAI_HMAC_PRECOMP,
10155 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10159 @@ -4129,7 +3019,6 @@ static struct caam_aead_alg driver_aeads
10160 OP_ALG_AAI_CTR_MOD128,
10161 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10162 OP_ALG_AAI_HMAC_PRECOMP,
10163 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10167 @@ -4155,7 +3044,6 @@ static struct caam_aead_alg driver_aeads
10168 OP_ALG_AAI_CTR_MOD128,
10169 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10170 OP_ALG_AAI_HMAC_PRECOMP,
10171 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10175 @@ -4180,7 +3068,6 @@ static struct caam_aead_alg driver_aeads
10176 OP_ALG_AAI_CTR_MOD128,
10177 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10178 OP_ALG_AAI_HMAC_PRECOMP,
10179 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10183 @@ -4206,7 +3093,6 @@ static struct caam_aead_alg driver_aeads
10184 OP_ALG_AAI_CTR_MOD128,
10185 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10186 OP_ALG_AAI_HMAC_PRECOMP,
10187 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10191 @@ -4231,7 +3117,6 @@ static struct caam_aead_alg driver_aeads
10192 OP_ALG_AAI_CTR_MOD128,
10193 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10194 OP_ALG_AAI_HMAC_PRECOMP,
10195 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10199 @@ -4257,7 +3142,6 @@ static struct caam_aead_alg driver_aeads
10200 OP_ALG_AAI_CTR_MOD128,
10201 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10202 OP_ALG_AAI_HMAC_PRECOMP,
10203 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10207 @@ -4282,7 +3166,6 @@ static struct caam_aead_alg driver_aeads
10208 OP_ALG_AAI_CTR_MOD128,
10209 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10210 OP_ALG_AAI_HMAC_PRECOMP,
10211 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10215 @@ -4308,7 +3191,6 @@ static struct caam_aead_alg driver_aeads
10216 OP_ALG_AAI_CTR_MOD128,
10217 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10218 OP_ALG_AAI_HMAC_PRECOMP,
10219 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10223 @@ -4333,7 +3215,6 @@ static struct caam_aead_alg driver_aeads
10224 OP_ALG_AAI_CTR_MOD128,
10225 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10226 OP_ALG_AAI_HMAC_PRECOMP,
10227 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10231 @@ -4359,7 +3240,6 @@ static struct caam_aead_alg driver_aeads
10232 OP_ALG_AAI_CTR_MOD128,
10233 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10234 OP_ALG_AAI_HMAC_PRECOMP,
10235 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10239 @@ -4384,7 +3264,6 @@ static struct caam_aead_alg driver_aeads
10240 OP_ALG_AAI_CTR_MOD128,
10241 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10242 OP_ALG_AAI_HMAC_PRECOMP,
10243 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10247 @@ -4399,16 +3278,34 @@ struct caam_crypto_alg {
10249 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
10251 + dma_addr_t dma_addr;
10253 ctx->jrdev = caam_jr_alloc();
10254 if (IS_ERR(ctx->jrdev)) {
10255 pr_err("Job Ring Device allocation for transform failed\n");
10256 return PTR_ERR(ctx->jrdev);
10259 + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
10260 + offsetof(struct caam_ctx,
10261 + sh_desc_enc_dma),
10262 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10263 + if (dma_mapping_error(ctx->jrdev, dma_addr)) {
10264 + dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
10265 + caam_jr_free(ctx->jrdev);
10269 + ctx->sh_desc_enc_dma = dma_addr;
10270 + ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
10272 + ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
10274 + ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
10276 /* copy descriptor header template value */
10277 - ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10278 - ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10279 - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
10280 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10281 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10285 @@ -4435,25 +3332,9 @@ static int caam_aead_init(struct crypto_
10287 static void caam_exit_common(struct caam_ctx *ctx)
10289 - if (ctx->sh_desc_enc_dma &&
10290 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
10291 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
10292 - desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
10293 - if (ctx->sh_desc_dec_dma &&
10294 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
10295 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
10296 - desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
10297 - if (ctx->sh_desc_givenc_dma &&
10298 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
10299 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
10300 - desc_bytes(ctx->sh_desc_givenc),
10302 - if (ctx->key_dma &&
10303 - !dma_mapping_error(ctx->jrdev, ctx->key_dma))
10304 - dma_unmap_single(ctx->jrdev, ctx->key_dma,
10305 - ctx->enckeylen + ctx->split_key_pad_len,
10308 + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
10309 + offsetof(struct caam_ctx, sh_desc_enc_dma),
10310 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10311 caam_jr_free(ctx->jrdev);
10314 @@ -4529,7 +3410,6 @@ static struct caam_crypto_alg *caam_alg_
10316 t_alg->caam.class1_alg_type = template->class1_alg_type;
10317 t_alg->caam.class2_alg_type = template->class2_alg_type;
10318 - t_alg->caam.alg_op = template->alg_op;
10323 +++ b/drivers/crypto/caam/caamalg_desc.c
10326 + * Shared descriptors for aead, ablkcipher algorithms
10328 + * Copyright 2016 NXP
10331 +#include "compat.h"
10332 +#include "desc_constr.h"
10333 +#include "caamalg_desc.h"
10336 + * For aead functions, read payload and write payload,
10337 + * both of which are specified in req->src and req->dst
10339 +static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
10341 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10342 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
10343 + KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
10346 +/* Set DK bit in class 1 operation if shared */
10347 +static inline void append_dec_op1(u32 *desc, u32 type)
10349 + u32 *jump_cmd, *uncond_jump_cmd;
10351 + /* DK bit is valid only for AES */
10352 + if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
10353 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10358 + jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
10359 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10361 + uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10362 + set_jump_tgt_here(desc, jump_cmd);
10363 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10364 + OP_ALG_DECRYPT | OP_ALG_AAI_DK);
10365 + set_jump_tgt_here(desc, uncond_jump_cmd);
10369 + * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
10370 + * (non-protocol) with no (null) encryption.
10371 + * @desc: pointer to buffer used for descriptor construction
10372 + * @adata: pointer to authentication transform definitions.
10373 + * A split key is required for SEC Era < 6; the size of the split key
10374 + * is specified in this case. Valid algorithm values - one of
10375 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10376 + * with OP_ALG_AAI_HMAC_PRECOMP.
10377 + * @icvsize: integrity check value (ICV) size (truncated or full)
10380 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
10381 + unsigned int icvsize, int era)
10383 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
10385 + init_sh_desc(desc, HDR_SHARE_SERIAL);
10387 + /* Skip if already shared */
10388 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10391 + if (adata->key_inline)
10392 + append_key_as_imm(desc, adata->key_virt,
10393 + adata->keylen_pad, adata->keylen,
10394 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
10397 + append_key(desc, adata->key_dma, adata->keylen,
10398 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
10400 + append_proto_dkp(desc, adata);
10402 + set_jump_tgt_here(desc, key_jump_cmd);
10404 + /* assoclen + cryptlen = seqinlen */
10405 + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
10407 + /* Prepare to read and write cryptlen + assoclen bytes */
10408 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10409 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10412 + * MOVE_LEN opcode is not available in all SEC HW revisions,
10413 + * thus need to do some magic, i.e. self-patch the descriptor
10416 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10417 + MOVE_DEST_MATH3 |
10418 + (0x6 << MOVE_LEN_SHIFT));
10419 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
10420 + MOVE_DEST_DESCBUF |
10422 + (0x8 << MOVE_LEN_SHIFT));
10424 + /* Class 2 operation */
10425 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10428 + /* Read and write cryptlen bytes */
10429 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10431 + set_move_tgt_here(desc, read_move_cmd);
10432 + set_move_tgt_here(desc, write_move_cmd);
10433 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10434 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10438 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10439 + LDST_SRCDST_BYTE_CONTEXT);
10442 + print_hex_dump(KERN_ERR,
10443 + "aead null enc shdesc@" __stringify(__LINE__)": ",
10444 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10447 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
10450 + * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
10451 + * (non-protocol) with no (null) decryption.
10452 + * @desc: pointer to buffer used for descriptor construction
10453 + * @adata: pointer to authentication transform definitions.
10454 + * A split key is required for SEC Era < 6; the size of the split key
10455 + * is specified in this case. Valid algorithm values - one of
10456 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10457 + * with OP_ALG_AAI_HMAC_PRECOMP.
10458 + * @icvsize: integrity check value (ICV) size (truncated or full)
10461 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
10462 + unsigned int icvsize, int era)
10464 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
10466 + init_sh_desc(desc, HDR_SHARE_SERIAL);
10468 + /* Skip if already shared */
10469 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10472 + if (adata->key_inline)
10473 + append_key_as_imm(desc, adata->key_virt,
10474 + adata->keylen_pad, adata->keylen,
10475 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
10478 + append_key(desc, adata->key_dma, adata->keylen,
10479 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
10481 + append_proto_dkp(desc, adata);
10483 + set_jump_tgt_here(desc, key_jump_cmd);
10485 + /* Class 2 operation */
10486 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10487 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10489 + /* assoclen + cryptlen = seqoutlen */
10490 + append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10492 + /* Prepare to read and write cryptlen + assoclen bytes */
10493 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
10494 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
10497 + * MOVE_LEN opcode is not available in all SEC HW revisions,
10498 + * thus need to do some magic, i.e. self-patch the descriptor
10501 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10502 + MOVE_DEST_MATH2 |
10503 + (0x6 << MOVE_LEN_SHIFT));
10504 + write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
10505 + MOVE_DEST_DESCBUF |
10507 + (0x8 << MOVE_LEN_SHIFT));
10509 + /* Read and write cryptlen bytes */
10510 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10513 + * Insert a NOP here, since we need at least 4 instructions between
10514 + * code patching the descriptor buffer and the location being patched.
10516 + jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10517 + set_jump_tgt_here(desc, jump_cmd);
10519 + set_move_tgt_here(desc, read_move_cmd);
10520 + set_move_tgt_here(desc, write_move_cmd);
10521 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10522 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10524 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10527 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10528 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10531 + print_hex_dump(KERN_ERR,
10532 + "aead null dec shdesc@" __stringify(__LINE__)": ",
10533 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10536 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
10538 +static void init_sh_desc_key_aead(u32 * const desc,
10539 + struct alginfo * const cdata,
10540 + struct alginfo * const adata,
10541 + const bool is_rfc3686, u32 *nonce, int era)
10543 + u32 *key_jump_cmd;
10544 + unsigned int enckeylen = cdata->keylen;
10546 + /* Note: Context registers are saved. */
10547 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
10549 + /* Skip if already shared */
10550 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10554 + * RFC3686 specific:
10555 + * | key = {AUTH_KEY, ENC_KEY, NONCE}
10556 + * | enckeylen = encryption key size + nonce size
10559 + enckeylen -= CTR_RFC3686_NONCE_SIZE;
10562 + if (adata->key_inline)
10563 + append_key_as_imm(desc, adata->key_virt,
10564 + adata->keylen_pad, adata->keylen,
10565 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
10568 + append_key(desc, adata->key_dma, adata->keylen,
10569 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
10571 + append_proto_dkp(desc, adata);
10574 + if (cdata->key_inline)
10575 + append_key_as_imm(desc, cdata->key_virt, enckeylen,
10576 + enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
10578 + append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
10579 + KEY_DEST_CLASS_REG);
10581 + /* Load Counter into CONTEXT1 reg */
10582 + if (is_rfc3686) {
10583 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
10584 + LDST_CLASS_IND_CCB |
10585 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
10586 + append_move(desc,
10587 + MOVE_SRC_OUTFIFO |
10588 + MOVE_DEST_CLASS1CTX |
10589 + (16 << MOVE_OFFSET_SHIFT) |
10590 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
10593 + set_jump_tgt_here(desc, key_jump_cmd);
10597 + * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
10598 + * (non-protocol).
10599 + * @desc: pointer to buffer used for descriptor construction
10600 + * @cdata: pointer to block cipher transform definitions
10601 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10602 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10603 + * @adata: pointer to authentication transform definitions.
10604 + * A split key is required for SEC Era < 6; the size of the split key
10605 + * is specified in this case. Valid algorithm values - one of
10606 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10607 + * with OP_ALG_AAI_HMAC_PRECOMP.
10608 + * @ivsize: initialization vector size
10609 + * @icvsize: integrity check value (ICV) size (truncated or full)
10610 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10611 + * @nonce: pointer to rfc3686 nonce
10612 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10613 + * @is_qi: true when called from caam/qi
10616 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
10617 + struct alginfo *adata, unsigned int ivsize,
10618 + unsigned int icvsize, const bool is_rfc3686,
10619 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
10622 + /* Note: Context registers are saved. */
10623 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
10625 + /* Class 2 operation */
10626 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10630 + u32 *wait_load_cmd;
10632 + /* REG3 = assoclen */
10633 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10634 + LDST_SRCDST_WORD_DECO_MATH3 |
10635 + (4 << LDST_OFFSET_SHIFT));
10637 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10638 + JUMP_COND_CALM | JUMP_COND_NCP |
10639 + JUMP_COND_NOP | JUMP_COND_NIP |
10641 + set_jump_tgt_here(desc, wait_load_cmd);
10643 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10644 + LDST_SRCDST_BYTE_CONTEXT |
10645 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10648 + /* Read and write assoclen bytes */
10649 + if (is_qi || era < 3) {
10650 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10651 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10653 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10654 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10657 + /* Skip assoc data */
10658 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10660 + /* read assoc before reading payload */
10661 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10664 + /* Load Counter into CONTEXT1 reg */
10666 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10667 + LDST_SRCDST_BYTE_CONTEXT |
10668 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10669 + LDST_OFFSET_SHIFT));
10671 + /* Class 1 operation */
10672 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10675 + /* Read and write cryptlen bytes */
10676 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10677 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10678 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
10681 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10682 + LDST_SRCDST_BYTE_CONTEXT);
10685 + print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
10686 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10689 +EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
10692 + * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
10693 + * (non-protocol).
10694 + * @desc: pointer to buffer used for descriptor construction
10695 + * @cdata: pointer to block cipher transform definitions
10696 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10697 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10698 + * @adata: pointer to authentication transform definitions.
10699 + * A split key is required for SEC Era < 6; the size of the split key
10700 + * is specified in this case. Valid algorithm values - one of
10701 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10702 + * with OP_ALG_AAI_HMAC_PRECOMP.
10703 + * @ivsize: initialization vector size
10704 + * @icvsize: integrity check value (ICV) size (truncated or full)
10705 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10706 + * @nonce: pointer to rfc3686 nonce
10707 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10708 + * @is_qi: true when called from caam/qi
10711 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
10712 + struct alginfo *adata, unsigned int ivsize,
10713 + unsigned int icvsize, const bool geniv,
10714 + const bool is_rfc3686, u32 *nonce,
10715 + const u32 ctx1_iv_off, const bool is_qi, int era)
10717 + /* Note: Context registers are saved. */
10718 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
10720 + /* Class 2 operation */
10721 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10722 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10725 + u32 *wait_load_cmd;
10727 + /* REG3 = assoclen */
10728 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10729 + LDST_SRCDST_WORD_DECO_MATH3 |
10730 + (4 << LDST_OFFSET_SHIFT));
10732 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10733 + JUMP_COND_CALM | JUMP_COND_NCP |
10734 + JUMP_COND_NOP | JUMP_COND_NIP |
10736 + set_jump_tgt_here(desc, wait_load_cmd);
10739 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10740 + LDST_SRCDST_BYTE_CONTEXT |
10741 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10744 + /* Read and write assoclen bytes */
10745 + if (is_qi || era < 3) {
10746 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10748 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
10751 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
10754 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10756 + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
10759 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
10763 + /* Skip assoc data */
10764 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10766 + /* read assoc before reading payload */
10767 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10771 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10772 + LDST_SRCDST_BYTE_CONTEXT |
10773 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10774 + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
10775 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
10778 + /* Load Counter into CONTEXT1 reg */
10780 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10781 + LDST_SRCDST_BYTE_CONTEXT |
10782 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10783 + LDST_OFFSET_SHIFT));
10785 + /* Choose operation */
10787 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10790 + append_dec_op1(desc, cdata->algtype);
10792 + /* Read and write cryptlen bytes */
10793 + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10794 + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10795 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
10798 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10799 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10802 + print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
10803 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10806 +EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
10809 + * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
10810 + * (non-protocol) with HW-generated initialization
10812 + * @desc: pointer to buffer used for descriptor construction
10813 + * @cdata: pointer to block cipher transform definitions
10814 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10815 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10816 + * @adata: pointer to authentication transform definitions.
10817 + * A split key is required for SEC Era < 6; the size of the split key
10818 + * is specified in this case. Valid algorithm values - one of
10819 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10820 + * with OP_ALG_AAI_HMAC_PRECOMP.
10821 + * @ivsize: initialization vector size
10822 + * @icvsize: integrity check value (ICV) size (truncated or full)
10823 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10824 + * @nonce: pointer to rfc3686 nonce
10825 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10826 + * @is_qi: true when called from caam/qi
10829 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
10830 + struct alginfo *adata, unsigned int ivsize,
10831 + unsigned int icvsize, const bool is_rfc3686,
10832 + u32 *nonce, const u32 ctx1_iv_off,
10833 + const bool is_qi, int era)
10835 + u32 geniv, moveiv;
10837 + /* Note: Context registers are saved. */
10838 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
10841 + u32 *wait_load_cmd;
10843 + /* REG3 = assoclen */
10844 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10845 + LDST_SRCDST_WORD_DECO_MATH3 |
10846 + (4 << LDST_OFFSET_SHIFT));
10848 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10849 + JUMP_COND_CALM | JUMP_COND_NCP |
10850 + JUMP_COND_NOP | JUMP_COND_NIP |
10852 + set_jump_tgt_here(desc, wait_load_cmd);
10855 + if (is_rfc3686) {
10857 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10858 + LDST_SRCDST_BYTE_CONTEXT |
10859 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10864 + /* Generate IV */
10865 + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
10866 + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
10867 + NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10868 + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
10869 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10870 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10871 + append_move(desc, MOVE_WAITCOMP |
10872 + MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
10873 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10874 + (ivsize << MOVE_LEN_SHIFT));
10875 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10878 + /* Copy IV to class 1 context */
10879 + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
10880 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10881 + (ivsize << MOVE_LEN_SHIFT));
10883 + /* Return to encryption */
10884 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10887 + /* Read and write assoclen bytes */
10888 + if (is_qi || era < 3) {
10889 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10890 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10892 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10893 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10896 + /* Skip assoc data */
10897 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10899 + /* read assoc before reading payload */
10900 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10903 + /* Copy iv from outfifo to class 2 fifo */
10904 + moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
10905 + NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10906 + append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
10907 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10908 + append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
10909 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10911 + /* Load Counter into CONTEXT1 reg */
10913 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10914 + LDST_SRCDST_BYTE_CONTEXT |
10915 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10916 + LDST_OFFSET_SHIFT));
10918 + /* Class 1 operation */
10919 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10922 + /* Will write ivsize + cryptlen */
10923 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10925 + /* Not need to reload iv */
10926 + append_seq_fifo_load(desc, ivsize,
10927 + FIFOLD_CLASS_SKIP);
10929 + /* Will read cryptlen */
10930 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10931 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
10932 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
10933 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10936 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10937 + LDST_SRCDST_BYTE_CONTEXT);
10940 + print_hex_dump(KERN_ERR,
10941 + "aead givenc shdesc@" __stringify(__LINE__)": ",
10942 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10945 +EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
10948 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
10949 + * @desc: pointer to buffer used for descriptor construction
10950 + * @cdata: pointer to block cipher transform definitions
10951 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
10952 + * with OP_ALG_AAI_CBC
10953 + * @adata: pointer to authentication transform definitions.
10954 + * A split key is required for SEC Era < 6; the size of the split key
10955 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
10956 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10957 + * @assoclen: associated data length
10958 + * @ivsize: initialization vector size
10959 + * @authsize: authentication data size
10960 + * @blocksize: block cipher size
10963 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
10964 + struct alginfo *adata, unsigned int assoclen,
10965 + unsigned int ivsize, unsigned int authsize,
10966 + unsigned int blocksize, int era)
10968 + u32 *key_jump_cmd, *zero_payload_jump_cmd;
10969 + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
10972 + * Compute the index (in bytes) for the LOAD with destination of
10973 + * Class 1 Data Size Register and for the LOAD that generates padding
10975 + if (adata->key_inline) {
10976 + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10977 + cdata->keylen - 4 * CAAM_CMD_SZ;
10978 + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10979 + cdata->keylen - 2 * CAAM_CMD_SZ;
10981 + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10983 + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10987 + stidx = 1 << HDR_START_IDX_SHIFT;
10988 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
10990 + /* skip key loading if they are loaded due to sharing */
10991 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10995 + if (adata->key_inline)
10996 + append_key_as_imm(desc, adata->key_virt,
10997 + adata->keylen_pad, adata->keylen,
10998 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
11001 + append_key(desc, adata->key_dma, adata->keylen,
11002 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
11004 + append_proto_dkp(desc, adata);
11007 + if (cdata->key_inline)
11008 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11009 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11011 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11012 + KEY_DEST_CLASS_REG);
11014 + set_jump_tgt_here(desc, key_jump_cmd);
11016 + /* class 2 operation */
11017 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
11019 + /* class 1 operation */
11020 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11023 + /* payloadlen = input data length - (assoclen + ivlen) */
11024 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
11026 + /* math1 = payloadlen + icvlen */
11027 + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
11029 + /* padlen = block_size - math1 % block_size */
11030 + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
11031 + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
11033 + /* cryptlen = payloadlen + icvlen + padlen */
11034 + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
11037 + * update immediate data with the padding length value
11038 + * for the LOAD in the class 1 data size register.
11040 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
11041 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
11042 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
11043 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
11045 + /* overwrite PL field for the padding iNFO FIFO entry */
11046 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
11047 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
11048 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
11049 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
11051 + /* store encrypted payload, icv and padding */
11052 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
11054 + /* if payload length is zero, jump to zero-payload commands */
11055 + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
11056 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11057 + JUMP_COND_MATH_Z);
11059 + /* load iv in context1 */
11060 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11061 + LDST_CLASS_1_CCB | ivsize);
11063 + /* read assoc for authentication */
11064 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
11065 + FIFOLD_TYPE_MSG);
11066 + /* insnoop payload */
11067 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
11068 + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
11070 + /* jump the zero-payload commands */
11071 + append_jump(desc, JUMP_TEST_ALL | 3);
11073 + /* zero-payload commands */
11074 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11076 + /* load iv in context1 */
11077 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11078 + LDST_CLASS_1_CCB | ivsize);
11080 + /* assoc data is the only data for authentication */
11081 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
11082 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
11084 + /* send icv to encryption */
11085 + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
11088 + /* update class 1 data size register with padding length */
11089 + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
11090 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
11092 + /* generate padding and send it to encryption */
11093 + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
11094 + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
11095 + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
11096 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
11099 + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
11100 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
11101 + desc_bytes(desc), 1);
11104 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
11107 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
11108 + * @desc: pointer to buffer used for descriptor construction
11109 + * @cdata: pointer to block cipher transform definitions
11110 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
11111 + * with OP_ALG_AAI_CBC
11112 + * @adata: pointer to authentication transform definitions.
11113 + * A split key is required for SEC Era < 6; the size of the split key
11114 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
11115 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
11116 + * @assoclen: associated data length
11117 + * @ivsize: initialization vector size
11118 + * @authsize: authentication data size
11119 + * @blocksize: block cipher size
11122 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
11123 + struct alginfo *adata, unsigned int assoclen,
11124 + unsigned int ivsize, unsigned int authsize,
11125 + unsigned int blocksize, int era)
11127 + u32 stidx, jumpback;
11128 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
11130 + * Pointer Size bool determines the size of address pointers.
11131 + * false - Pointers fit in one 32-bit word.
11132 + * true - Pointers fit in two 32-bit words.
11134 + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
11136 + stidx = 1 << HDR_START_IDX_SHIFT;
11137 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
11139 + /* skip key loading if they are loaded due to sharing */
11140 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11144 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
11145 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
11147 + append_proto_dkp(desc, adata);
11149 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11150 + KEY_DEST_CLASS_REG);
11152 + set_jump_tgt_here(desc, key_jump_cmd);
11154 + /* class 2 operation */
11155 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
11156 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11157 + /* class 1 operation */
11158 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11161 + /* VSIL = input data length - 2 * block_size */
11162 + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
11166 + * payloadlen + icvlen + padlen = input data length - (assoclen +
11169 + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
11171 + /* skip data to the last but one cipher block */
11172 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
11174 + /* load iv for the last cipher block */
11175 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11176 + LDST_CLASS_1_CCB | ivsize);
11178 + /* read last cipher block */
11179 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11180 + FIFOLD_TYPE_LAST1 | blocksize);
11182 + /* move decrypted block into math0 and math1 */
11183 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
11186 + /* reset AES CHA */
11187 + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
11188 + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
11190 + /* rewind input sequence */
11191 + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
11193 + /* key1 is in decryption form */
11194 + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
11195 + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
11197 + /* load iv in context1 */
11198 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
11199 + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
11201 + /* read sequence number */
11202 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
11203 + /* load Type, Version and Len fields in math0 */
11204 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
11205 + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
11207 + /* compute (padlen - 1) */
11208 + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
11210 + /* math2 = icvlen + (padlen - 1) + 1 */
11211 + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
11213 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11215 + /* VSOL = payloadlen + icvlen + padlen */
11216 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
11218 +#ifdef __LITTLE_ENDIAN
11219 + append_moveb(desc, MOVE_WAITCOMP |
11220 + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
11222 + /* update Len field */
11223 + append_math_sub(desc, REG0, REG0, REG2, 8);
11225 + /* store decrypted payload, icv and padding */
11226 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
11228 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/
11229 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11231 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11232 + JUMP_COND_MATH_Z);
11234 + /* send Type, Version and Len(pre ICV) fields to authentication */
11235 + append_move(desc, MOVE_WAITCOMP |
11236 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11237 + (3 << MOVE_OFFSET_SHIFT) | 5);
11239 + /* outsnooping payload */
11240 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
11241 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
11243 + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
11245 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11246 + /* send Type, Version and Len(pre ICV) fields to authentication */
11247 + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
11248 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11249 + (3 << MOVE_OFFSET_SHIFT) | 5);
11251 + set_jump_tgt_here(desc, skip_zero_jump_cmd);
11252 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
11254 + /* load icvlen and padlen */
11255 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11256 + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
11258 + /* VSIL = (payloadlen + icvlen + padlen) - icvlen + padlen */
11259 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11262 + * Start a new input sequence using the SEQ OUT PTR command options,
11263 + * pointer and length used when the current output sequence was defined.
11267 + * Move the lower 32 bits of Shared Descriptor address, the
11268 + * SEQ OUT PTR command, Output Pointer (2 words) and
11269 + * Output Length into math registers.
11271 +#ifdef __LITTLE_ENDIAN
11272 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11273 + MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) |
11276 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11277 + MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11280 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
11281 + append_math_and_imm_u32(desc, REG0, REG0, IMM,
11282 + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
11283 + /* Append a JUMP command after the copied fields */
11284 + jumpback = CMD_JUMP | (char)-9;
11285 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11286 + LDST_SRCDST_WORD_DECO_MATH2 |
11287 + (4 << LDST_OFFSET_SHIFT));
11288 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11289 + /* Move the updated fields back to the Job Descriptor */
11290 +#ifdef __LITTLE_ENDIAN
11291 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11292 + MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) |
11295 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11296 + MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11300 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
11301 + * and then jump back to the next command from the
11302 + * Shared Descriptor.
11304 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
11307 + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
11308 + * Output Length into math registers.
11310 +#ifdef __LITTLE_ENDIAN
11311 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11312 + MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11315 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11316 + MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) |
11319 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
11320 + append_math_and_imm_u64(desc, REG0, REG0, IMM,
11321 + ~(((u64)(CMD_SEQ_IN_PTR ^
11322 + CMD_SEQ_OUT_PTR)) << 32));
11323 + /* Append a JUMP command after the copied fields */
11324 + jumpback = CMD_JUMP | (char)-7;
11325 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11326 + LDST_SRCDST_WORD_DECO_MATH1 |
11327 + (4 << LDST_OFFSET_SHIFT));
11328 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11329 + /* Move the updated fields back to the Job Descriptor */
11330 +#ifdef __LITTLE_ENDIAN
11331 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11332 + MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11335 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11336 + MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) |
11340 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
11341 + * and then jump back to the next command from the
11342 + * Shared Descriptor.
11344 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
11347 + /* skip payload */
11348 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
11350 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
11351 + FIFOLD_TYPE_LAST2 | authsize);
11354 + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
11355 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
11356 + desc_bytes(desc), 1);
11359 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
11362 + * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
11363 + * @desc: pointer to buffer used for descriptor construction
11364 + * @cdata: pointer to block cipher transform definitions
11365 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11366 + * @ivsize: initialization vector size
11367 + * @icvsize: integrity check value (ICV) size (truncated or full)
11368 + * @is_qi: true when called from caam/qi
11370 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
11371 + unsigned int ivsize, unsigned int icvsize,
11372 + const bool is_qi)
11374 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
11375 + *zero_assoc_jump_cmd2;
11377 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11379 + /* skip key loading if they are loaded due to sharing */
11380 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11382 + if (cdata->key_inline)
11383 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11384 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11386 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11387 + KEY_DEST_CLASS_REG);
11388 + set_jump_tgt_here(desc, key_jump_cmd);
11390 + /* class 1 operation */
11391 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11395 + u32 *wait_load_cmd;
11397 + /* REG3 = assoclen */
11398 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11399 + LDST_SRCDST_WORD_DECO_MATH3 |
11400 + (4 << LDST_OFFSET_SHIFT));
11402 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11403 + JUMP_COND_CALM | JUMP_COND_NCP |
11404 + JUMP_COND_NOP | JUMP_COND_NIP |
11406 + set_jump_tgt_here(desc, wait_load_cmd);
11408 + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
11411 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
11415 + /* if assoclen + cryptlen is ZERO, skip to ICV write */
11416 + zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
11417 + JUMP_COND_MATH_Z);
11420 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11421 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11423 + /* if assoclen is ZERO, skip reading the assoc data */
11424 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11425 + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11426 + JUMP_COND_MATH_Z);
11428 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11430 + /* skip assoc data */
11431 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11433 + /* cryptlen = seqinlen - assoclen */
11434 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
11436 + /* if cryptlen is ZERO jump to zero-payload commands */
11437 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11438 + JUMP_COND_MATH_Z);
11440 + /* read assoc data */
11441 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11442 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11443 + set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11445 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11447 + /* write encrypted data */
11448 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11450 + /* read payload data */
11451 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11452 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11454 + /* jump to ICV writing */
11456 + append_jump(desc, JUMP_TEST_ALL | 4);
11458 + append_jump(desc, JUMP_TEST_ALL | 2);
11460 + /* zero-payload commands */
11461 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11463 + /* read assoc data */
11464 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11465 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
11467 + /* jump to ICV writing */
11468 + append_jump(desc, JUMP_TEST_ALL | 2);
11470 + /* There is no input data */
11471 + set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
11474 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11475 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
11476 + FIFOLD_TYPE_LAST1);
11479 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11480 + LDST_SRCDST_BYTE_CONTEXT);
11483 + print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
11484 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11487 +EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
11490 + * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
11491 + * @desc: pointer to buffer used for descriptor construction
11492 + * @cdata: pointer to block cipher transform definitions
11493 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11494 + * @ivsize: initialization vector size
11495 + * @icvsize: integrity check value (ICV) size (truncated or full)
11496 + * @is_qi: true when called from caam/qi
11498 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
11499 + unsigned int ivsize, unsigned int icvsize,
11500 + const bool is_qi)
11502 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
11504 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11506 + /* skip key loading if they are loaded due to sharing */
11507 + key_jump_cmd = append_jump(desc, JUMP_JSL |
11508 + JUMP_TEST_ALL | JUMP_COND_SHRD);
11509 + if (cdata->key_inline)
11510 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11511 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11513 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11514 + KEY_DEST_CLASS_REG);
11515 + set_jump_tgt_here(desc, key_jump_cmd);
11517 + /* class 1 operation */
11518 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11519 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11522 + u32 *wait_load_cmd;
11524 + /* REG3 = assoclen */
11525 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11526 + LDST_SRCDST_WORD_DECO_MATH3 |
11527 + (4 << LDST_OFFSET_SHIFT));
11529 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11530 + JUMP_COND_CALM | JUMP_COND_NCP |
11531 + JUMP_COND_NOP | JUMP_COND_NIP |
11533 + set_jump_tgt_here(desc, wait_load_cmd);
11535 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11536 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11539 + /* if assoclen is ZERO, skip reading the assoc data */
11540 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11541 + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11542 + JUMP_COND_MATH_Z);
11544 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11546 + /* skip assoc data */
11547 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11549 + /* read assoc data */
11550 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11551 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11553 + set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11555 + /* cryptlen = seqoutlen - assoclen */
11556 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11558 + /* jump to zero-payload command if cryptlen is zero */
11559 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11560 + JUMP_COND_MATH_Z);
11562 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11564 + /* store encrypted data */
11565 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11567 + /* read payload data */
11568 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11569 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11571 + /* zero-payload command */
11572 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11575 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11576 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11579 + print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
11580 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11583 +EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
11586 + * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
11587 + * (non-protocol).
11588 + * @desc: pointer to buffer used for descriptor construction
11589 + * @cdata: pointer to block cipher transform definitions
11590 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11591 + * @ivsize: initialization vector size
11592 + * @icvsize: integrity check value (ICV) size (truncated or full)
11593 + * @is_qi: true when called from caam/qi
11595 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
11596 + unsigned int ivsize, unsigned int icvsize,
11597 + const bool is_qi)
11599 + u32 *key_jump_cmd;
11601 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11603 + /* Skip key loading if it is loaded due to sharing */
11604 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11606 + if (cdata->key_inline)
11607 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11608 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11610 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11611 + KEY_DEST_CLASS_REG);
11612 + set_jump_tgt_here(desc, key_jump_cmd);
11614 + /* Class 1 operation */
11615 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11619 + u32 *wait_load_cmd;
11621 + /* REG3 = assoclen */
11622 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11623 + LDST_SRCDST_WORD_DECO_MATH3 |
11624 + (4 << LDST_OFFSET_SHIFT));
11626 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11627 + JUMP_COND_CALM | JUMP_COND_NCP |
11628 + JUMP_COND_NOP | JUMP_COND_NIP |
11630 + set_jump_tgt_here(desc, wait_load_cmd);
11632 + /* Read salt and IV */
11633 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11634 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11636 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11637 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11640 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11641 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11643 + /* Read assoc data */
11644 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11645 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11648 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11650 + /* Will read cryptlen bytes */
11651 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11653 + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11654 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11656 + /* Skip assoc data */
11657 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11659 + /* cryptlen = seqoutlen - assoclen */
11660 + append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
11662 + /* Write encrypted data */
11663 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11665 + /* Read payload data */
11666 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11667 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11670 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11671 + LDST_SRCDST_BYTE_CONTEXT);
11674 + print_hex_dump(KERN_ERR,
11675 + "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
11676 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11679 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
11682 + * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
11683 + * (non-protocol).
11684 + * @desc: pointer to buffer used for descriptor construction
11685 + * @cdata: pointer to block cipher transform definitions
11686 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11687 + * @ivsize: initialization vector size
11688 + * @icvsize: integrity check value (ICV) size (truncated or full)
11689 + * @is_qi: true when called from caam/qi
11691 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
11692 + unsigned int ivsize, unsigned int icvsize,
11693 + const bool is_qi)
11695 + u32 *key_jump_cmd;
11697 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11699 + /* Skip key loading if it is loaded due to sharing */
11700 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11702 + if (cdata->key_inline)
11703 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11704 + cdata->keylen, CLASS_1 |
11705 + KEY_DEST_CLASS_REG);
11707 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11708 + KEY_DEST_CLASS_REG);
11709 + set_jump_tgt_here(desc, key_jump_cmd);
11711 + /* Class 1 operation */
11712 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11713 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11716 + u32 *wait_load_cmd;
11718 + /* REG3 = assoclen */
11719 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11720 + LDST_SRCDST_WORD_DECO_MATH3 |
11721 + (4 << LDST_OFFSET_SHIFT));
11723 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11724 + JUMP_COND_CALM | JUMP_COND_NCP |
11725 + JUMP_COND_NOP | JUMP_COND_NIP |
11727 + set_jump_tgt_here(desc, wait_load_cmd);
11729 + /* Read salt and IV */
11730 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11731 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11733 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11734 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11737 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11738 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11740 + /* Read assoc data */
11741 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11742 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11745 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11747 + /* Will read cryptlen bytes */
11748 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
11750 + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11751 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11753 + /* Skip assoc data */
11754 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11756 + /* Will write cryptlen bytes */
11757 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11759 + /* Store payload data */
11760 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11762 + /* Read encrypted data */
11763 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11764 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11767 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11768 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11771 + print_hex_dump(KERN_ERR,
11772 + "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
11773 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11776 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
11779 + * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
11780 + * (non-protocol).
11781 + * @desc: pointer to buffer used for descriptor construction
11782 + * @cdata: pointer to block cipher transform definitions
11783 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11784 + * @ivsize: initialization vector size
11785 + * @icvsize: integrity check value (ICV) size (truncated or full)
11786 + * @is_qi: true when called from caam/qi
11788 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
11789 + unsigned int ivsize, unsigned int icvsize,
11790 + const bool is_qi)
11792 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11794 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11796 + /* Skip key loading if it is loaded due to sharing */
11797 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11799 + if (cdata->key_inline)
11800 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11801 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11803 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11804 + KEY_DEST_CLASS_REG);
11805 + set_jump_tgt_here(desc, key_jump_cmd);
11807 + /* Class 1 operation */
11808 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11812 + /* assoclen is not needed, skip it */
11813 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11815 + /* Read salt and IV */
11816 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11817 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11819 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11820 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11823 + /* assoclen + cryptlen = seqinlen */
11824 + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
11827 + * MOVE_LEN opcode is not available in all SEC HW revisions,
11828 + * thus need to do some magic, i.e. self-patch the descriptor
11831 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11832 + (0x6 << MOVE_LEN_SHIFT));
11833 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11834 + (0x8 << MOVE_LEN_SHIFT));
11836 + /* Will read assoclen + cryptlen bytes */
11837 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11839 + /* Will write assoclen + cryptlen bytes */
11840 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11842 + /* Read and write assoclen + cryptlen bytes */
11843 + aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
11845 + set_move_tgt_here(desc, read_move_cmd);
11846 + set_move_tgt_here(desc, write_move_cmd);
11847 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11848 + /* Move payload data to OFIFO */
11849 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11852 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11853 + LDST_SRCDST_BYTE_CONTEXT);
11856 + print_hex_dump(KERN_ERR,
11857 + "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
11858 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11861 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
11864 + * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
11865 + * (non-protocol).
11866 + * @desc: pointer to buffer used for descriptor construction
11867 + * @cdata: pointer to block cipher transform definitions
11868 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11869 + * @ivsize: initialization vector size
11870 + * @icvsize: integrity check value (ICV) size (truncated or full)
11871 + * @is_qi: true when called from caam/qi
11873 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
11874 + unsigned int ivsize, unsigned int icvsize,
11875 + const bool is_qi)
11877 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11879 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11881 + /* Skip key loading if it is loaded due to sharing */
11882 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11884 + if (cdata->key_inline)
11885 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11886 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11888 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11889 + KEY_DEST_CLASS_REG);
11890 + set_jump_tgt_here(desc, key_jump_cmd);
11892 + /* Class 1 operation */
11893 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11894 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11897 + /* assoclen is not needed, skip it */
11898 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11900 + /* Read salt and IV */
11901 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11902 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11904 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11905 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11908 + /* assoclen + cryptlen = seqoutlen */
11909 + append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11912 + * MOVE_LEN opcode is not available in all SEC HW revisions,
11913 + * thus need to do some magic, i.e. self-patch the descriptor
11916 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11917 + (0x6 << MOVE_LEN_SHIFT));
11918 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11919 + (0x8 << MOVE_LEN_SHIFT));
11921 + /* Will read assoclen + cryptlen bytes */
11922 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11924 + /* Will write assoclen + cryptlen bytes */
11925 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11927 + /* Store payload data */
11928 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11930 + /* In-snoop assoclen + cryptlen data */
11931 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
11932 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
11934 + set_move_tgt_here(desc, read_move_cmd);
11935 + set_move_tgt_here(desc, write_move_cmd);
11936 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11937 + /* Move payload data to OFIFO */
11938 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11939 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
11942 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11943 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11946 + print_hex_dump(KERN_ERR,
11947 + "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
11948 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11951 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
11954 + * For ablkcipher encrypt and decrypt, read from req->src and
11955 + * write to req->dst
11957 +static inline void ablkcipher_append_src_dst(u32 *desc)
11959 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11960 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11961 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
11962 + KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11963 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
11967 + * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
11968 + * @desc: pointer to buffer used for descriptor construction
11969 + * @cdata: pointer to block cipher transform definitions
11970 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11971 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11972 + * @ivsize: initialization vector size
11973 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11974 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11976 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
11977 + unsigned int ivsize, const bool is_rfc3686,
11978 + const u32 ctx1_iv_off)
11980 + u32 *key_jump_cmd;
11982 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11983 + /* Skip if already shared */
11984 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11987 + /* Load class1 key only */
11988 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11989 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11991 + /* Load nonce into CONTEXT1 reg */
11992 + if (is_rfc3686) {
11993 + const u8 *nonce = cdata->key_virt + cdata->keylen;
11995 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11996 + LDST_CLASS_IND_CCB |
11997 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11998 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11999 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
12000 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
12003 + set_jump_tgt_here(desc, key_jump_cmd);
12006 + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
12007 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12009 + /* Load counter into CONTEXT1 reg */
12011 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12012 + LDST_SRCDST_BYTE_CONTEXT |
12013 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12014 + LDST_OFFSET_SHIFT));
12016 + /* Load operation */
12017 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12020 + /* Perform operation */
12021 + ablkcipher_append_src_dst(desc);
12024 + print_hex_dump(KERN_ERR,
12025 + "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
12026 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12029 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
12032 + * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
12033 + * @desc: pointer to buffer used for descriptor construction
12034 + * @cdata: pointer to block cipher transform definitions
12035 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
12036 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
12037 + * @ivsize: initialization vector size
12038 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
12039 + * @ctx1_iv_off: IV offset in CONTEXT1 register
12041 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
12042 + unsigned int ivsize, const bool is_rfc3686,
12043 + const u32 ctx1_iv_off)
12045 + u32 *key_jump_cmd;
12047 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12048 + /* Skip if already shared */
12049 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12052 + /* Load class1 key only */
12053 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12054 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12056 + /* Load nonce into CONTEXT1 reg */
12057 + if (is_rfc3686) {
12058 + const u8 *nonce = cdata->key_virt + cdata->keylen;
12060 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
12061 + LDST_CLASS_IND_CCB |
12062 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
12063 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
12064 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
12065 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
12068 + set_jump_tgt_here(desc, key_jump_cmd);
12071 + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
12072 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12074 + /* Load counter into CONTEXT1 reg */
12076 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12077 + LDST_SRCDST_BYTE_CONTEXT |
12078 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12079 + LDST_OFFSET_SHIFT));
12081 + /* Choose operation */
12083 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12086 + append_dec_op1(desc, cdata->algtype);
12088 + /* Perform operation */
12089 + ablkcipher_append_src_dst(desc);
12092 + print_hex_dump(KERN_ERR,
12093 + "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
12094 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12097 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
12100 + * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
12101 + * with HW-generated initialization vector.
12102 + * @desc: pointer to buffer used for descriptor construction
12103 + * @cdata: pointer to block cipher transform definitions
12104 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
12105 + * with OP_ALG_AAI_CBC.
12106 + * @ivsize: initialization vector size
12107 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
12108 + * @ctx1_iv_off: IV offset in CONTEXT1 register
12110 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
12111 + unsigned int ivsize, const bool is_rfc3686,
12112 + const u32 ctx1_iv_off)
12114 + u32 *key_jump_cmd, geniv;
12116 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12117 + /* Skip if already shared */
12118 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12121 + /* Load class1 key only */
12122 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12123 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12125 + /* Load Nonce into CONTEXT1 reg */
12126 + if (is_rfc3686) {
12127 + const u8 *nonce = cdata->key_virt + cdata->keylen;
12129 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
12130 + LDST_CLASS_IND_CCB |
12131 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
12132 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
12133 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
12134 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
12136 + set_jump_tgt_here(desc, key_jump_cmd);
12138 + /* Generate IV */
12139 + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
12140 + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
12141 + (ivsize << NFIFOENTRY_DLEN_SHIFT);
12142 + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
12143 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
12144 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
12145 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
12146 + MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
12147 + (ctx1_iv_off << MOVE_OFFSET_SHIFT));
12148 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
12150 + /* Copy generated IV to memory */
12151 + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
12152 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12154 + /* Load Counter into CONTEXT1 reg */
12156 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12157 + LDST_SRCDST_BYTE_CONTEXT |
12158 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12159 + LDST_OFFSET_SHIFT));
12162 + append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
12163 + (1 << JUMP_OFFSET_SHIFT));
12165 + /* Load operation */
12166 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12169 + /* Perform operation */
12170 + ablkcipher_append_src_dst(desc);
12173 + print_hex_dump(KERN_ERR,
12174 + "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
12175 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12178 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
12181 + * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
12183 + * @desc: pointer to buffer used for descriptor construction
12184 + * @cdata: pointer to block cipher transform definitions
12185 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
12187 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
12189 + __be64 sector_size = cpu_to_be64(512);
12190 + u32 *key_jump_cmd;
12192 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12193 + /* Skip if already shared */
12194 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12197 + /* Load class1 keys only */
12198 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12199 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12201 + /* Load sector size with index 40 bytes (0x28) */
12202 + append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12203 + LDST_SRCDST_BYTE_CONTEXT |
12204 + (0x28 << LDST_OFFSET_SHIFT));
12206 + set_jump_tgt_here(desc, key_jump_cmd);
12209 + * create sequence for loading the sector index
12210 + * Upper 8B of IV - will be used as sector index
12211 + * Lower 8B of IV - will be discarded
12213 + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12214 + (0x20 << LDST_OFFSET_SHIFT));
12215 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12217 + /* Load operation */
12218 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12221 + /* Perform operation */
12222 + ablkcipher_append_src_dst(desc);
12225 + print_hex_dump(KERN_ERR,
12226 + "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
12227 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12230 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
12233 + * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
12235 + * @desc: pointer to buffer used for descriptor construction
12236 + * @cdata: pointer to block cipher transform definitions
12237 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
12239 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
12241 + __be64 sector_size = cpu_to_be64(512);
12242 + u32 *key_jump_cmd;
12244 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12245 + /* Skip if already shared */
12246 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12249 + /* Load class1 key only */
12250 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12251 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12253 + /* Load sector size with index 40 bytes (0x28) */
12254 + append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12255 + LDST_SRCDST_BYTE_CONTEXT |
12256 + (0x28 << LDST_OFFSET_SHIFT));
12258 + set_jump_tgt_here(desc, key_jump_cmd);
12261 + * create sequence for loading the sector index
12262 + * Upper 8B of IV - will be used as sector index
12263 + * Lower 8B of IV - will be discarded
12265 + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12266 + (0x20 << LDST_OFFSET_SHIFT));
12267 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12269 + /* Load operation */
12270 + append_dec_op1(desc, cdata->algtype);
12272 + /* Perform operation */
12273 + ablkcipher_append_src_dst(desc);
12276 + print_hex_dump(KERN_ERR,
12277 + "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
12278 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12281 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
12283 +MODULE_LICENSE("GPL");
12284 +MODULE_DESCRIPTION("FSL CAAM descriptor support");
12285 +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
12287 +++ b/drivers/crypto/caam/caamalg_desc.h
12290 + * Shared descriptors for aead, ablkcipher algorithms
12292 + * Copyright 2016 NXP
12295 +#ifndef _CAAMALG_DESC_H_
12296 +#define _CAAMALG_DESC_H_
12298 +/* length of descriptors text */
12299 +#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
12300 +#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
12301 +#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
12302 +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
12303 +#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
12304 +#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
12305 +#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
12307 +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
12308 +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
12310 +/* Note: Nonce is counted in cdata.keylen */
12311 +#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
12313 +#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
12314 +#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
12315 +#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
12317 +#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
12318 +#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
12319 +#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
12320 +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
12321 +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
12323 +#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
12324 +#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12325 +#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12326 +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
12327 +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
12329 +#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
12330 +#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
12331 +#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
12332 +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
12333 +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
12335 +#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
12336 +#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
12337 + 20 * CAAM_CMD_SZ)
12338 +#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
12339 + 15 * CAAM_CMD_SZ)
12341 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
12342 + unsigned int icvsize, int era);
12344 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
12345 + unsigned int icvsize, int era);
12347 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
12348 + struct alginfo *adata, unsigned int ivsize,
12349 + unsigned int icvsize, const bool is_rfc3686,
12350 + u32 *nonce, const u32 ctx1_iv_off,
12351 + const bool is_qi, int era);
12353 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
12354 + struct alginfo *adata, unsigned int ivsize,
12355 + unsigned int icvsize, const bool geniv,
12356 + const bool is_rfc3686, u32 *nonce,
12357 + const u32 ctx1_iv_off, const bool is_qi, int era);
12359 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
12360 + struct alginfo *adata, unsigned int ivsize,
12361 + unsigned int icvsize, const bool is_rfc3686,
12362 + u32 *nonce, const u32 ctx1_iv_off,
12363 + const bool is_qi, int era);
12365 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
12366 + struct alginfo *adata, unsigned int assoclen,
12367 + unsigned int ivsize, unsigned int authsize,
12368 + unsigned int blocksize, int era);
12370 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
12371 + struct alginfo *adata, unsigned int assoclen,
12372 + unsigned int ivsize, unsigned int authsize,
12373 + unsigned int blocksize, int era);
12375 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
12376 + unsigned int ivsize, unsigned int icvsize,
12377 + const bool is_qi);
12379 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
12380 + unsigned int ivsize, unsigned int icvsize,
12381 + const bool is_qi);
12383 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
12384 + unsigned int ivsize, unsigned int icvsize,
12385 + const bool is_qi);
12387 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
12388 + unsigned int ivsize, unsigned int icvsize,
12389 + const bool is_qi);
12391 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
12392 + unsigned int ivsize, unsigned int icvsize,
12393 + const bool is_qi);
12395 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
12396 + unsigned int ivsize, unsigned int icvsize,
12397 + const bool is_qi);
12399 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
12400 + unsigned int ivsize, const bool is_rfc3686,
12401 + const u32 ctx1_iv_off);
12403 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
12404 + unsigned int ivsize, const bool is_rfc3686,
12405 + const u32 ctx1_iv_off);
12407 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
12408 + unsigned int ivsize, const bool is_rfc3686,
12409 + const u32 ctx1_iv_off);
12411 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
12413 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
12415 +#endif /* _CAAMALG_DESC_H_ */
12417 +++ b/drivers/crypto/caam/caamalg_qi.c
12420 + * Freescale FSL CAAM support for crypto API over QI backend.
12421 + * Based on caamalg.c
12423 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
12424 + * Copyright 2016-2017 NXP
12427 +#include "compat.h"
12430 +#include "intern.h"
12431 +#include "desc_constr.h"
12432 +#include "error.h"
12433 +#include "sg_sw_qm.h"
12434 +#include "key_gen.h"
12437 +#include "caamalg_desc.h"
12442 +#define CAAM_CRA_PRIORITY 2000
12443 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
12444 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
12445 + SHA512_DIGEST_SIZE * 2)
12447 +#define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
12448 + CAAM_MAX_KEY_SIZE)
12449 +#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
12451 +struct caam_alg_entry {
12452 + int class1_alg_type;
12453 + int class2_alg_type;
12458 +struct caam_aead_alg {
12459 + struct aead_alg aead;
12460 + struct caam_alg_entry caam;
12465 + * per-session context
12468 + struct device *jrdev;
12469 + u32 sh_desc_enc[DESC_MAX_USED_LEN];
12470 + u32 sh_desc_dec[DESC_MAX_USED_LEN];
12471 + u32 sh_desc_givenc[DESC_MAX_USED_LEN];
12472 + u8 key[CAAM_MAX_KEY_SIZE];
12473 + dma_addr_t key_dma;
12474 + struct alginfo adata;
12475 + struct alginfo cdata;
12476 + unsigned int authsize;
12477 + struct device *qidev;
12478 + spinlock_t lock; /* Protects multiple init of driver context */
12479 + struct caam_drv_ctx *drv_ctx[NUM_OP];
12482 +static int aead_set_sh_desc(struct crypto_aead *aead)
12484 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
12485 + typeof(*alg), aead);
12486 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12487 + unsigned int ivsize = crypto_aead_ivsize(aead);
12488 + u32 ctx1_iv_off = 0;
12489 + u32 *nonce = NULL;
12490 + unsigned int data_len[2];
12492 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12493 + OP_ALG_AAI_CTR_MOD128);
12494 + const bool is_rfc3686 = alg->caam.rfc3686;
12495 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
12497 + if (!ctx->cdata.keylen || !ctx->authsize)
12501 + * AES-CTR needs to load IV in CONTEXT1 reg
12502 + * at an offset of 128bits (16bytes)
12503 + * CONTEXT1[255:128] = IV
12506 + ctx1_iv_off = 16;
12509 + * RFC3686 specific:
12510 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12512 + if (is_rfc3686) {
12513 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12514 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
12515 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
12518 + data_len[0] = ctx->adata.keylen_pad;
12519 + data_len[1] = ctx->cdata.keylen;
12521 + if (alg->caam.geniv)
12524 + /* aead_encrypt shared descriptor */
12525 + if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
12526 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12527 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12528 + ARRAY_SIZE(data_len)) < 0)
12531 + if (inl_mask & 1)
12532 + ctx->adata.key_virt = ctx->key;
12534 + ctx->adata.key_dma = ctx->key_dma;
12536 + if (inl_mask & 2)
12537 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12539 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12541 + ctx->adata.key_inline = !!(inl_mask & 1);
12542 + ctx->cdata.key_inline = !!(inl_mask & 2);
12544 + cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12545 + ivsize, ctx->authsize, is_rfc3686, nonce,
12546 + ctx1_iv_off, true, ctrlpriv->era);
12549 + /* aead_decrypt shared descriptor */
12550 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
12551 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12552 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12553 + ARRAY_SIZE(data_len)) < 0)
12556 + if (inl_mask & 1)
12557 + ctx->adata.key_virt = ctx->key;
12559 + ctx->adata.key_dma = ctx->key_dma;
12561 + if (inl_mask & 2)
12562 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12564 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12566 + ctx->adata.key_inline = !!(inl_mask & 1);
12567 + ctx->cdata.key_inline = !!(inl_mask & 2);
12569 + cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12570 + ivsize, ctx->authsize, alg->caam.geniv,
12571 + is_rfc3686, nonce, ctx1_iv_off, true,
12574 + if (!alg->caam.geniv)
12575 + goto skip_givenc;
12577 + /* aead_givencrypt shared descriptor */
12578 + if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
12579 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12580 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12581 + ARRAY_SIZE(data_len)) < 0)
12584 + if (inl_mask & 1)
12585 + ctx->adata.key_virt = ctx->key;
12587 + ctx->adata.key_dma = ctx->key_dma;
12589 + if (inl_mask & 2)
12590 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12592 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12594 + ctx->adata.key_inline = !!(inl_mask & 1);
12595 + ctx->cdata.key_inline = !!(inl_mask & 2);
12597 + cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12598 + ivsize, ctx->authsize, is_rfc3686, nonce,
12599 + ctx1_iv_off, true, ctrlpriv->era);
12605 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
12607 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
12609 + ctx->authsize = authsize;
12610 + aead_set_sh_desc(authenc);
12615 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
12616 + unsigned int keylen)
12618 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12619 + struct device *jrdev = ctx->jrdev;
12620 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
12621 + struct crypto_authenc_keys keys;
12624 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12628 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12629 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
12630 + keys.authkeylen);
12631 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12632 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12636 + * If DKP is supported, use it in the shared descriptor to generate
12639 + if (ctrlpriv->era >= 6) {
12640 + ctx->adata.keylen = keys.authkeylen;
12641 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
12642 + OP_ALG_ALGSEL_MASK);
12644 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
12647 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
12648 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
12650 + dma_sync_single_for_device(jrdev, ctx->key_dma,
12651 + ctx->adata.keylen_pad +
12652 + keys.enckeylen, DMA_TO_DEVICE);
12653 + goto skip_split_key;
12656 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12657 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
12662 + /* postpend encryption key to auth split key */
12663 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12664 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12665 + keys.enckeylen, DMA_TO_DEVICE);
12667 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12668 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12669 + ctx->adata.keylen_pad + keys.enckeylen, 1);
12673 + ctx->cdata.keylen = keys.enckeylen;
12675 + ret = aead_set_sh_desc(aead);
12679 + /* Now update the driver contexts with the new shared descriptor */
12680 + if (ctx->drv_ctx[ENCRYPT]) {
12681 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12682 + ctx->sh_desc_enc);
12684 + dev_err(jrdev, "driver enc context update failed\n");
12689 + if (ctx->drv_ctx[DECRYPT]) {
12690 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12691 + ctx->sh_desc_dec);
12693 + dev_err(jrdev, "driver dec context update failed\n");
12700 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
12704 +static int tls_set_sh_desc(struct crypto_aead *tls)
12706 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12707 + unsigned int ivsize = crypto_aead_ivsize(tls);
12708 + unsigned int blocksize = crypto_aead_blocksize(tls);
12709 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
12710 + unsigned int data_len[2];
12712 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
12714 + if (!ctx->cdata.keylen || !ctx->authsize)
12718 + * TLS 1.0 encrypt shared descriptor
12719 + * Job Descriptor and Shared Descriptor
12720 + * must fit into the 64-word Descriptor h/w Buffer
12722 + data_len[0] = ctx->adata.keylen_pad;
12723 + data_len[1] = ctx->cdata.keylen;
12725 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
12726 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
12729 + if (inl_mask & 1)
12730 + ctx->adata.key_virt = ctx->key;
12732 + ctx->adata.key_dma = ctx->key_dma;
12734 + if (inl_mask & 2)
12735 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12737 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12739 + ctx->adata.key_inline = !!(inl_mask & 1);
12740 + ctx->cdata.key_inline = !!(inl_mask & 2);
12742 + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12743 + assoclen, ivsize, ctx->authsize, blocksize,
12747 + * TLS 1.0 decrypt shared descriptor
12748 + * Keys do not fit inline, regardless of algorithms used
12750 + ctx->adata.key_inline = false;
12751 + ctx->adata.key_dma = ctx->key_dma;
12752 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12754 + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12755 + assoclen, ivsize, ctx->authsize, blocksize,
12761 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
12763 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12765 + ctx->authsize = authsize;
12766 + tls_set_sh_desc(tls);
12771 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
12772 + unsigned int keylen)
12774 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12775 + struct device *jrdev = ctx->jrdev;
12776 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
12777 + struct crypto_authenc_keys keys;
12780 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12784 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12785 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
12786 + keys.authkeylen);
12787 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12788 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12792 + * If DKP is supported, use it in the shared descriptor to generate
12795 + if (ctrlpriv->era >= 6) {
12796 + ctx->adata.keylen = keys.authkeylen;
12797 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
12798 + OP_ALG_ALGSEL_MASK);
12800 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
12803 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
12804 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
12806 + dma_sync_single_for_device(jrdev, ctx->key_dma,
12807 + ctx->adata.keylen_pad +
12808 + keys.enckeylen, DMA_TO_DEVICE);
12809 + goto skip_split_key;
12812 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12813 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
12818 + /* postpend encryption key to auth split key */
12819 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12820 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12821 + keys.enckeylen, DMA_TO_DEVICE);
12824 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
12825 + ctx->adata.keylen, ctx->adata.keylen_pad);
12826 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12827 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12828 + ctx->adata.keylen_pad + keys.enckeylen, 1);
12832 + ctx->cdata.keylen = keys.enckeylen;
12834 + ret = tls_set_sh_desc(tls);
12838 + /* Now update the driver contexts with the new shared descriptor */
12839 + if (ctx->drv_ctx[ENCRYPT]) {
12840 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12841 + ctx->sh_desc_enc);
12843 + dev_err(jrdev, "driver enc context update failed\n");
12848 + if (ctx->drv_ctx[DECRYPT]) {
12849 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12850 + ctx->sh_desc_dec);
12852 + dev_err(jrdev, "driver dec context update failed\n");
12859 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
12863 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12864 + const u8 *key, unsigned int keylen)
12866 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12867 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
12868 + const char *alg_name = crypto_tfm_alg_name(tfm);
12869 + struct device *jrdev = ctx->jrdev;
12870 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
12871 + u32 ctx1_iv_off = 0;
12872 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12873 + OP_ALG_AAI_CTR_MOD128);
12874 + const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
12877 + memcpy(ctx->key, key, keylen);
12879 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12880 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12883 + * AES-CTR needs to load IV in CONTEXT1 reg
12884 + * at an offset of 128bits (16bytes)
12885 + * CONTEXT1[255:128] = IV
12888 + ctx1_iv_off = 16;
12891 + * RFC3686 specific:
12892 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12893 + * | *key = {KEY, NONCE}
12895 + if (is_rfc3686) {
12896 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12897 + keylen -= CTR_RFC3686_NONCE_SIZE;
12900 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12901 + ctx->cdata.keylen = keylen;
12902 + ctx->cdata.key_virt = ctx->key;
12903 + ctx->cdata.key_inline = true;
12905 + /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
12906 + cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
12907 + is_rfc3686, ctx1_iv_off);
12908 + cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
12909 + is_rfc3686, ctx1_iv_off);
12910 + cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
12911 + ivsize, is_rfc3686, ctx1_iv_off);
12913 + /* Now update the driver contexts with the new shared descriptor */
12914 + if (ctx->drv_ctx[ENCRYPT]) {
12915 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12916 + ctx->sh_desc_enc);
12918 + dev_err(jrdev, "driver enc context update failed\n");
12923 + if (ctx->drv_ctx[DECRYPT]) {
12924 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12925 + ctx->sh_desc_dec);
12927 + dev_err(jrdev, "driver dec context update failed\n");
12932 + if (ctx->drv_ctx[GIVENCRYPT]) {
12933 + ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
12934 + ctx->sh_desc_givenc);
12936 + dev_err(jrdev, "driver givenc context update failed\n");
12943 + crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12947 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12948 + const u8 *key, unsigned int keylen)
12950 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12951 + struct device *jrdev = ctx->jrdev;
12954 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
12955 + crypto_ablkcipher_set_flags(ablkcipher,
12956 + CRYPTO_TFM_RES_BAD_KEY_LEN);
12957 + dev_err(jrdev, "key size mismatch\n");
12961 + memcpy(ctx->key, key, keylen);
12962 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12963 + ctx->cdata.keylen = keylen;
12964 + ctx->cdata.key_virt = ctx->key;
12965 + ctx->cdata.key_inline = true;
12967 + /* xts ablkcipher encrypt, decrypt shared descriptors */
12968 + cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
12969 + cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
12971 + /* Now update the driver contexts with the new shared descriptor */
12972 + if (ctx->drv_ctx[ENCRYPT]) {
12973 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12974 + ctx->sh_desc_enc);
12976 + dev_err(jrdev, "driver enc context update failed\n");
12981 + if (ctx->drv_ctx[DECRYPT]) {
12982 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12983 + ctx->sh_desc_dec);
12985 + dev_err(jrdev, "driver dec context update failed\n");
12992 + crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12997 + * aead_edesc - s/w-extended aead descriptor
12998 + * @src_nents: number of segments in input scatterlist
12999 + * @dst_nents: number of segments in output scatterlist
13000 + * @iv_dma: dma address of iv for checking continuity and link table
13001 + * @qm_sg_bytes: length of dma mapped h/w link table
13002 + * @qm_sg_dma: bus physical mapped address of h/w link table
13003 + * @assoclen: associated data length, in CAAM endianness
13004 + * @assoclen_dma: bus physical mapped address of req->assoclen
13005 + * @drv_req: driver-specific request structure
13006 + * @sgt: the h/w link table
13008 +struct aead_edesc {
13011 + dma_addr_t iv_dma;
13013 + dma_addr_t qm_sg_dma;
13014 + unsigned int assoclen;
13015 + dma_addr_t assoclen_dma;
13016 + struct caam_drv_req drv_req;
13017 +#define CAAM_QI_MAX_AEAD_SG \
13018 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
13019 + sizeof(struct qm_sg_entry))
13020 + struct qm_sg_entry sgt[0];
13024 + * tls_edesc - s/w-extended tls descriptor
13025 + * @src_nents: number of segments in input scatterlist
13026 + * @dst_nents: number of segments in output scatterlist
13027 + * @iv_dma: dma address of iv for checking continuity and link table
13028 + * @qm_sg_bytes: length of dma mapped h/w link table
13029 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
13030 + * @qm_sg_dma: bus physical mapped address of h/w link table
13031 + * @drv_req: driver-specific request structure
13032 + * @sgt: the h/w link table
13034 +struct tls_edesc {
13037 + dma_addr_t iv_dma;
13039 + dma_addr_t qm_sg_dma;
13040 + struct scatterlist tmp[2];
13041 + struct scatterlist *dst;
13042 + struct caam_drv_req drv_req;
13043 + struct qm_sg_entry sgt[0];
13047 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
13048 + * @src_nents: number of segments in input scatterlist
13049 + * @dst_nents: number of segments in output scatterlist
13050 + * @iv_dma: dma address of iv for checking continuity and link table
13051 + * @qm_sg_bytes: length of dma mapped h/w link table
13052 + * @qm_sg_dma: bus physical mapped address of h/w link table
13053 + * @drv_req: driver-specific request structure
13054 + * @sgt: the h/w link table
13056 +struct ablkcipher_edesc {
13059 + dma_addr_t iv_dma;
13061 + dma_addr_t qm_sg_dma;
13062 + struct caam_drv_req drv_req;
13063 +#define CAAM_QI_MAX_ABLKCIPHER_SG \
13064 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
13065 + sizeof(struct qm_sg_entry))
13066 + struct qm_sg_entry sgt[0];
13069 +static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
13070 + enum optype type)
13073 + * This function is called on the fast path with values of 'type'
13074 + * known at compile time. Invalid arguments are not expected and
13075 + * thus no checks are made.
13077 + struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
13080 + if (unlikely(!drv_ctx)) {
13081 + spin_lock(&ctx->lock);
13083 + /* Read again to check if some other core init drv_ctx */
13084 + drv_ctx = ctx->drv_ctx[type];
13088 + if (type == ENCRYPT)
13089 + desc = ctx->sh_desc_enc;
13090 + else if (type == DECRYPT)
13091 + desc = ctx->sh_desc_dec;
13092 + else /* (type == GIVENCRYPT) */
13093 + desc = ctx->sh_desc_givenc;
13095 + cpu = smp_processor_id();
13096 + drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
13097 + if (likely(!IS_ERR_OR_NULL(drv_ctx)))
13098 + drv_ctx->op_type = type;
13100 + ctx->drv_ctx[type] = drv_ctx;
13103 + spin_unlock(&ctx->lock);
13109 +static void caam_unmap(struct device *dev, struct scatterlist *src,
13110 + struct scatterlist *dst, int src_nents,
13111 + int dst_nents, dma_addr_t iv_dma, int ivsize,
13112 + enum optype op_type, dma_addr_t qm_sg_dma,
13115 + if (dst != src) {
13117 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
13118 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
13120 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
13124 + dma_unmap_single(dev, iv_dma, ivsize,
13125 + op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
13128 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
13131 +static void aead_unmap(struct device *dev,
13132 + struct aead_edesc *edesc,
13133 + struct aead_request *req)
13135 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13136 + int ivsize = crypto_aead_ivsize(aead);
13138 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
13139 + edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
13140 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
13141 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
13144 +static void tls_unmap(struct device *dev,
13145 + struct tls_edesc *edesc,
13146 + struct aead_request *req)
13148 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13149 + int ivsize = crypto_aead_ivsize(aead);
13151 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
13152 + edesc->dst_nents, edesc->iv_dma, ivsize,
13153 + edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
13154 + edesc->qm_sg_bytes);
13157 +static void ablkcipher_unmap(struct device *dev,
13158 + struct ablkcipher_edesc *edesc,
13159 + struct ablkcipher_request *req)
13161 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13162 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13164 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
13165 + edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
13166 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
13169 +static void aead_done(struct caam_drv_req *drv_req, u32 status)
13171 + struct device *qidev;
13172 + struct aead_edesc *edesc;
13173 + struct aead_request *aead_req = drv_req->app_ctx;
13174 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13175 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13178 + qidev = caam_ctx->qidev;
13180 + if (unlikely(status)) {
13181 + caam_jr_strstatus(qidev, status);
13185 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13186 + aead_unmap(qidev, edesc, aead_req);
13188 + aead_request_complete(aead_req, ecode);
13189 + qi_cache_free(edesc);
13193 + * allocate and map the aead extended descriptor
13195 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
13198 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13199 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13200 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13201 + typeof(*alg), aead);
13202 + struct device *qidev = ctx->qidev;
13203 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13204 + GFP_KERNEL : GFP_ATOMIC;
13205 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13206 + struct aead_edesc *edesc;
13207 + dma_addr_t qm_sg_dma, iv_dma = 0;
13209 + unsigned int authsize = ctx->authsize;
13210 + int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
13211 + int in_len, out_len;
13212 + struct qm_sg_entry *sg_table, *fd_sgt;
13213 + struct caam_drv_ctx *drv_ctx;
13214 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13216 + drv_ctx = get_drv_ctx(ctx, op_type);
13217 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13218 + return (struct aead_edesc *)drv_ctx;
13220 + /* allocate space for base edesc and hw desc commands, link tables */
13221 + edesc = qi_cache_alloc(GFP_DMA | flags);
13222 + if (unlikely(!edesc)) {
13223 + dev_err(qidev, "could not allocate extended descriptor\n");
13224 + return ERR_PTR(-ENOMEM);
13227 + if (likely(req->src == req->dst)) {
13228 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13230 + (encrypt ? authsize : 0));
13231 + if (unlikely(src_nents < 0)) {
13232 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13233 + req->assoclen + req->cryptlen +
13234 + (encrypt ? authsize : 0));
13235 + qi_cache_free(edesc);
13236 + return ERR_PTR(src_nents);
13239 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13240 + DMA_BIDIRECTIONAL);
13241 + if (unlikely(!mapped_src_nents)) {
13242 + dev_err(qidev, "unable to map source\n");
13243 + qi_cache_free(edesc);
13244 + return ERR_PTR(-ENOMEM);
13247 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13249 + if (unlikely(src_nents < 0)) {
13250 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13251 + req->assoclen + req->cryptlen);
13252 + qi_cache_free(edesc);
13253 + return ERR_PTR(src_nents);
13256 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
13258 + (encrypt ? authsize :
13260 + if (unlikely(dst_nents < 0)) {
13261 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13262 + req->assoclen + req->cryptlen +
13263 + (encrypt ? authsize : (-authsize)));
13264 + qi_cache_free(edesc);
13265 + return ERR_PTR(dst_nents);
13269 + mapped_src_nents = dma_map_sg(qidev, req->src,
13270 + src_nents, DMA_TO_DEVICE);
13271 + if (unlikely(!mapped_src_nents)) {
13272 + dev_err(qidev, "unable to map source\n");
13273 + qi_cache_free(edesc);
13274 + return ERR_PTR(-ENOMEM);
13277 + mapped_src_nents = 0;
13280 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13281 + DMA_FROM_DEVICE);
13282 + if (unlikely(!mapped_dst_nents)) {
13283 + dev_err(qidev, "unable to map destination\n");
13284 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13285 + qi_cache_free(edesc);
13286 + return ERR_PTR(-ENOMEM);
13290 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
13291 + ivsize = crypto_aead_ivsize(aead);
13292 + iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13293 + if (dma_mapping_error(qidev, iv_dma)) {
13294 + dev_err(qidev, "unable to map IV\n");
13295 + caam_unmap(qidev, req->src, req->dst, src_nents,
13296 + dst_nents, 0, 0, op_type, 0, 0);
13297 + qi_cache_free(edesc);
13298 + return ERR_PTR(-ENOMEM);
13303 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
13304 + * Input is not contiguous.
13306 + qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
13307 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13308 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
13309 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13310 + qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
13311 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13312 + iv_dma, ivsize, op_type, 0, 0);
13313 + qi_cache_free(edesc);
13314 + return ERR_PTR(-ENOMEM);
13316 + sg_table = &edesc->sgt[0];
13317 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13319 + edesc->src_nents = src_nents;
13320 + edesc->dst_nents = dst_nents;
13321 + edesc->iv_dma = iv_dma;
13322 + edesc->drv_req.app_ctx = req;
13323 + edesc->drv_req.cbk = aead_done;
13324 + edesc->drv_req.drv_ctx = drv_ctx;
13326 + edesc->assoclen = cpu_to_caam32(req->assoclen);
13327 + edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
13329 + if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
13330 + dev_err(qidev, "unable to map assoclen\n");
13331 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13332 + iv_dma, ivsize, op_type, 0, 0);
13333 + qi_cache_free(edesc);
13334 + return ERR_PTR(-ENOMEM);
13337 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
13340 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
13343 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13344 + qm_sg_index += mapped_src_nents;
13346 + if (mapped_dst_nents > 1)
13347 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13350 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13351 + if (dma_mapping_error(qidev, qm_sg_dma)) {
13352 + dev_err(qidev, "unable to map S/G table\n");
13353 + dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
13354 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13355 + iv_dma, ivsize, op_type, 0, 0);
13356 + qi_cache_free(edesc);
13357 + return ERR_PTR(-ENOMEM);
13360 + edesc->qm_sg_dma = qm_sg_dma;
13361 + edesc->qm_sg_bytes = qm_sg_bytes;
13363 + out_len = req->assoclen + req->cryptlen +
13364 + (encrypt ? ctx->authsize : (-ctx->authsize));
13365 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
13367 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13368 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13370 + if (req->dst == req->src) {
13371 + if (mapped_src_nents == 1)
13372 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13375 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13376 + (1 + !!ivsize) * sizeof(*sg_table),
13378 + } else if (mapped_dst_nents == 1) {
13379 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
13382 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13383 + qm_sg_index, out_len, 0);
13389 +static inline int aead_crypt(struct aead_request *req, bool encrypt)
13391 + struct aead_edesc *edesc;
13392 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13393 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13396 + if (unlikely(caam_congested))
13399 + /* allocate extended descriptor */
13400 + edesc = aead_edesc_alloc(req, encrypt);
13401 + if (IS_ERR_OR_NULL(edesc))
13402 + return PTR_ERR(edesc);
13404 + /* Create and submit job descriptor */
13405 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13407 + ret = -EINPROGRESS;
13409 + aead_unmap(ctx->qidev, edesc, req);
13410 + qi_cache_free(edesc);
13416 +static int aead_encrypt(struct aead_request *req)
13418 + return aead_crypt(req, true);
13421 +static int aead_decrypt(struct aead_request *req)
13423 + return aead_crypt(req, false);
13426 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
13428 + struct device *qidev;
13429 + struct tls_edesc *edesc;
13430 + struct aead_request *aead_req = drv_req->app_ctx;
13431 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13432 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13435 + qidev = caam_ctx->qidev;
13437 + if (unlikely(status)) {
13438 + caam_jr_strstatus(qidev, status);
13442 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13443 + tls_unmap(qidev, edesc, aead_req);
13445 + aead_request_complete(aead_req, ecode);
13446 + qi_cache_free(edesc);
13450 + * allocate and map the tls extended descriptor
13452 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
13454 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13455 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13456 + unsigned int blocksize = crypto_aead_blocksize(aead);
13457 + unsigned int padsize, authsize;
13458 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13459 + typeof(*alg), aead);
13460 + struct device *qidev = ctx->qidev;
13461 + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
13462 + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
13463 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13464 + struct tls_edesc *edesc;
13465 + dma_addr_t qm_sg_dma, iv_dma = 0;
13467 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
13468 + int in_len, out_len;
13469 + struct qm_sg_entry *sg_table, *fd_sgt;
13470 + struct caam_drv_ctx *drv_ctx;
13471 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13472 + struct scatterlist *dst;
13475 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
13477 + authsize = ctx->authsize + padsize;
13479 + authsize = ctx->authsize;
13482 + drv_ctx = get_drv_ctx(ctx, op_type);
13483 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13484 + return (struct tls_edesc *)drv_ctx;
13486 + /* allocate space for base edesc and hw desc commands, link tables */
13487 + edesc = qi_cache_alloc(GFP_DMA | flags);
13488 + if (unlikely(!edesc)) {
13489 + dev_err(qidev, "could not allocate extended descriptor\n");
13490 + return ERR_PTR(-ENOMEM);
13493 + if (likely(req->src == req->dst)) {
13494 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13496 + (encrypt ? authsize : 0));
13497 + if (unlikely(src_nents < 0)) {
13498 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13499 + req->assoclen + req->cryptlen +
13500 + (encrypt ? authsize : 0));
13501 + qi_cache_free(edesc);
13502 + return ERR_PTR(src_nents);
13505 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13506 + DMA_BIDIRECTIONAL);
13507 + if (unlikely(!mapped_src_nents)) {
13508 + dev_err(qidev, "unable to map source\n");
13509 + qi_cache_free(edesc);
13510 + return ERR_PTR(-ENOMEM);
13514 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13516 + if (unlikely(src_nents < 0)) {
13517 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13518 + req->assoclen + req->cryptlen);
13519 + qi_cache_free(edesc);
13520 + return ERR_PTR(src_nents);
13523 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
13524 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
13525 + (encrypt ? authsize : 0));
13526 + if (unlikely(dst_nents < 0)) {
13527 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13529 + (encrypt ? authsize : 0));
13530 + qi_cache_free(edesc);
13531 + return ERR_PTR(dst_nents);
13535 + mapped_src_nents = dma_map_sg(qidev, req->src,
13536 + src_nents, DMA_TO_DEVICE);
13537 + if (unlikely(!mapped_src_nents)) {
13538 + dev_err(qidev, "unable to map source\n");
13539 + qi_cache_free(edesc);
13540 + return ERR_PTR(-ENOMEM);
13543 + mapped_src_nents = 0;
13546 + mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
13547 + DMA_FROM_DEVICE);
13548 + if (unlikely(!mapped_dst_nents)) {
13549 + dev_err(qidev, "unable to map destination\n");
13550 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13551 + qi_cache_free(edesc);
13552 + return ERR_PTR(-ENOMEM);
13556 + ivsize = crypto_aead_ivsize(aead);
13557 + iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13558 + if (dma_mapping_error(qidev, iv_dma)) {
13559 + dev_err(qidev, "unable to map IV\n");
13560 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
13562 + qi_cache_free(edesc);
13563 + return ERR_PTR(-ENOMEM);
13567 + * Create S/G table: IV, src, dst.
13568 + * Input is not contiguous.
13570 + qm_sg_ents = 1 + mapped_src_nents +
13571 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13572 + sg_table = &edesc->sgt[0];
13573 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13575 + edesc->src_nents = src_nents;
13576 + edesc->dst_nents = dst_nents;
13577 + edesc->dst = dst;
13578 + edesc->iv_dma = iv_dma;
13579 + edesc->drv_req.app_ctx = req;
13580 + edesc->drv_req.cbk = tls_done;
13581 + edesc->drv_req.drv_ctx = drv_ctx;
13583 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13586 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13587 + qm_sg_index += mapped_src_nents;
13589 + if (mapped_dst_nents > 1)
13590 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
13593 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13594 + if (dma_mapping_error(qidev, qm_sg_dma)) {
13595 + dev_err(qidev, "unable to map S/G table\n");
13596 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
13597 + ivsize, op_type, 0, 0);
13598 + qi_cache_free(edesc);
13599 + return ERR_PTR(-ENOMEM);
13602 + edesc->qm_sg_dma = qm_sg_dma;
13603 + edesc->qm_sg_bytes = qm_sg_bytes;
13605 + out_len = req->cryptlen + (encrypt ? authsize : 0);
13606 + in_len = ivsize + req->assoclen + req->cryptlen;
13608 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13610 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13612 + if (req->dst == req->src)
13613 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13614 + (sg_nents_for_len(req->src, req->assoclen) +
13615 + 1) * sizeof(*sg_table), out_len, 0);
13616 + else if (mapped_dst_nents == 1)
13617 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
13619 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13620 + qm_sg_index, out_len, 0);
13625 +static int tls_crypt(struct aead_request *req, bool encrypt)
13627 + struct tls_edesc *edesc;
13628 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13629 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13632 + if (unlikely(caam_congested))
13635 + edesc = tls_edesc_alloc(req, encrypt);
13636 + if (IS_ERR_OR_NULL(edesc))
13637 + return PTR_ERR(edesc);
13639 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13641 + ret = -EINPROGRESS;
13643 + tls_unmap(ctx->qidev, edesc, req);
13644 + qi_cache_free(edesc);
13650 +static int tls_encrypt(struct aead_request *req)
13652 + return tls_crypt(req, true);
13655 +static int tls_decrypt(struct aead_request *req)
13657 + return tls_crypt(req, false);
13660 +static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
13662 + struct ablkcipher_edesc *edesc;
13663 + struct ablkcipher_request *req = drv_req->app_ctx;
13664 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13665 + struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
13666 + struct device *qidev = caam_ctx->qidev;
13667 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13670 + dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
13673 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13676 + caam_jr_strstatus(qidev, status);
13679 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
13680 + DUMP_PREFIX_ADDRESS, 16, 4, req->info,
13681 + edesc->src_nents > 1 ? 100 : ivsize, 1);
13682 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
13683 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
13684 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
13687 + ablkcipher_unmap(qidev, edesc, req);
13688 + qi_cache_free(edesc);
13691 + * The crypto API expects us to set the IV (req->info) to the last
13692 + * ciphertext block. This is used e.g. by the CTS mode.
13694 + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
13697 + ablkcipher_request_complete(req, status);
13700 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
13701 + *req, bool encrypt)
13703 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13704 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13705 + struct device *qidev = ctx->qidev;
13706 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13707 + GFP_KERNEL : GFP_ATOMIC;
13708 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13709 + struct ablkcipher_edesc *edesc;
13710 + dma_addr_t iv_dma;
13712 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13713 + int dst_sg_idx, qm_sg_ents;
13714 + struct qm_sg_entry *sg_table, *fd_sgt;
13715 + struct caam_drv_ctx *drv_ctx;
13716 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13718 + drv_ctx = get_drv_ctx(ctx, op_type);
13719 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13720 + return (struct ablkcipher_edesc *)drv_ctx;
13722 + src_nents = sg_nents_for_len(req->src, req->nbytes);
13723 + if (unlikely(src_nents < 0)) {
13724 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13726 + return ERR_PTR(src_nents);
13729 + if (unlikely(req->src != req->dst)) {
13730 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13731 + if (unlikely(dst_nents < 0)) {
13732 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13734 + return ERR_PTR(dst_nents);
13737 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13739 + if (unlikely(!mapped_src_nents)) {
13740 + dev_err(qidev, "unable to map source\n");
13741 + return ERR_PTR(-ENOMEM);
13744 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13745 + DMA_FROM_DEVICE);
13746 + if (unlikely(!mapped_dst_nents)) {
13747 + dev_err(qidev, "unable to map destination\n");
13748 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13749 + return ERR_PTR(-ENOMEM);
13752 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13753 + DMA_BIDIRECTIONAL);
13754 + if (unlikely(!mapped_src_nents)) {
13755 + dev_err(qidev, "unable to map source\n");
13756 + return ERR_PTR(-ENOMEM);
13760 + iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
13761 + if (dma_mapping_error(qidev, iv_dma)) {
13762 + dev_err(qidev, "unable to map IV\n");
13763 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13765 + return ERR_PTR(-ENOMEM);
13768 + if (mapped_src_nents == 1 &&
13769 + iv_dma + ivsize == sg_dma_address(req->src)) {
13770 + in_contig = true;
13773 + in_contig = false;
13774 + qm_sg_ents = 1 + mapped_src_nents;
13776 + dst_sg_idx = qm_sg_ents;
13778 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
13779 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13780 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13781 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13782 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13783 + iv_dma, ivsize, op_type, 0, 0);
13784 + return ERR_PTR(-ENOMEM);
13787 + /* allocate space for base edesc and link tables */
13788 + edesc = qi_cache_alloc(GFP_DMA | flags);
13789 + if (unlikely(!edesc)) {
13790 + dev_err(qidev, "could not allocate extended descriptor\n");
13791 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13792 + iv_dma, ivsize, op_type, 0, 0);
13793 + return ERR_PTR(-ENOMEM);
13796 + edesc->src_nents = src_nents;
13797 + edesc->dst_nents = dst_nents;
13798 + edesc->iv_dma = iv_dma;
13799 + sg_table = &edesc->sgt[0];
13800 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13801 + edesc->drv_req.app_ctx = req;
13802 + edesc->drv_req.cbk = ablkcipher_done;
13803 + edesc->drv_req.drv_ctx = drv_ctx;
13805 + if (!in_contig) {
13806 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13807 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
13810 + if (mapped_dst_nents > 1)
13811 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13814 + edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13816 + if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13817 + dev_err(qidev, "unable to map S/G table\n");
13818 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13819 + iv_dma, ivsize, op_type, 0, 0);
13820 + qi_cache_free(edesc);
13821 + return ERR_PTR(-ENOMEM);
13824 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13827 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
13828 + ivsize + req->nbytes, 0);
13830 + dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
13833 + if (req->src == req->dst) {
13835 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
13836 + sizeof(*sg_table), req->nbytes, 0);
13838 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13840 + } else if (mapped_dst_nents > 1) {
13841 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13842 + sizeof(*sg_table), req->nbytes, 0);
13844 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13851 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
13852 + struct skcipher_givcrypt_request *creq)
13854 + struct ablkcipher_request *req = &creq->creq;
13855 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13856 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13857 + struct device *qidev = ctx->qidev;
13858 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13859 + GFP_KERNEL : GFP_ATOMIC;
13860 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
13861 + struct ablkcipher_edesc *edesc;
13862 + dma_addr_t iv_dma;
13864 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13865 + struct qm_sg_entry *sg_table, *fd_sgt;
13866 + int dst_sg_idx, qm_sg_ents;
13867 + struct caam_drv_ctx *drv_ctx;
13869 + drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
13870 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13871 + return (struct ablkcipher_edesc *)drv_ctx;
13873 + src_nents = sg_nents_for_len(req->src, req->nbytes);
13874 + if (unlikely(src_nents < 0)) {
13875 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13877 + return ERR_PTR(src_nents);
13880 + if (unlikely(req->src != req->dst)) {
13881 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13882 + if (unlikely(dst_nents < 0)) {
13883 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13885 + return ERR_PTR(dst_nents);
13888 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13890 + if (unlikely(!mapped_src_nents)) {
13891 + dev_err(qidev, "unable to map source\n");
13892 + return ERR_PTR(-ENOMEM);
13895 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13896 + DMA_FROM_DEVICE);
13897 + if (unlikely(!mapped_dst_nents)) {
13898 + dev_err(qidev, "unable to map destination\n");
13899 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13900 + return ERR_PTR(-ENOMEM);
13903 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13904 + DMA_BIDIRECTIONAL);
13905 + if (unlikely(!mapped_src_nents)) {
13906 + dev_err(qidev, "unable to map source\n");
13907 + return ERR_PTR(-ENOMEM);
13910 + dst_nents = src_nents;
13911 + mapped_dst_nents = src_nents;
13914 + iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
13915 + if (dma_mapping_error(qidev, iv_dma)) {
13916 + dev_err(qidev, "unable to map IV\n");
13917 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13919 + return ERR_PTR(-ENOMEM);
13922 + qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
13923 + dst_sg_idx = qm_sg_ents;
13924 + if (mapped_dst_nents == 1 &&
13925 + iv_dma + ivsize == sg_dma_address(req->dst)) {
13926 + out_contig = true;
13928 + out_contig = false;
13929 + qm_sg_ents += 1 + mapped_dst_nents;
13932 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13933 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13934 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13935 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13936 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13937 + return ERR_PTR(-ENOMEM);
13940 + /* allocate space for base edesc and link tables */
13941 + edesc = qi_cache_alloc(GFP_DMA | flags);
13943 + dev_err(qidev, "could not allocate extended descriptor\n");
13944 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13945 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13946 + return ERR_PTR(-ENOMEM);
13949 + edesc->src_nents = src_nents;
13950 + edesc->dst_nents = dst_nents;
13951 + edesc->iv_dma = iv_dma;
13952 + sg_table = &edesc->sgt[0];
13953 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13954 + edesc->drv_req.app_ctx = req;
13955 + edesc->drv_req.cbk = ablkcipher_done;
13956 + edesc->drv_req.drv_ctx = drv_ctx;
13958 + if (mapped_src_nents > 1)
13959 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
13961 + if (!out_contig) {
13962 + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
13963 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13964 + dst_sg_idx + 1, 0);
13967 + edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13969 + if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13970 + dev_err(qidev, "unable to map S/G table\n");
13971 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13972 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13973 + qi_cache_free(edesc);
13974 + return ERR_PTR(-ENOMEM);
13977 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13979 + if (mapped_src_nents > 1)
13980 + dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
13983 + dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
13987 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13988 + sizeof(*sg_table), ivsize + req->nbytes,
13991 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13992 + ivsize + req->nbytes, 0);
13997 +static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
13999 + struct ablkcipher_edesc *edesc;
14000 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
14001 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
14004 + if (unlikely(caam_congested))
14007 + /* allocate extended descriptor */
14008 + edesc = ablkcipher_edesc_alloc(req, encrypt);
14009 + if (IS_ERR(edesc))
14010 + return PTR_ERR(edesc);
14012 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
14014 + ret = -EINPROGRESS;
14016 + ablkcipher_unmap(ctx->qidev, edesc, req);
14017 + qi_cache_free(edesc);
14023 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
14025 + return ablkcipher_crypt(req, true);
14028 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
14030 + return ablkcipher_crypt(req, false);
14033 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
14035 + struct ablkcipher_request *req = &creq->creq;
14036 + struct ablkcipher_edesc *edesc;
14037 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
14038 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
14041 + if (unlikely(caam_congested))
14044 + /* allocate extended descriptor */
14045 + edesc = ablkcipher_giv_edesc_alloc(creq);
14046 + if (IS_ERR(edesc))
14047 + return PTR_ERR(edesc);
14049 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
14051 + ret = -EINPROGRESS;
14053 + ablkcipher_unmap(ctx->qidev, edesc, req);
14054 + qi_cache_free(edesc);
14060 +#define template_ablkcipher template_u.ablkcipher
14061 +struct caam_alg_template {
14062 + char name[CRYPTO_MAX_ALG_NAME];
14063 + char driver_name[CRYPTO_MAX_ALG_NAME];
14064 + unsigned int blocksize;
14067 + struct ablkcipher_alg ablkcipher;
14069 + u32 class1_alg_type;
14070 + u32 class2_alg_type;
14073 +static struct caam_alg_template driver_algs[] = {
14074 + /* ablkcipher descriptor */
14076 + .name = "cbc(aes)",
14077 + .driver_name = "cbc-aes-caam-qi",
14078 + .blocksize = AES_BLOCK_SIZE,
14079 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14080 + .template_ablkcipher = {
14081 + .setkey = ablkcipher_setkey,
14082 + .encrypt = ablkcipher_encrypt,
14083 + .decrypt = ablkcipher_decrypt,
14084 + .givencrypt = ablkcipher_givencrypt,
14085 + .geniv = "<built-in>",
14086 + .min_keysize = AES_MIN_KEY_SIZE,
14087 + .max_keysize = AES_MAX_KEY_SIZE,
14088 + .ivsize = AES_BLOCK_SIZE,
14090 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14093 + .name = "cbc(des3_ede)",
14094 + .driver_name = "cbc-3des-caam-qi",
14095 + .blocksize = DES3_EDE_BLOCK_SIZE,
14096 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14097 + .template_ablkcipher = {
14098 + .setkey = ablkcipher_setkey,
14099 + .encrypt = ablkcipher_encrypt,
14100 + .decrypt = ablkcipher_decrypt,
14101 + .givencrypt = ablkcipher_givencrypt,
14102 + .geniv = "<built-in>",
14103 + .min_keysize = DES3_EDE_KEY_SIZE,
14104 + .max_keysize = DES3_EDE_KEY_SIZE,
14105 + .ivsize = DES3_EDE_BLOCK_SIZE,
14107 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14110 + .name = "cbc(des)",
14111 + .driver_name = "cbc-des-caam-qi",
14112 + .blocksize = DES_BLOCK_SIZE,
14113 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14114 + .template_ablkcipher = {
14115 + .setkey = ablkcipher_setkey,
14116 + .encrypt = ablkcipher_encrypt,
14117 + .decrypt = ablkcipher_decrypt,
14118 + .givencrypt = ablkcipher_givencrypt,
14119 + .geniv = "<built-in>",
14120 + .min_keysize = DES_KEY_SIZE,
14121 + .max_keysize = DES_KEY_SIZE,
14122 + .ivsize = DES_BLOCK_SIZE,
14124 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14127 + .name = "ctr(aes)",
14128 + .driver_name = "ctr-aes-caam-qi",
14130 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
14131 + .template_ablkcipher = {
14132 + .setkey = ablkcipher_setkey,
14133 + .encrypt = ablkcipher_encrypt,
14134 + .decrypt = ablkcipher_decrypt,
14135 + .geniv = "chainiv",
14136 + .min_keysize = AES_MIN_KEY_SIZE,
14137 + .max_keysize = AES_MAX_KEY_SIZE,
14138 + .ivsize = AES_BLOCK_SIZE,
14140 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
14143 + .name = "rfc3686(ctr(aes))",
14144 + .driver_name = "rfc3686-ctr-aes-caam-qi",
14146 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14147 + .template_ablkcipher = {
14148 + .setkey = ablkcipher_setkey,
14149 + .encrypt = ablkcipher_encrypt,
14150 + .decrypt = ablkcipher_decrypt,
14151 + .givencrypt = ablkcipher_givencrypt,
14152 + .geniv = "<built-in>",
14153 + .min_keysize = AES_MIN_KEY_SIZE +
14154 + CTR_RFC3686_NONCE_SIZE,
14155 + .max_keysize = AES_MAX_KEY_SIZE +
14156 + CTR_RFC3686_NONCE_SIZE,
14157 + .ivsize = CTR_RFC3686_IV_SIZE,
14159 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
14162 + .name = "xts(aes)",
14163 + .driver_name = "xts-aes-caam-qi",
14164 + .blocksize = AES_BLOCK_SIZE,
14165 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
14166 + .template_ablkcipher = {
14167 + .setkey = xts_ablkcipher_setkey,
14168 + .encrypt = ablkcipher_encrypt,
14169 + .decrypt = ablkcipher_decrypt,
14170 + .geniv = "eseqiv",
14171 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
14172 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
14173 + .ivsize = AES_BLOCK_SIZE,
14175 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
14179 +static struct caam_aead_alg driver_aeads[] = {
14180 + /* single-pass ipsec_esp descriptor */
14184 + .cra_name = "authenc(hmac(md5),cbc(aes))",
14185 + .cra_driver_name = "authenc-hmac-md5-"
14186 + "cbc-aes-caam-qi",
14187 + .cra_blocksize = AES_BLOCK_SIZE,
14189 + .setkey = aead_setkey,
14190 + .setauthsize = aead_setauthsize,
14191 + .encrypt = aead_encrypt,
14192 + .decrypt = aead_decrypt,
14193 + .ivsize = AES_BLOCK_SIZE,
14194 + .maxauthsize = MD5_DIGEST_SIZE,
14197 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14198 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14199 + OP_ALG_AAI_HMAC_PRECOMP,
14205 + .cra_name = "echainiv(authenc(hmac(md5),"
14207 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14208 + "cbc-aes-caam-qi",
14209 + .cra_blocksize = AES_BLOCK_SIZE,
14211 + .setkey = aead_setkey,
14212 + .setauthsize = aead_setauthsize,
14213 + .encrypt = aead_encrypt,
14214 + .decrypt = aead_decrypt,
14215 + .ivsize = AES_BLOCK_SIZE,
14216 + .maxauthsize = MD5_DIGEST_SIZE,
14219 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14220 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14221 + OP_ALG_AAI_HMAC_PRECOMP,
14228 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
14229 + .cra_driver_name = "authenc-hmac-sha1-"
14230 + "cbc-aes-caam-qi",
14231 + .cra_blocksize = AES_BLOCK_SIZE,
14233 + .setkey = aead_setkey,
14234 + .setauthsize = aead_setauthsize,
14235 + .encrypt = aead_encrypt,
14236 + .decrypt = aead_decrypt,
14237 + .ivsize = AES_BLOCK_SIZE,
14238 + .maxauthsize = SHA1_DIGEST_SIZE,
14241 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14242 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14243 + OP_ALG_AAI_HMAC_PRECOMP,
14249 + .cra_name = "echainiv(authenc(hmac(sha1),"
14251 + .cra_driver_name = "echainiv-authenc-"
14252 + "hmac-sha1-cbc-aes-caam-qi",
14253 + .cra_blocksize = AES_BLOCK_SIZE,
14255 + .setkey = aead_setkey,
14256 + .setauthsize = aead_setauthsize,
14257 + .encrypt = aead_encrypt,
14258 + .decrypt = aead_decrypt,
14259 + .ivsize = AES_BLOCK_SIZE,
14260 + .maxauthsize = SHA1_DIGEST_SIZE,
14263 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14264 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14265 + OP_ALG_AAI_HMAC_PRECOMP,
14272 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
14273 + .cra_driver_name = "authenc-hmac-sha224-"
14274 + "cbc-aes-caam-qi",
14275 + .cra_blocksize = AES_BLOCK_SIZE,
14277 + .setkey = aead_setkey,
14278 + .setauthsize = aead_setauthsize,
14279 + .encrypt = aead_encrypt,
14280 + .decrypt = aead_decrypt,
14281 + .ivsize = AES_BLOCK_SIZE,
14282 + .maxauthsize = SHA224_DIGEST_SIZE,
14285 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14286 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14287 + OP_ALG_AAI_HMAC_PRECOMP,
14293 + .cra_name = "echainiv(authenc(hmac(sha224),"
14295 + .cra_driver_name = "echainiv-authenc-"
14296 + "hmac-sha224-cbc-aes-caam-qi",
14297 + .cra_blocksize = AES_BLOCK_SIZE,
14299 + .setkey = aead_setkey,
14300 + .setauthsize = aead_setauthsize,
14301 + .encrypt = aead_encrypt,
14302 + .decrypt = aead_decrypt,
14303 + .ivsize = AES_BLOCK_SIZE,
14304 + .maxauthsize = SHA224_DIGEST_SIZE,
14307 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14308 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14309 + OP_ALG_AAI_HMAC_PRECOMP,
14316 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
14317 + .cra_driver_name = "authenc-hmac-sha256-"
14318 + "cbc-aes-caam-qi",
14319 + .cra_blocksize = AES_BLOCK_SIZE,
14321 + .setkey = aead_setkey,
14322 + .setauthsize = aead_setauthsize,
14323 + .encrypt = aead_encrypt,
14324 + .decrypt = aead_decrypt,
14325 + .ivsize = AES_BLOCK_SIZE,
14326 + .maxauthsize = SHA256_DIGEST_SIZE,
14329 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14330 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14331 + OP_ALG_AAI_HMAC_PRECOMP,
14337 + .cra_name = "echainiv(authenc(hmac(sha256),"
14339 + .cra_driver_name = "echainiv-authenc-"
14340 + "hmac-sha256-cbc-aes-"
14342 + .cra_blocksize = AES_BLOCK_SIZE,
14344 + .setkey = aead_setkey,
14345 + .setauthsize = aead_setauthsize,
14346 + .encrypt = aead_encrypt,
14347 + .decrypt = aead_decrypt,
14348 + .ivsize = AES_BLOCK_SIZE,
14349 + .maxauthsize = SHA256_DIGEST_SIZE,
14352 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14353 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14354 + OP_ALG_AAI_HMAC_PRECOMP,
14361 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
14362 + .cra_driver_name = "authenc-hmac-sha384-"
14363 + "cbc-aes-caam-qi",
14364 + .cra_blocksize = AES_BLOCK_SIZE,
14366 + .setkey = aead_setkey,
14367 + .setauthsize = aead_setauthsize,
14368 + .encrypt = aead_encrypt,
14369 + .decrypt = aead_decrypt,
14370 + .ivsize = AES_BLOCK_SIZE,
14371 + .maxauthsize = SHA384_DIGEST_SIZE,
14374 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14375 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14376 + OP_ALG_AAI_HMAC_PRECOMP,
14382 + .cra_name = "echainiv(authenc(hmac(sha384),"
14384 + .cra_driver_name = "echainiv-authenc-"
14385 + "hmac-sha384-cbc-aes-"
14387 + .cra_blocksize = AES_BLOCK_SIZE,
14389 + .setkey = aead_setkey,
14390 + .setauthsize = aead_setauthsize,
14391 + .encrypt = aead_encrypt,
14392 + .decrypt = aead_decrypt,
14393 + .ivsize = AES_BLOCK_SIZE,
14394 + .maxauthsize = SHA384_DIGEST_SIZE,
14397 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14398 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14399 + OP_ALG_AAI_HMAC_PRECOMP,
14406 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
14407 + .cra_driver_name = "authenc-hmac-sha512-"
14408 + "cbc-aes-caam-qi",
14409 + .cra_blocksize = AES_BLOCK_SIZE,
14411 + .setkey = aead_setkey,
14412 + .setauthsize = aead_setauthsize,
14413 + .encrypt = aead_encrypt,
14414 + .decrypt = aead_decrypt,
14415 + .ivsize = AES_BLOCK_SIZE,
14416 + .maxauthsize = SHA512_DIGEST_SIZE,
14419 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14420 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14421 + OP_ALG_AAI_HMAC_PRECOMP,
14427 + .cra_name = "echainiv(authenc(hmac(sha512),"
14429 + .cra_driver_name = "echainiv-authenc-"
14430 + "hmac-sha512-cbc-aes-"
14432 + .cra_blocksize = AES_BLOCK_SIZE,
14434 + .setkey = aead_setkey,
14435 + .setauthsize = aead_setauthsize,
14436 + .encrypt = aead_encrypt,
14437 + .decrypt = aead_decrypt,
14438 + .ivsize = AES_BLOCK_SIZE,
14439 + .maxauthsize = SHA512_DIGEST_SIZE,
14442 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14443 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14444 + OP_ALG_AAI_HMAC_PRECOMP,
14451 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
14452 + .cra_driver_name = "authenc-hmac-md5-"
14453 + "cbc-des3_ede-caam-qi",
14454 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14456 + .setkey = aead_setkey,
14457 + .setauthsize = aead_setauthsize,
14458 + .encrypt = aead_encrypt,
14459 + .decrypt = aead_decrypt,
14460 + .ivsize = DES3_EDE_BLOCK_SIZE,
14461 + .maxauthsize = MD5_DIGEST_SIZE,
14464 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14465 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14466 + OP_ALG_AAI_HMAC_PRECOMP,
14472 + .cra_name = "echainiv(authenc(hmac(md5),"
14473 + "cbc(des3_ede)))",
14474 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14475 + "cbc-des3_ede-caam-qi",
14476 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14478 + .setkey = aead_setkey,
14479 + .setauthsize = aead_setauthsize,
14480 + .encrypt = aead_encrypt,
14481 + .decrypt = aead_decrypt,
14482 + .ivsize = DES3_EDE_BLOCK_SIZE,
14483 + .maxauthsize = MD5_DIGEST_SIZE,
14486 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14487 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14488 + OP_ALG_AAI_HMAC_PRECOMP,
14495 + .cra_name = "authenc(hmac(sha1),"
14496 + "cbc(des3_ede))",
14497 + .cra_driver_name = "authenc-hmac-sha1-"
14498 + "cbc-des3_ede-caam-qi",
14499 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14501 + .setkey = aead_setkey,
14502 + .setauthsize = aead_setauthsize,
14503 + .encrypt = aead_encrypt,
14504 + .decrypt = aead_decrypt,
14505 + .ivsize = DES3_EDE_BLOCK_SIZE,
14506 + .maxauthsize = SHA1_DIGEST_SIZE,
14509 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14510 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14511 + OP_ALG_AAI_HMAC_PRECOMP,
14517 + .cra_name = "echainiv(authenc(hmac(sha1),"
14518 + "cbc(des3_ede)))",
14519 + .cra_driver_name = "echainiv-authenc-"
14521 + "cbc-des3_ede-caam-qi",
14522 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14524 + .setkey = aead_setkey,
14525 + .setauthsize = aead_setauthsize,
14526 + .encrypt = aead_encrypt,
14527 + .decrypt = aead_decrypt,
14528 + .ivsize = DES3_EDE_BLOCK_SIZE,
14529 + .maxauthsize = SHA1_DIGEST_SIZE,
14532 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14533 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14534 + OP_ALG_AAI_HMAC_PRECOMP,
14541 + .cra_name = "authenc(hmac(sha224),"
14542 + "cbc(des3_ede))",
14543 + .cra_driver_name = "authenc-hmac-sha224-"
14544 + "cbc-des3_ede-caam-qi",
14545 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14547 + .setkey = aead_setkey,
14548 + .setauthsize = aead_setauthsize,
14549 + .encrypt = aead_encrypt,
14550 + .decrypt = aead_decrypt,
14551 + .ivsize = DES3_EDE_BLOCK_SIZE,
14552 + .maxauthsize = SHA224_DIGEST_SIZE,
14555 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14556 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14557 + OP_ALG_AAI_HMAC_PRECOMP,
14563 + .cra_name = "echainiv(authenc(hmac(sha224),"
14564 + "cbc(des3_ede)))",
14565 + .cra_driver_name = "echainiv-authenc-"
14567 + "cbc-des3_ede-caam-qi",
14568 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14570 + .setkey = aead_setkey,
14571 + .setauthsize = aead_setauthsize,
14572 + .encrypt = aead_encrypt,
14573 + .decrypt = aead_decrypt,
14574 + .ivsize = DES3_EDE_BLOCK_SIZE,
14575 + .maxauthsize = SHA224_DIGEST_SIZE,
14578 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14579 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14580 + OP_ALG_AAI_HMAC_PRECOMP,
14587 + .cra_name = "authenc(hmac(sha256),"
14588 + "cbc(des3_ede))",
14589 + .cra_driver_name = "authenc-hmac-sha256-"
14590 + "cbc-des3_ede-caam-qi",
14591 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14593 + .setkey = aead_setkey,
14594 + .setauthsize = aead_setauthsize,
14595 + .encrypt = aead_encrypt,
14596 + .decrypt = aead_decrypt,
14597 + .ivsize = DES3_EDE_BLOCK_SIZE,
14598 + .maxauthsize = SHA256_DIGEST_SIZE,
14601 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14602 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14603 + OP_ALG_AAI_HMAC_PRECOMP,
14609 + .cra_name = "echainiv(authenc(hmac(sha256),"
14610 + "cbc(des3_ede)))",
14611 + .cra_driver_name = "echainiv-authenc-"
14613 + "cbc-des3_ede-caam-qi",
14614 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14616 + .setkey = aead_setkey,
14617 + .setauthsize = aead_setauthsize,
14618 + .encrypt = aead_encrypt,
14619 + .decrypt = aead_decrypt,
14620 + .ivsize = DES3_EDE_BLOCK_SIZE,
14621 + .maxauthsize = SHA256_DIGEST_SIZE,
14624 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14625 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14626 + OP_ALG_AAI_HMAC_PRECOMP,
14633 + .cra_name = "authenc(hmac(sha384),"
14634 + "cbc(des3_ede))",
14635 + .cra_driver_name = "authenc-hmac-sha384-"
14636 + "cbc-des3_ede-caam-qi",
14637 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14639 + .setkey = aead_setkey,
14640 + .setauthsize = aead_setauthsize,
14641 + .encrypt = aead_encrypt,
14642 + .decrypt = aead_decrypt,
14643 + .ivsize = DES3_EDE_BLOCK_SIZE,
14644 + .maxauthsize = SHA384_DIGEST_SIZE,
14647 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14648 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14649 + OP_ALG_AAI_HMAC_PRECOMP,
14655 + .cra_name = "echainiv(authenc(hmac(sha384),"
14656 + "cbc(des3_ede)))",
14657 + .cra_driver_name = "echainiv-authenc-"
14659 + "cbc-des3_ede-caam-qi",
14660 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14662 + .setkey = aead_setkey,
14663 + .setauthsize = aead_setauthsize,
14664 + .encrypt = aead_encrypt,
14665 + .decrypt = aead_decrypt,
14666 + .ivsize = DES3_EDE_BLOCK_SIZE,
14667 + .maxauthsize = SHA384_DIGEST_SIZE,
14670 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14671 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14672 + OP_ALG_AAI_HMAC_PRECOMP,
14679 + .cra_name = "authenc(hmac(sha512),"
14680 + "cbc(des3_ede))",
14681 + .cra_driver_name = "authenc-hmac-sha512-"
14682 + "cbc-des3_ede-caam-qi",
14683 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14685 + .setkey = aead_setkey,
14686 + .setauthsize = aead_setauthsize,
14687 + .encrypt = aead_encrypt,
14688 + .decrypt = aead_decrypt,
14689 + .ivsize = DES3_EDE_BLOCK_SIZE,
14690 + .maxauthsize = SHA512_DIGEST_SIZE,
14693 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14694 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14695 + OP_ALG_AAI_HMAC_PRECOMP,
14701 + .cra_name = "echainiv(authenc(hmac(sha512),"
14702 + "cbc(des3_ede)))",
14703 + .cra_driver_name = "echainiv-authenc-"
14705 + "cbc-des3_ede-caam-qi",
14706 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14708 + .setkey = aead_setkey,
14709 + .setauthsize = aead_setauthsize,
14710 + .encrypt = aead_encrypt,
14711 + .decrypt = aead_decrypt,
14712 + .ivsize = DES3_EDE_BLOCK_SIZE,
14713 + .maxauthsize = SHA512_DIGEST_SIZE,
14716 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14717 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14718 + OP_ALG_AAI_HMAC_PRECOMP,
14725 + .cra_name = "authenc(hmac(md5),cbc(des))",
14726 + .cra_driver_name = "authenc-hmac-md5-"
14727 + "cbc-des-caam-qi",
14728 + .cra_blocksize = DES_BLOCK_SIZE,
14730 + .setkey = aead_setkey,
14731 + .setauthsize = aead_setauthsize,
14732 + .encrypt = aead_encrypt,
14733 + .decrypt = aead_decrypt,
14734 + .ivsize = DES_BLOCK_SIZE,
14735 + .maxauthsize = MD5_DIGEST_SIZE,
14738 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14739 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14740 + OP_ALG_AAI_HMAC_PRECOMP,
14746 + .cra_name = "echainiv(authenc(hmac(md5),"
14748 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14749 + "cbc-des-caam-qi",
14750 + .cra_blocksize = DES_BLOCK_SIZE,
14752 + .setkey = aead_setkey,
14753 + .setauthsize = aead_setauthsize,
14754 + .encrypt = aead_encrypt,
14755 + .decrypt = aead_decrypt,
14756 + .ivsize = DES_BLOCK_SIZE,
14757 + .maxauthsize = MD5_DIGEST_SIZE,
14760 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14761 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14762 + OP_ALG_AAI_HMAC_PRECOMP,
14769 + .cra_name = "authenc(hmac(sha1),cbc(des))",
14770 + .cra_driver_name = "authenc-hmac-sha1-"
14771 + "cbc-des-caam-qi",
14772 + .cra_blocksize = DES_BLOCK_SIZE,
14774 + .setkey = aead_setkey,
14775 + .setauthsize = aead_setauthsize,
14776 + .encrypt = aead_encrypt,
14777 + .decrypt = aead_decrypt,
14778 + .ivsize = DES_BLOCK_SIZE,
14779 + .maxauthsize = SHA1_DIGEST_SIZE,
14782 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14783 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14784 + OP_ALG_AAI_HMAC_PRECOMP,
14790 + .cra_name = "echainiv(authenc(hmac(sha1),"
14792 + .cra_driver_name = "echainiv-authenc-"
14793 + "hmac-sha1-cbc-des-caam-qi",
14794 + .cra_blocksize = DES_BLOCK_SIZE,
14796 + .setkey = aead_setkey,
14797 + .setauthsize = aead_setauthsize,
14798 + .encrypt = aead_encrypt,
14799 + .decrypt = aead_decrypt,
14800 + .ivsize = DES_BLOCK_SIZE,
14801 + .maxauthsize = SHA1_DIGEST_SIZE,
14804 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14805 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14806 + OP_ALG_AAI_HMAC_PRECOMP,
14813 + .cra_name = "authenc(hmac(sha224),cbc(des))",
14814 + .cra_driver_name = "authenc-hmac-sha224-"
14815 + "cbc-des-caam-qi",
14816 + .cra_blocksize = DES_BLOCK_SIZE,
14818 + .setkey = aead_setkey,
14819 + .setauthsize = aead_setauthsize,
14820 + .encrypt = aead_encrypt,
14821 + .decrypt = aead_decrypt,
14822 + .ivsize = DES_BLOCK_SIZE,
14823 + .maxauthsize = SHA224_DIGEST_SIZE,
14826 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14827 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14828 + OP_ALG_AAI_HMAC_PRECOMP,
14834 + .cra_name = "echainiv(authenc(hmac(sha224),"
14836 + .cra_driver_name = "echainiv-authenc-"
14837 + "hmac-sha224-cbc-des-"
14839 + .cra_blocksize = DES_BLOCK_SIZE,
14841 + .setkey = aead_setkey,
14842 + .setauthsize = aead_setauthsize,
14843 + .encrypt = aead_encrypt,
14844 + .decrypt = aead_decrypt,
14845 + .ivsize = DES_BLOCK_SIZE,
14846 + .maxauthsize = SHA224_DIGEST_SIZE,
14849 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14850 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14851 + OP_ALG_AAI_HMAC_PRECOMP,
14858 + .cra_name = "authenc(hmac(sha256),cbc(des))",
14859 + .cra_driver_name = "authenc-hmac-sha256-"
14860 + "cbc-des-caam-qi",
14861 + .cra_blocksize = DES_BLOCK_SIZE,
14863 + .setkey = aead_setkey,
14864 + .setauthsize = aead_setauthsize,
14865 + .encrypt = aead_encrypt,
14866 + .decrypt = aead_decrypt,
14867 + .ivsize = DES_BLOCK_SIZE,
14868 + .maxauthsize = SHA256_DIGEST_SIZE,
14871 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14872 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14873 + OP_ALG_AAI_HMAC_PRECOMP,
14879 + .cra_name = "echainiv(authenc(hmac(sha256),"
14881 + .cra_driver_name = "echainiv-authenc-"
14882 + "hmac-sha256-cbc-des-"
14884 + .cra_blocksize = DES_BLOCK_SIZE,
14886 + .setkey = aead_setkey,
14887 + .setauthsize = aead_setauthsize,
14888 + .encrypt = aead_encrypt,
14889 + .decrypt = aead_decrypt,
14890 + .ivsize = DES_BLOCK_SIZE,
14891 + .maxauthsize = SHA256_DIGEST_SIZE,
14894 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14895 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14896 + OP_ALG_AAI_HMAC_PRECOMP,
14903 + .cra_name = "authenc(hmac(sha384),cbc(des))",
14904 + .cra_driver_name = "authenc-hmac-sha384-"
14905 + "cbc-des-caam-qi",
14906 + .cra_blocksize = DES_BLOCK_SIZE,
14908 + .setkey = aead_setkey,
14909 + .setauthsize = aead_setauthsize,
14910 + .encrypt = aead_encrypt,
14911 + .decrypt = aead_decrypt,
14912 + .ivsize = DES_BLOCK_SIZE,
14913 + .maxauthsize = SHA384_DIGEST_SIZE,
14916 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14917 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14918 + OP_ALG_AAI_HMAC_PRECOMP,
14924 + .cra_name = "echainiv(authenc(hmac(sha384),"
14926 + .cra_driver_name = "echainiv-authenc-"
14927 + "hmac-sha384-cbc-des-"
14929 + .cra_blocksize = DES_BLOCK_SIZE,
14931 + .setkey = aead_setkey,
14932 + .setauthsize = aead_setauthsize,
14933 + .encrypt = aead_encrypt,
14934 + .decrypt = aead_decrypt,
14935 + .ivsize = DES_BLOCK_SIZE,
14936 + .maxauthsize = SHA384_DIGEST_SIZE,
14939 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14940 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14941 + OP_ALG_AAI_HMAC_PRECOMP,
14948 + .cra_name = "authenc(hmac(sha512),cbc(des))",
14949 + .cra_driver_name = "authenc-hmac-sha512-"
14950 + "cbc-des-caam-qi",
14951 + .cra_blocksize = DES_BLOCK_SIZE,
14953 + .setkey = aead_setkey,
14954 + .setauthsize = aead_setauthsize,
14955 + .encrypt = aead_encrypt,
14956 + .decrypt = aead_decrypt,
14957 + .ivsize = DES_BLOCK_SIZE,
14958 + .maxauthsize = SHA512_DIGEST_SIZE,
14961 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14962 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14963 + OP_ALG_AAI_HMAC_PRECOMP,
14969 + .cra_name = "echainiv(authenc(hmac(sha512),"
14971 + .cra_driver_name = "echainiv-authenc-"
14972 + "hmac-sha512-cbc-des-"
14974 + .cra_blocksize = DES_BLOCK_SIZE,
14976 + .setkey = aead_setkey,
14977 + .setauthsize = aead_setauthsize,
14978 + .encrypt = aead_encrypt,
14979 + .decrypt = aead_decrypt,
14980 + .ivsize = DES_BLOCK_SIZE,
14981 + .maxauthsize = SHA512_DIGEST_SIZE,
14984 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14985 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14986 + OP_ALG_AAI_HMAC_PRECOMP,
14993 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
14994 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
14995 + .cra_blocksize = AES_BLOCK_SIZE,
14997 + .setkey = tls_setkey,
14998 + .setauthsize = tls_setauthsize,
14999 + .encrypt = tls_encrypt,
15000 + .decrypt = tls_decrypt,
15001 + .ivsize = AES_BLOCK_SIZE,
15002 + .maxauthsize = SHA1_DIGEST_SIZE,
15005 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
15006 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
15007 + OP_ALG_AAI_HMAC_PRECOMP,
15012 +struct caam_crypto_alg {
15013 + struct list_head entry;
15014 + struct crypto_alg crypto_alg;
15015 + struct caam_alg_entry caam;
15018 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
15020 + struct caam_drv_private *priv;
15021 + /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
15022 + static const u8 digest_size[] = {
15024 + SHA1_DIGEST_SIZE,
15025 + SHA224_DIGEST_SIZE,
15026 + SHA256_DIGEST_SIZE,
15027 + SHA384_DIGEST_SIZE,
15028 + SHA512_DIGEST_SIZE
15033 + * distribute tfms across job rings to ensure in-order
15034 + * crypto request processing per tfm
15036 + ctx->jrdev = caam_jr_alloc();
15037 + if (IS_ERR(ctx->jrdev)) {
15038 + pr_err("Job Ring Device allocation for transform failed\n");
15039 + return PTR_ERR(ctx->jrdev);
15042 + ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
15044 + if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
15045 + dev_err(ctx->jrdev, "unable to map key\n");
15046 + caam_jr_free(ctx->jrdev);
15050 + /* copy descriptor header template value */
15051 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
15052 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
15054 + if (ctx->adata.algtype) {
15055 + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
15056 + >> OP_ALG_ALGSEL_SHIFT;
15057 + if (op_id < ARRAY_SIZE(digest_size)) {
15058 + ctx->authsize = digest_size[op_id];
15060 + dev_err(ctx->jrdev,
15061 + "incorrect op_id %d; must be less than %zu\n",
15062 + op_id, ARRAY_SIZE(digest_size));
15063 + caam_jr_free(ctx->jrdev);
15067 + ctx->authsize = 0;
15070 + priv = dev_get_drvdata(ctx->jrdev->parent);
15071 + ctx->qidev = priv->qidev;
15073 + spin_lock_init(&ctx->lock);
15074 + ctx->drv_ctx[ENCRYPT] = NULL;
15075 + ctx->drv_ctx[DECRYPT] = NULL;
15076 + ctx->drv_ctx[GIVENCRYPT] = NULL;
15081 +static int caam_cra_init(struct crypto_tfm *tfm)
15083 + struct crypto_alg *alg = tfm->__crt_alg;
15084 + struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
15086 + struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
15088 + return caam_init_common(ctx, &caam_alg->caam);
15091 +static int caam_aead_init(struct crypto_aead *tfm)
15093 + struct aead_alg *alg = crypto_aead_alg(tfm);
15094 + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
15096 + struct caam_ctx *ctx = crypto_aead_ctx(tfm);
15098 + return caam_init_common(ctx, &caam_alg->caam);
15101 +static void caam_exit_common(struct caam_ctx *ctx)
15103 + caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
15104 + caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
15105 + caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
15107 + dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
15110 + caam_jr_free(ctx->jrdev);
15113 +static void caam_cra_exit(struct crypto_tfm *tfm)
15115 + caam_exit_common(crypto_tfm_ctx(tfm));
15118 +static void caam_aead_exit(struct crypto_aead *tfm)
15120 + caam_exit_common(crypto_aead_ctx(tfm));
15123 +static struct list_head alg_list;
15124 +static void __exit caam_qi_algapi_exit(void)
15126 + struct caam_crypto_alg *t_alg, *n;
15129 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
15130 + struct caam_aead_alg *t_alg = driver_aeads + i;
15132 + if (t_alg->registered)
15133 + crypto_unregister_aead(&t_alg->aead);
15136 + if (!alg_list.next)
15139 + list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
15140 + crypto_unregister_alg(&t_alg->crypto_alg);
15141 + list_del(&t_alg->entry);
15146 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
15149 + struct caam_crypto_alg *t_alg;
15150 + struct crypto_alg *alg;
15152 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
15154 + return ERR_PTR(-ENOMEM);
15156 + alg = &t_alg->crypto_alg;
15158 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
15159 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
15160 + template->driver_name);
15161 + alg->cra_module = THIS_MODULE;
15162 + alg->cra_init = caam_cra_init;
15163 + alg->cra_exit = caam_cra_exit;
15164 + alg->cra_priority = CAAM_CRA_PRIORITY;
15165 + alg->cra_blocksize = template->blocksize;
15166 + alg->cra_alignmask = 0;
15167 + alg->cra_ctxsize = sizeof(struct caam_ctx);
15168 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
15170 + switch (template->type) {
15171 + case CRYPTO_ALG_TYPE_GIVCIPHER:
15172 + alg->cra_type = &crypto_givcipher_type;
15173 + alg->cra_ablkcipher = template->template_ablkcipher;
15175 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
15176 + alg->cra_type = &crypto_ablkcipher_type;
15177 + alg->cra_ablkcipher = template->template_ablkcipher;
15181 + t_alg->caam.class1_alg_type = template->class1_alg_type;
15182 + t_alg->caam.class2_alg_type = template->class2_alg_type;
15187 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
15189 + struct aead_alg *alg = &t_alg->aead;
15191 + alg->base.cra_module = THIS_MODULE;
15192 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
15193 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
15194 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
15196 + alg->init = caam_aead_init;
15197 + alg->exit = caam_aead_exit;
15200 +static int __init caam_qi_algapi_init(void)
15202 + struct device_node *dev_node;
15203 + struct platform_device *pdev;
15204 + struct device *ctrldev;
15205 + struct caam_drv_private *priv;
15206 + int i = 0, err = 0;
15207 + u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
15208 + unsigned int md_limit = SHA512_DIGEST_SIZE;
15209 + bool registered = false;
15211 + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
15213 + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
15218 + pdev = of_find_device_by_node(dev_node);
15219 + of_node_put(dev_node);
15223 + ctrldev = &pdev->dev;
15224 + priv = dev_get_drvdata(ctrldev);
15227 + * If priv is NULL, it's probably because the caam driver wasn't
15228 + * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
15230 + if (!priv || !priv->qi_present)
15233 + if (caam_dpaa2) {
15234 + dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
15238 + INIT_LIST_HEAD(&alg_list);
15241 + * Register crypto algorithms the device supports.
15242 + * First, detect presence and attributes of DES, AES, and MD blocks.
15244 + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
15245 + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
15246 + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
15247 + aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
15248 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
15250 + /* If MD is present, limit digest size based on LP256 */
15251 + if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
15252 + md_limit = SHA256_DIGEST_SIZE;
15254 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
15255 + struct caam_crypto_alg *t_alg;
15256 + struct caam_alg_template *alg = driver_algs + i;
15257 + u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
15259 + /* Skip DES algorithms if not supported by device */
15261 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
15262 + (alg_sel == OP_ALG_ALGSEL_DES)))
15265 + /* Skip AES algorithms if not supported by device */
15266 + if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
15269 + t_alg = caam_alg_alloc(alg);
15270 + if (IS_ERR(t_alg)) {
15271 + err = PTR_ERR(t_alg);
15272 + dev_warn(priv->qidev, "%s alg allocation failed\n",
15273 + alg->driver_name);
15277 + err = crypto_register_alg(&t_alg->crypto_alg);
15279 + dev_warn(priv->qidev, "%s alg registration failed\n",
15280 + t_alg->crypto_alg.cra_driver_name);
15285 + list_add_tail(&t_alg->entry, &alg_list);
15286 + registered = true;
15289 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
15290 + struct caam_aead_alg *t_alg = driver_aeads + i;
15291 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
15292 + OP_ALG_ALGSEL_MASK;
15293 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
15294 + OP_ALG_ALGSEL_MASK;
15295 + u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
15297 + /* Skip DES algorithms if not supported by device */
15299 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
15300 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
15303 + /* Skip AES algorithms if not supported by device */
15304 + if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
15308 + * Check support for AES algorithms not available
15311 + if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
15312 + (alg_aai == OP_ALG_AAI_GCM))
15316 + * Skip algorithms requiring message digests
15317 + * if MD or MD size is not supported by device.
15319 + if (c2_alg_sel &&
15320 + (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
15323 + caam_aead_alg_init(t_alg);
15325 + err = crypto_register_aead(&t_alg->aead);
15327 + pr_warn("%s alg registration failed\n",
15328 + t_alg->aead.base.cra_driver_name);
15332 + t_alg->registered = true;
15333 + registered = true;
15337 + dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
15342 +module_init(caam_qi_algapi_init);
15343 +module_exit(caam_qi_algapi_exit);
15345 +MODULE_LICENSE("GPL");
15346 +MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
15347 +MODULE_AUTHOR("Freescale Semiconductor");
15349 +++ b/drivers/crypto/caam/caamalg_qi2.c
15352 + * Copyright 2015-2016 Freescale Semiconductor Inc.
15353 + * Copyright 2017 NXP
15355 + * Redistribution and use in source and binary forms, with or without
15356 + * modification, are permitted provided that the following conditions are met:
15357 + * * Redistributions of source code must retain the above copyright
15358 + * notice, this list of conditions and the following disclaimer.
15359 + * * Redistributions in binary form must reproduce the above copyright
15360 + * notice, this list of conditions and the following disclaimer in the
15361 + * documentation and/or other materials provided with the distribution.
15362 + * * Neither the names of the above-listed copyright holders nor the
15363 + * names of any contributors may be used to endorse or promote products
15364 + * derived from this software without specific prior written permission.
15367 + * ALTERNATIVELY, this software may be distributed under the terms of the
15368 + * GNU General Public License ("GPL") as published by the Free Software
15369 + * Foundation, either version 2 of that License or (at your option) any
15372 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15373 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15374 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15375 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
15376 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
15377 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
15378 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
15379 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
15380 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
15381 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
15382 + * POSSIBILITY OF SUCH DAMAGE.
15385 +#include "compat.h"
15387 +#include "caamalg_qi2.h"
15388 +#include "dpseci_cmd.h"
15389 +#include "desc_constr.h"
15390 +#include "error.h"
15391 +#include "sg_sw_sec4.h"
15392 +#include "sg_sw_qm2.h"
15393 +#include "key_gen.h"
15394 +#include "caamalg_desc.h"
15395 +#include "caamhash_desc.h"
15396 +#include "../../../drivers/staging/fsl-mc/include/mc.h"
15397 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
15398 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
15400 +#define CAAM_CRA_PRIORITY 2000
15402 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
15403 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
15404 + SHA512_DIGEST_SIZE * 2)
15406 +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
15407 +bool caam_little_end;
15408 +EXPORT_SYMBOL(caam_little_end);
15410 +EXPORT_SYMBOL(caam_imx);
15414 + * This is a cache of buffers, from which the users of CAAM QI driver
15415 + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
15416 + * NOTE: A more elegant solution would be to have some headroom in the frames
15417 + * being processed. This can be added by the dpaa2-eth driver. This would
15418 + * pose a problem for userspace application processing which cannot
15419 + * know of this limitation. So for now, this will work.
15420 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here
15422 +static struct kmem_cache *qi_cache;
15424 +struct caam_alg_entry {
15425 + struct device *dev;
15426 + int class1_alg_type;
15427 + int class2_alg_type;
15432 +struct caam_aead_alg {
15433 + struct aead_alg aead;
15434 + struct caam_alg_entry caam;
15439 + * caam_ctx - per-session context
15440 + * @flc: Flow Contexts array
15441 + * @key: virtual address of the key(s): [authentication key], encryption key
15442 + * @flc_dma: I/O virtual addresses of the Flow Contexts
15443 + * @key_dma: I/O virtual address of the key
15444 + * @dev: dpseci device
15445 + * @adata: authentication algorithm details
15446 + * @cdata: encryption algorithm details
15447 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
15450 + struct caam_flc flc[NUM_OP];
15451 + u8 key[CAAM_MAX_KEY_SIZE];
15452 + dma_addr_t flc_dma[NUM_OP];
15453 + dma_addr_t key_dma;
15454 + struct device *dev;
15455 + struct alginfo adata;
15456 + struct alginfo cdata;
15457 + unsigned int authsize;
15460 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
15461 + dma_addr_t iova_addr)
15463 + phys_addr_t phys_addr;
15465 + phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
15468 + return phys_to_virt(phys_addr);
15472 + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
15474 + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
15475 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
15476 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
15477 + * hosting 16 SG entries.
15479 + * @flags - flags that would be used for the equivalent kmalloc(..) call
15481 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
15483 +static inline void *qi_cache_zalloc(gfp_t flags)
15485 + return kmem_cache_zalloc(qi_cache, flags);
15489 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
15491 + * @obj - buffer previously allocated by qi_cache_zalloc
15493 + * No checking is being done, the call is a passthrough call to
15494 + * kmem_cache_free(...)
15496 +static inline void qi_cache_free(void *obj)
15498 + kmem_cache_free(qi_cache, obj);
15501 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
15503 + switch (crypto_tfm_alg_type(areq->tfm)) {
15504 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
15505 + case CRYPTO_ALG_TYPE_GIVCIPHER:
15506 + return ablkcipher_request_ctx(ablkcipher_request_cast(areq));
15507 + case CRYPTO_ALG_TYPE_AEAD:
15508 + return aead_request_ctx(container_of(areq, struct aead_request,
15510 + case CRYPTO_ALG_TYPE_AHASH:
15511 + return ahash_request_ctx(ahash_request_cast(areq));
15513 + return ERR_PTR(-EINVAL);
15517 +static void caam_unmap(struct device *dev, struct scatterlist *src,
15518 + struct scatterlist *dst, int src_nents,
15519 + int dst_nents, dma_addr_t iv_dma, int ivsize,
15520 + enum optype op_type, dma_addr_t qm_sg_dma,
15523 + if (dst != src) {
15525 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
15526 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
15528 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
15532 + dma_unmap_single(dev, iv_dma, ivsize,
15533 + op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
15537 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
15540 +static int aead_set_sh_desc(struct crypto_aead *aead)
15542 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15543 + typeof(*alg), aead);
15544 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15545 + unsigned int ivsize = crypto_aead_ivsize(aead);
15546 + struct device *dev = ctx->dev;
15547 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
15548 + struct caam_flc *flc;
15550 + u32 ctx1_iv_off = 0;
15551 + u32 *nonce = NULL;
15552 + unsigned int data_len[2];
15554 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
15555 + OP_ALG_AAI_CTR_MOD128);
15556 + const bool is_rfc3686 = alg->caam.rfc3686;
15558 + if (!ctx->cdata.keylen || !ctx->authsize)
15562 + * AES-CTR needs to load IV in CONTEXT1 reg
15563 + * at an offset of 128bits (16bytes)
15564 + * CONTEXT1[255:128] = IV
15567 + ctx1_iv_off = 16;
15570 + * RFC3686 specific:
15571 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
15573 + if (is_rfc3686) {
15574 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
15575 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
15576 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
15579 + data_len[0] = ctx->adata.keylen_pad;
15580 + data_len[1] = ctx->cdata.keylen;
15582 + /* aead_encrypt shared descriptor */
15583 + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
15584 + DESC_QI_AEAD_ENC_LEN) +
15585 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15586 + DESC_JOB_IO_LEN, data_len, &inl_mask,
15587 + ARRAY_SIZE(data_len)) < 0)
15590 + if (inl_mask & 1)
15591 + ctx->adata.key_virt = ctx->key;
15593 + ctx->adata.key_dma = ctx->key_dma;
15595 + if (inl_mask & 2)
15596 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15598 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15600 + ctx->adata.key_inline = !!(inl_mask & 1);
15601 + ctx->cdata.key_inline = !!(inl_mask & 2);
15603 + flc = &ctx->flc[ENCRYPT];
15604 + desc = flc->sh_desc;
15606 + if (alg->caam.geniv)
15607 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
15608 + ivsize, ctx->authsize, is_rfc3686,
15609 + nonce, ctx1_iv_off, true,
15610 + priv->sec_attr.era);
15612 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
15613 + ivsize, ctx->authsize, is_rfc3686, nonce,
15614 + ctx1_iv_off, true, priv->sec_attr.era);
15616 + flc->flc[1] = desc_len(desc); /* SDL */
15617 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
15618 + sizeof(flc->flc) + desc_bytes(desc),
15619 + DMA_BIDIRECTIONAL);
15621 + /* aead_decrypt shared descriptor */
15622 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
15623 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15624 + DESC_JOB_IO_LEN, data_len, &inl_mask,
15625 + ARRAY_SIZE(data_len)) < 0)
15628 + if (inl_mask & 1)
15629 + ctx->adata.key_virt = ctx->key;
15631 + ctx->adata.key_dma = ctx->key_dma;
15633 + if (inl_mask & 2)
15634 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15636 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15638 + ctx->adata.key_inline = !!(inl_mask & 1);
15639 + ctx->cdata.key_inline = !!(inl_mask & 2);
15641 + flc = &ctx->flc[DECRYPT];
15642 + desc = flc->sh_desc;
15643 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
15644 + ivsize, ctx->authsize, alg->caam.geniv,
15645 + is_rfc3686, nonce, ctx1_iv_off, true,
15646 + priv->sec_attr.era);
15647 + flc->flc[1] = desc_len(desc); /* SDL */
15648 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
15649 + sizeof(flc->flc) + desc_bytes(desc),
15650 + DMA_BIDIRECTIONAL);
15655 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
15657 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
15659 + ctx->authsize = authsize;
15660 + aead_set_sh_desc(authenc);
15665 +struct split_key_sh_result {
15666 + struct completion completion;
15668 + struct device *dev;
15671 +static void split_key_sh_done(void *cbk_ctx, u32 err)
15673 + struct split_key_sh_result *res = cbk_ctx;
15676 + dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
15680 + caam_qi2_strstatus(res->dev, err);
15683 + complete(&res->completion);
15686 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
15687 + unsigned int keylen)
15689 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15690 + struct device *dev = ctx->dev;
15691 + struct crypto_authenc_keys keys;
15693 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
15697 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
15698 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
15699 + keys.authkeylen);
15700 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
15701 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
15704 + ctx->adata.keylen = keys.authkeylen;
15705 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
15706 + OP_ALG_ALGSEL_MASK);
15708 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
15711 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
15712 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
15713 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
15714 + keys.enckeylen, DMA_BIDIRECTIONAL);
15716 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15717 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
15718 + ctx->adata.keylen_pad + keys.enckeylen, 1);
15721 + ctx->cdata.keylen = keys.enckeylen;
15723 + return aead_set_sh_desc(aead);
15725 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
15729 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
15732 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
15733 + struct caam_request *req_ctx = aead_request_ctx(req);
15734 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15735 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15736 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15737 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15738 + typeof(*alg), aead);
15739 + struct device *dev = ctx->dev;
15740 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
15741 + GFP_KERNEL : GFP_ATOMIC;
15742 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15743 + struct aead_edesc *edesc;
15744 + dma_addr_t qm_sg_dma, iv_dma = 0;
15746 + unsigned int authsize = ctx->authsize;
15747 + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
15748 + int in_len, out_len;
15749 + struct dpaa2_sg_entry *sg_table;
15750 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15752 + /* allocate space for base edesc and link tables */
15753 + edesc = qi_cache_zalloc(GFP_DMA | flags);
15754 + if (unlikely(!edesc)) {
15755 + dev_err(dev, "could not allocate extended descriptor\n");
15756 + return ERR_PTR(-ENOMEM);
15759 + if (unlikely(req->dst != req->src)) {
15760 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15762 + if (unlikely(src_nents < 0)) {
15763 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15764 + req->assoclen + req->cryptlen);
15765 + qi_cache_free(edesc);
15766 + return ERR_PTR(src_nents);
15769 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
15771 + (encrypt ? authsize :
15773 + if (unlikely(dst_nents < 0)) {
15774 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15775 + req->assoclen + req->cryptlen +
15776 + (encrypt ? authsize : (-authsize)));
15777 + qi_cache_free(edesc);
15778 + return ERR_PTR(dst_nents);
15782 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15784 + if (unlikely(!mapped_src_nents)) {
15785 + dev_err(dev, "unable to map source\n");
15786 + qi_cache_free(edesc);
15787 + return ERR_PTR(-ENOMEM);
15790 + mapped_src_nents = 0;
15793 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
15794 + DMA_FROM_DEVICE);
15795 + if (unlikely(!mapped_dst_nents)) {
15796 + dev_err(dev, "unable to map destination\n");
15797 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
15798 + qi_cache_free(edesc);
15799 + return ERR_PTR(-ENOMEM);
15802 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15804 + (encrypt ? authsize : 0));
15805 + if (unlikely(src_nents < 0)) {
15806 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15807 + req->assoclen + req->cryptlen +
15808 + (encrypt ? authsize : 0));
15809 + qi_cache_free(edesc);
15810 + return ERR_PTR(src_nents);
15813 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15814 + DMA_BIDIRECTIONAL);
15815 + if (unlikely(!mapped_src_nents)) {
15816 + dev_err(dev, "unable to map source\n");
15817 + qi_cache_free(edesc);
15818 + return ERR_PTR(-ENOMEM);
15822 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
15823 + ivsize = crypto_aead_ivsize(aead);
15824 + iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
15825 + if (dma_mapping_error(dev, iv_dma)) {
15826 + dev_err(dev, "unable to map IV\n");
15827 + caam_unmap(dev, req->src, req->dst, src_nents,
15828 + dst_nents, 0, 0, op_type, 0, 0);
15829 + qi_cache_free(edesc);
15830 + return ERR_PTR(-ENOMEM);
15835 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
15836 + * Input is not contiguous.
15838 + qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
15839 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
15840 + if (unlikely(qm_sg_nents > CAAM_QI_MAX_AEAD_SG)) {
15841 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
15842 + qm_sg_nents, CAAM_QI_MAX_AEAD_SG);
15843 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15844 + iv_dma, ivsize, op_type, 0, 0);
15845 + qi_cache_free(edesc);
15846 + return ERR_PTR(-ENOMEM);
15848 + sg_table = &edesc->sgt[0];
15849 + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
15851 + edesc->src_nents = src_nents;
15852 + edesc->dst_nents = dst_nents;
15853 + edesc->iv_dma = iv_dma;
15855 + edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4,
15857 + if (dma_mapping_error(dev, edesc->assoclen_dma)) {
15858 + dev_err(dev, "unable to map assoclen\n");
15859 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15860 + iv_dma, ivsize, op_type, 0, 0);
15861 + qi_cache_free(edesc);
15862 + return ERR_PTR(-ENOMEM);
15865 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
15868 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
15871 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
15872 + qm_sg_index += mapped_src_nents;
15874 + if (mapped_dst_nents > 1)
15875 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
15878 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
15879 + if (dma_mapping_error(dev, qm_sg_dma)) {
15880 + dev_err(dev, "unable to map S/G table\n");
15881 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
15882 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15883 + iv_dma, ivsize, op_type, 0, 0);
15884 + qi_cache_free(edesc);
15885 + return ERR_PTR(-ENOMEM);
15888 + edesc->qm_sg_dma = qm_sg_dma;
15889 + edesc->qm_sg_bytes = qm_sg_bytes;
15891 + out_len = req->assoclen + req->cryptlen +
15892 + (encrypt ? ctx->authsize : (-ctx->authsize));
15893 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
15895 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
15896 + dpaa2_fl_set_final(in_fle, true);
15897 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
15898 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
15899 + dpaa2_fl_set_len(in_fle, in_len);
15901 + if (req->dst == req->src) {
15902 + if (mapped_src_nents == 1) {
15903 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15904 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
15906 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15907 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
15908 + (1 + !!ivsize) * sizeof(*sg_table));
15910 + } else if (mapped_dst_nents == 1) {
15911 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15912 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
15914 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15915 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
15916 + sizeof(*sg_table));
15919 + dpaa2_fl_set_len(out_fle, out_len);
15924 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
15927 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
15928 + unsigned int blocksize = crypto_aead_blocksize(tls);
15929 + unsigned int padsize, authsize;
15930 + struct caam_request *req_ctx = aead_request_ctx(req);
15931 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15932 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15933 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
15934 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
15935 + typeof(*alg), aead);
15936 + struct device *dev = ctx->dev;
15937 + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
15938 + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
15939 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15940 + struct tls_edesc *edesc;
15941 + dma_addr_t qm_sg_dma, iv_dma = 0;
15943 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
15944 + int in_len, out_len;
15945 + struct dpaa2_sg_entry *sg_table;
15946 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15947 + struct scatterlist *dst;
15950 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
15952 + authsize = ctx->authsize + padsize;
15954 + authsize = ctx->authsize;
15957 + /* allocate space for base edesc and link tables */
15958 + edesc = qi_cache_zalloc(GFP_DMA | flags);
15959 + if (unlikely(!edesc)) {
15960 + dev_err(dev, "could not allocate extended descriptor\n");
15961 + return ERR_PTR(-ENOMEM);
15964 + if (likely(req->src == req->dst)) {
15965 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15967 + (encrypt ? authsize : 0));
15968 + if (unlikely(src_nents < 0)) {
15969 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15970 + req->assoclen + req->cryptlen +
15971 + (encrypt ? authsize : 0));
15972 + qi_cache_free(edesc);
15973 + return ERR_PTR(src_nents);
15976 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15977 + DMA_BIDIRECTIONAL);
15978 + if (unlikely(!mapped_src_nents)) {
15979 + dev_err(dev, "unable to map source\n");
15980 + qi_cache_free(edesc);
15981 + return ERR_PTR(-ENOMEM);
15985 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15987 + if (unlikely(src_nents < 0)) {
15988 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15989 + req->assoclen + req->cryptlen);
15990 + qi_cache_free(edesc);
15991 + return ERR_PTR(src_nents);
15994 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
15995 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
15996 + (encrypt ? authsize : 0));
15997 + if (unlikely(dst_nents < 0)) {
15998 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16000 + (encrypt ? authsize : 0));
16001 + qi_cache_free(edesc);
16002 + return ERR_PTR(dst_nents);
16006 + mapped_src_nents = dma_map_sg(dev, req->src,
16007 + src_nents, DMA_TO_DEVICE);
16008 + if (unlikely(!mapped_src_nents)) {
16009 + dev_err(dev, "unable to map source\n");
16010 + qi_cache_free(edesc);
16011 + return ERR_PTR(-ENOMEM);
16014 + mapped_src_nents = 0;
16017 + mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
16018 + DMA_FROM_DEVICE);
16019 + if (unlikely(!mapped_dst_nents)) {
16020 + dev_err(dev, "unable to map destination\n");
16021 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16022 + qi_cache_free(edesc);
16023 + return ERR_PTR(-ENOMEM);
16027 + ivsize = crypto_aead_ivsize(tls);
16028 + iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
16029 + if (dma_mapping_error(dev, iv_dma)) {
16030 + dev_err(dev, "unable to map IV\n");
16031 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0,
16033 + qi_cache_free(edesc);
16034 + return ERR_PTR(-ENOMEM);
16038 + * Create S/G table: IV, src, dst.
16039 + * Input is not contiguous.
16041 + qm_sg_ents = 1 + mapped_src_nents +
16042 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
16043 + sg_table = &edesc->sgt[0];
16044 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16046 + edesc->src_nents = src_nents;
16047 + edesc->dst_nents = dst_nents;
16048 + edesc->dst = dst;
16049 + edesc->iv_dma = iv_dma;
16051 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16054 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
16055 + qm_sg_index += mapped_src_nents;
16057 + if (mapped_dst_nents > 1)
16058 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
16061 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
16062 + if (dma_mapping_error(dev, qm_sg_dma)) {
16063 + dev_err(dev, "unable to map S/G table\n");
16064 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
16065 + ivsize, op_type, 0, 0);
16066 + qi_cache_free(edesc);
16067 + return ERR_PTR(-ENOMEM);
16070 + edesc->qm_sg_dma = qm_sg_dma;
16071 + edesc->qm_sg_bytes = qm_sg_bytes;
16073 + out_len = req->cryptlen + (encrypt ? authsize : 0);
16074 + in_len = ivsize + req->assoclen + req->cryptlen;
16076 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16077 + dpaa2_fl_set_final(in_fle, true);
16078 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16079 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
16080 + dpaa2_fl_set_len(in_fle, in_len);
16082 + if (req->dst == req->src) {
16083 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16084 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
16085 + (sg_nents_for_len(req->src, req->assoclen) +
16086 + 1) * sizeof(*sg_table));
16087 + } else if (mapped_dst_nents == 1) {
16088 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16089 + dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
16091 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16092 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
16093 + sizeof(*sg_table));
16096 + dpaa2_fl_set_len(out_fle, out_len);
16101 +static int tls_set_sh_desc(struct crypto_aead *tls)
16103 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16104 + unsigned int ivsize = crypto_aead_ivsize(tls);
16105 + unsigned int blocksize = crypto_aead_blocksize(tls);
16106 + struct device *dev = ctx->dev;
16107 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
16108 + struct caam_flc *flc;
16110 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
16111 + unsigned int data_len[2];
16114 + if (!ctx->cdata.keylen || !ctx->authsize)
16118 + * TLS 1.0 encrypt shared descriptor
16119 + * Job Descriptor and Shared Descriptor
16120 + * must fit into the 64-word Descriptor h/w Buffer
16122 + data_len[0] = ctx->adata.keylen_pad;
16123 + data_len[1] = ctx->cdata.keylen;
16125 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
16126 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
16129 + if (inl_mask & 1)
16130 + ctx->adata.key_virt = ctx->key;
16132 + ctx->adata.key_dma = ctx->key_dma;
16134 + if (inl_mask & 2)
16135 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
16137 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16139 + ctx->adata.key_inline = !!(inl_mask & 1);
16140 + ctx->cdata.key_inline = !!(inl_mask & 2);
16142 + flc = &ctx->flc[ENCRYPT];
16143 + desc = flc->sh_desc;
16144 + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
16145 + assoclen, ivsize, ctx->authsize, blocksize,
16146 + priv->sec_attr.era);
16147 + flc->flc[1] = desc_len(desc);
16148 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16149 + sizeof(flc->flc) + desc_bytes(desc),
16150 + DMA_BIDIRECTIONAL);
16153 + * TLS 1.0 decrypt shared descriptor
16154 + * Keys do not fit inline, regardless of algorithms used
16156 + ctx->adata.key_inline = false;
16157 + ctx->adata.key_dma = ctx->key_dma;
16158 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16160 + flc = &ctx->flc[DECRYPT];
16161 + desc = flc->sh_desc;
16162 + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
16163 + ctx->authsize, blocksize, priv->sec_attr.era);
16164 + flc->flc[1] = desc_len(desc); /* SDL */
16165 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16166 + sizeof(flc->flc) + desc_bytes(desc),
16167 + DMA_BIDIRECTIONAL);
16172 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
16173 + unsigned int keylen)
16175 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16176 + struct device *dev = ctx->dev;
16177 + struct crypto_authenc_keys keys;
16179 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
16183 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
16184 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
16185 + keys.authkeylen);
16186 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16187 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16190 + ctx->adata.keylen = keys.authkeylen;
16191 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
16192 + OP_ALG_ALGSEL_MASK);
16194 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
16197 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
16198 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
16199 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
16200 + keys.enckeylen, DMA_BIDIRECTIONAL);
16202 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16203 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
16204 + ctx->adata.keylen_pad + keys.enckeylen, 1);
16207 + ctx->cdata.keylen = keys.enckeylen;
16209 + return tls_set_sh_desc(tls);
16211 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
16215 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
16217 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16219 + ctx->authsize = authsize;
16220 + tls_set_sh_desc(tls);
16225 +static int gcm_set_sh_desc(struct crypto_aead *aead)
16227 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16228 + struct device *dev = ctx->dev;
16229 + unsigned int ivsize = crypto_aead_ivsize(aead);
16230 + struct caam_flc *flc;
16232 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16233 + ctx->cdata.keylen;
16235 + if (!ctx->cdata.keylen || !ctx->authsize)
16239 + * AES GCM encrypt shared descriptor
16240 + * Job Descriptor and Shared Descriptor
16241 + * must fit into the 64-word Descriptor h/w Buffer
16243 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
16244 + ctx->cdata.key_inline = true;
16245 + ctx->cdata.key_virt = ctx->key;
16247 + ctx->cdata.key_inline = false;
16248 + ctx->cdata.key_dma = ctx->key_dma;
16251 + flc = &ctx->flc[ENCRYPT];
16252 + desc = flc->sh_desc;
16253 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16254 + flc->flc[1] = desc_len(desc); /* SDL */
16255 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16256 + sizeof(flc->flc) + desc_bytes(desc),
16257 + DMA_BIDIRECTIONAL);
16260 + * Job Descriptor and Shared Descriptors
16261 + * must all fit into the 64-word Descriptor h/w Buffer
16263 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
16264 + ctx->cdata.key_inline = true;
16265 + ctx->cdata.key_virt = ctx->key;
16267 + ctx->cdata.key_inline = false;
16268 + ctx->cdata.key_dma = ctx->key_dma;
16271 + flc = &ctx->flc[DECRYPT];
16272 + desc = flc->sh_desc;
16273 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16274 + flc->flc[1] = desc_len(desc); /* SDL */
16275 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16276 + sizeof(flc->flc) + desc_bytes(desc),
16277 + DMA_BIDIRECTIONAL);
16282 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
16284 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16286 + ctx->authsize = authsize;
16287 + gcm_set_sh_desc(authenc);
16292 +static int gcm_setkey(struct crypto_aead *aead,
16293 + const u8 *key, unsigned int keylen)
16295 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16296 + struct device *dev = ctx->dev;
16299 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16300 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16303 + memcpy(ctx->key, key, keylen);
16304 + dma_sync_single_for_device(dev, ctx->key_dma, keylen,
16305 + DMA_BIDIRECTIONAL);
16306 + ctx->cdata.keylen = keylen;
16308 + return gcm_set_sh_desc(aead);
16311 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
16313 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16314 + struct device *dev = ctx->dev;
16315 + unsigned int ivsize = crypto_aead_ivsize(aead);
16316 + struct caam_flc *flc;
16318 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16319 + ctx->cdata.keylen;
16321 + if (!ctx->cdata.keylen || !ctx->authsize)
16324 + ctx->cdata.key_virt = ctx->key;
16327 + * RFC4106 encrypt shared descriptor
16328 + * Job Descriptor and Shared Descriptor
16329 + * must fit into the 64-word Descriptor h/w Buffer
16331 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
16332 + ctx->cdata.key_inline = true;
16334 + ctx->cdata.key_inline = false;
16335 + ctx->cdata.key_dma = ctx->key_dma;
16338 + flc = &ctx->flc[ENCRYPT];
16339 + desc = flc->sh_desc;
16340 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16342 + flc->flc[1] = desc_len(desc); /* SDL */
16343 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16344 + sizeof(flc->flc) + desc_bytes(desc),
16345 + DMA_BIDIRECTIONAL);
16348 + * Job Descriptor and Shared Descriptors
16349 + * must all fit into the 64-word Descriptor h/w Buffer
16351 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
16352 + ctx->cdata.key_inline = true;
16354 + ctx->cdata.key_inline = false;
16355 + ctx->cdata.key_dma = ctx->key_dma;
16358 + flc = &ctx->flc[DECRYPT];
16359 + desc = flc->sh_desc;
16360 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16362 + flc->flc[1] = desc_len(desc); /* SDL */
16363 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16364 + sizeof(flc->flc) + desc_bytes(desc),
16365 + DMA_BIDIRECTIONAL);
16370 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
16371 + unsigned int authsize)
16373 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16375 + ctx->authsize = authsize;
16376 + rfc4106_set_sh_desc(authenc);
16381 +static int rfc4106_setkey(struct crypto_aead *aead,
16382 + const u8 *key, unsigned int keylen)
16384 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16385 + struct device *dev = ctx->dev;
16391 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16392 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16395 + memcpy(ctx->key, key, keylen);
16397 + * The last four bytes of the key material are used as the salt value
16398 + * in the nonce. Update the AES key length.
16400 + ctx->cdata.keylen = keylen - 4;
16401 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
16402 + DMA_BIDIRECTIONAL);
16404 + return rfc4106_set_sh_desc(aead);
16407 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
16409 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16410 + struct device *dev = ctx->dev;
16411 + unsigned int ivsize = crypto_aead_ivsize(aead);
16412 + struct caam_flc *flc;
16414 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16415 + ctx->cdata.keylen;
16417 + if (!ctx->cdata.keylen || !ctx->authsize)
16420 + ctx->cdata.key_virt = ctx->key;
16423 + * RFC4543 encrypt shared descriptor
16424 + * Job Descriptor and Shared Descriptor
16425 + * must fit into the 64-word Descriptor h/w Buffer
16427 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
16428 + ctx->cdata.key_inline = true;
16430 + ctx->cdata.key_inline = false;
16431 + ctx->cdata.key_dma = ctx->key_dma;
16434 + flc = &ctx->flc[ENCRYPT];
16435 + desc = flc->sh_desc;
16436 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16438 + flc->flc[1] = desc_len(desc); /* SDL */
16439 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16440 + sizeof(flc->flc) + desc_bytes(desc),
16441 + DMA_BIDIRECTIONAL);
16444 + * Job Descriptor and Shared Descriptors
16445 + * must all fit into the 64-word Descriptor h/w Buffer
16447 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
16448 + ctx->cdata.key_inline = true;
16450 + ctx->cdata.key_inline = false;
16451 + ctx->cdata.key_dma = ctx->key_dma;
16454 + flc = &ctx->flc[DECRYPT];
16455 + desc = flc->sh_desc;
16456 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16458 + flc->flc[1] = desc_len(desc); /* SDL */
16459 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16460 + sizeof(flc->flc) + desc_bytes(desc),
16461 + DMA_BIDIRECTIONAL);
16466 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
16467 + unsigned int authsize)
16469 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16471 + ctx->authsize = authsize;
16472 + rfc4543_set_sh_desc(authenc);
16477 +static int rfc4543_setkey(struct crypto_aead *aead,
16478 + const u8 *key, unsigned int keylen)
16480 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16481 + struct device *dev = ctx->dev;
16487 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16488 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16491 + memcpy(ctx->key, key, keylen);
16493 + * The last four bytes of the key material are used as the salt value
16494 + * in the nonce. Update the AES key length.
16496 + ctx->cdata.keylen = keylen - 4;
16497 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
16498 + DMA_BIDIRECTIONAL);
16500 + return rfc4543_set_sh_desc(aead);
16503 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16504 + const u8 *key, unsigned int keylen)
16506 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16507 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
16508 + const char *alg_name = crypto_tfm_alg_name(tfm);
16509 + struct device *dev = ctx->dev;
16510 + struct caam_flc *flc;
16511 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16513 + u32 ctx1_iv_off = 0;
16514 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
16515 + OP_ALG_AAI_CTR_MOD128);
16516 + const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
16519 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16520 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16523 + * AES-CTR needs to load IV in CONTEXT1 reg
16524 + * at an offset of 128bits (16bytes)
16525 + * CONTEXT1[255:128] = IV
16528 + ctx1_iv_off = 16;
16531 + * RFC3686 specific:
16532 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
16533 + * | *key = {KEY, NONCE}
16535 + if (is_rfc3686) {
16536 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
16537 + keylen -= CTR_RFC3686_NONCE_SIZE;
16540 + ctx->cdata.keylen = keylen;
16541 + ctx->cdata.key_virt = key;
16542 + ctx->cdata.key_inline = true;
16544 + /* ablkcipher_encrypt shared descriptor */
16545 + flc = &ctx->flc[ENCRYPT];
16546 + desc = flc->sh_desc;
16547 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
16548 + is_rfc3686, ctx1_iv_off);
16549 + flc->flc[1] = desc_len(desc); /* SDL */
16550 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16551 + sizeof(flc->flc) + desc_bytes(desc),
16552 + DMA_BIDIRECTIONAL);
16554 + /* ablkcipher_decrypt shared descriptor */
16555 + flc = &ctx->flc[DECRYPT];
16556 + desc = flc->sh_desc;
16557 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
16558 + is_rfc3686, ctx1_iv_off);
16559 + flc->flc[1] = desc_len(desc); /* SDL */
16560 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16561 + sizeof(flc->flc) + desc_bytes(desc),
16562 + DMA_BIDIRECTIONAL);
16564 + /* ablkcipher_givencrypt shared descriptor */
16565 + flc = &ctx->flc[GIVENCRYPT];
16566 + desc = flc->sh_desc;
16567 + cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
16568 + ivsize, is_rfc3686, ctx1_iv_off);
16569 + flc->flc[1] = desc_len(desc); /* SDL */
16570 + dma_sync_single_for_device(dev, ctx->flc_dma[GIVENCRYPT],
16571 + sizeof(flc->flc) + desc_bytes(desc),
16572 + DMA_BIDIRECTIONAL);
16577 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16578 + const u8 *key, unsigned int keylen)
16580 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16581 + struct device *dev = ctx->dev;
16582 + struct caam_flc *flc;
16585 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
16586 + dev_err(dev, "key size mismatch\n");
16587 + crypto_ablkcipher_set_flags(ablkcipher,
16588 + CRYPTO_TFM_RES_BAD_KEY_LEN);
16592 + ctx->cdata.keylen = keylen;
16593 + ctx->cdata.key_virt = key;
16594 + ctx->cdata.key_inline = true;
16596 + /* xts_ablkcipher_encrypt shared descriptor */
16597 + flc = &ctx->flc[ENCRYPT];
16598 + desc = flc->sh_desc;
16599 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
16600 + flc->flc[1] = desc_len(desc); /* SDL */
16601 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16602 + sizeof(flc->flc) + desc_bytes(desc),
16603 + DMA_BIDIRECTIONAL);
16605 + /* xts_ablkcipher_decrypt shared descriptor */
16606 + flc = &ctx->flc[DECRYPT];
16607 + desc = flc->sh_desc;
16608 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
16609 + flc->flc[1] = desc_len(desc); /* SDL */
16610 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16611 + sizeof(flc->flc) + desc_bytes(desc),
16612 + DMA_BIDIRECTIONAL);
16617 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
16618 + *req, bool encrypt)
16620 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16621 + struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16622 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16623 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16624 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16625 + struct device *dev = ctx->dev;
16626 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16627 + GFP_KERNEL : GFP_ATOMIC;
16628 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
16629 + struct ablkcipher_edesc *edesc;
16630 + dma_addr_t iv_dma;
16632 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16633 + int dst_sg_idx, qm_sg_ents;
16634 + struct dpaa2_sg_entry *sg_table;
16635 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
16637 + src_nents = sg_nents_for_len(req->src, req->nbytes);
16638 + if (unlikely(src_nents < 0)) {
16639 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16641 + return ERR_PTR(src_nents);
16644 + if (unlikely(req->dst != req->src)) {
16645 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16646 + if (unlikely(dst_nents < 0)) {
16647 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16649 + return ERR_PTR(dst_nents);
16652 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16654 + if (unlikely(!mapped_src_nents)) {
16655 + dev_err(dev, "unable to map source\n");
16656 + return ERR_PTR(-ENOMEM);
16659 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16660 + DMA_FROM_DEVICE);
16661 + if (unlikely(!mapped_dst_nents)) {
16662 + dev_err(dev, "unable to map destination\n");
16663 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16664 + return ERR_PTR(-ENOMEM);
16667 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16668 + DMA_BIDIRECTIONAL);
16669 + if (unlikely(!mapped_src_nents)) {
16670 + dev_err(dev, "unable to map source\n");
16671 + return ERR_PTR(-ENOMEM);
16675 + iv_dma = dma_map_single(dev, req->info, ivsize, DMA_TO_DEVICE);
16676 + if (dma_mapping_error(dev, iv_dma)) {
16677 + dev_err(dev, "unable to map IV\n");
16678 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16680 + return ERR_PTR(-ENOMEM);
16683 + if (mapped_src_nents == 1 &&
16684 + iv_dma + ivsize == sg_dma_address(req->src)) {
16685 + in_contig = true;
16688 + in_contig = false;
16689 + qm_sg_ents = 1 + mapped_src_nents;
16691 + dst_sg_idx = qm_sg_ents;
16693 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
16694 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16695 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16696 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16697 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16698 + iv_dma, ivsize, op_type, 0, 0);
16699 + return ERR_PTR(-ENOMEM);
16702 + /* allocate space for base edesc and link tables */
16703 + edesc = qi_cache_zalloc(GFP_DMA | flags);
16704 + if (unlikely(!edesc)) {
16705 + dev_err(dev, "could not allocate extended descriptor\n");
16706 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16707 + iv_dma, ivsize, op_type, 0, 0);
16708 + return ERR_PTR(-ENOMEM);
16711 + edesc->src_nents = src_nents;
16712 + edesc->dst_nents = dst_nents;
16713 + edesc->iv_dma = iv_dma;
16714 + sg_table = &edesc->sgt[0];
16715 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16717 + if (!in_contig) {
16718 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16719 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
16722 + if (mapped_dst_nents > 1)
16723 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16726 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16728 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16729 + dev_err(dev, "unable to map S/G table\n");
16730 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16731 + iv_dma, ivsize, op_type, 0, 0);
16732 + qi_cache_free(edesc);
16733 + return ERR_PTR(-ENOMEM);
16736 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16737 + dpaa2_fl_set_final(in_fle, true);
16738 + dpaa2_fl_set_len(in_fle, req->nbytes + ivsize);
16739 + dpaa2_fl_set_len(out_fle, req->nbytes);
16741 + if (!in_contig) {
16742 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16743 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16745 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16746 + dpaa2_fl_set_addr(in_fle, iv_dma);
16749 + if (req->src == req->dst) {
16750 + if (!in_contig) {
16751 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16752 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
16753 + sizeof(*sg_table));
16755 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16756 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
16758 + } else if (mapped_dst_nents > 1) {
16759 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16760 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16761 + sizeof(*sg_table));
16763 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16764 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16770 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
16771 + struct skcipher_givcrypt_request *greq)
16773 + struct ablkcipher_request *req = &greq->creq;
16774 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16775 + struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16776 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16777 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16778 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16779 + struct device *dev = ctx->dev;
16780 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16781 + GFP_KERNEL : GFP_ATOMIC;
16782 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
16783 + struct ablkcipher_edesc *edesc;
16784 + dma_addr_t iv_dma;
16786 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16787 + struct dpaa2_sg_entry *sg_table;
16788 + int dst_sg_idx, qm_sg_ents;
16790 + src_nents = sg_nents_for_len(req->src, req->nbytes);
16791 + if (unlikely(src_nents < 0)) {
16792 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16794 + return ERR_PTR(src_nents);
16797 + if (unlikely(req->dst != req->src)) {
16798 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16799 + if (unlikely(dst_nents < 0)) {
16800 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16802 + return ERR_PTR(dst_nents);
16805 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16807 + if (unlikely(!mapped_src_nents)) {
16808 + dev_err(dev, "unable to map source\n");
16809 + return ERR_PTR(-ENOMEM);
16812 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16813 + DMA_FROM_DEVICE);
16814 + if (unlikely(!mapped_dst_nents)) {
16815 + dev_err(dev, "unable to map destination\n");
16816 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16817 + return ERR_PTR(-ENOMEM);
16820 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16821 + DMA_BIDIRECTIONAL);
16822 + if (unlikely(!mapped_src_nents)) {
16823 + dev_err(dev, "unable to map source\n");
16824 + return ERR_PTR(-ENOMEM);
16827 + dst_nents = src_nents;
16828 + mapped_dst_nents = src_nents;
16831 + iv_dma = dma_map_single(dev, greq->giv, ivsize, DMA_FROM_DEVICE);
16832 + if (dma_mapping_error(dev, iv_dma)) {
16833 + dev_err(dev, "unable to map IV\n");
16834 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16836 + return ERR_PTR(-ENOMEM);
16839 + qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
16840 + dst_sg_idx = qm_sg_ents;
16841 + if (mapped_dst_nents == 1 &&
16842 + iv_dma + ivsize == sg_dma_address(req->dst)) {
16843 + out_contig = true;
16845 + out_contig = false;
16846 + qm_sg_ents += 1 + mapped_dst_nents;
16849 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16850 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16851 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16852 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16853 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16854 + return ERR_PTR(-ENOMEM);
16857 + /* allocate space for base edesc and link tables */
16858 + edesc = qi_cache_zalloc(GFP_DMA | flags);
16860 + dev_err(dev, "could not allocate extended descriptor\n");
16861 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16862 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16863 + return ERR_PTR(-ENOMEM);
16866 + edesc->src_nents = src_nents;
16867 + edesc->dst_nents = dst_nents;
16868 + edesc->iv_dma = iv_dma;
16869 + sg_table = &edesc->sgt[0];
16870 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16872 + if (mapped_src_nents > 1)
16873 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
16875 + if (!out_contig) {
16876 + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
16877 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16878 + dst_sg_idx + 1, 0);
16881 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16883 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16884 + dev_err(dev, "unable to map S/G table\n");
16885 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16886 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16887 + qi_cache_free(edesc);
16888 + return ERR_PTR(-ENOMEM);
16891 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16892 + dpaa2_fl_set_final(in_fle, true);
16893 + dpaa2_fl_set_len(in_fle, req->nbytes);
16894 + dpaa2_fl_set_len(out_fle, ivsize + req->nbytes);
16896 + if (mapped_src_nents > 1) {
16897 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16898 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16900 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16901 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
16904 + if (!out_contig) {
16905 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16906 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16907 + sizeof(*sg_table));
16909 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16910 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16916 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
16917 + struct aead_request *req)
16919 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
16920 + int ivsize = crypto_aead_ivsize(aead);
16921 + struct caam_request *caam_req = aead_request_ctx(req);
16923 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
16924 + edesc->iv_dma, ivsize, caam_req->op_type,
16925 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
16926 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
16929 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
16930 + struct aead_request *req)
16932 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
16933 + int ivsize = crypto_aead_ivsize(tls);
16934 + struct caam_request *caam_req = aead_request_ctx(req);
16936 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
16937 + edesc->dst_nents, edesc->iv_dma, ivsize, caam_req->op_type,
16938 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
16941 +static void ablkcipher_unmap(struct device *dev,
16942 + struct ablkcipher_edesc *edesc,
16943 + struct ablkcipher_request *req)
16945 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16946 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16947 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
16949 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
16950 + edesc->iv_dma, ivsize, caam_req->op_type,
16951 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
16954 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
16956 + struct crypto_async_request *areq = cbk_ctx;
16957 + struct aead_request *req = container_of(areq, struct aead_request,
16959 + struct caam_request *req_ctx = to_caam_req(areq);
16960 + struct aead_edesc *edesc = req_ctx->edesc;
16961 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
16962 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16966 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
16969 + if (unlikely(status)) {
16970 + caam_qi2_strstatus(ctx->dev, status);
16974 + aead_unmap(ctx->dev, edesc, req);
16975 + qi_cache_free(edesc);
16976 + aead_request_complete(req, ecode);
16979 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
16981 + struct crypto_async_request *areq = cbk_ctx;
16982 + struct aead_request *req = container_of(areq, struct aead_request,
16984 + struct caam_request *req_ctx = to_caam_req(areq);
16985 + struct aead_edesc *edesc = req_ctx->edesc;
16986 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
16987 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16991 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
16994 + if (unlikely(status)) {
16995 + caam_qi2_strstatus(ctx->dev, status);
16997 + * verify hw auth check passed else return -EBADMSG
16999 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17000 + JRSTA_CCBERR_ERRID_ICVCHK)
17001 + ecode = -EBADMSG;
17006 + aead_unmap(ctx->dev, edesc, req);
17007 + qi_cache_free(edesc);
17008 + aead_request_complete(req, ecode);
17011 +static int aead_encrypt(struct aead_request *req)
17013 + struct aead_edesc *edesc;
17014 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17015 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17016 + struct caam_request *caam_req = aead_request_ctx(req);
17019 + /* allocate extended descriptor */
17020 + edesc = aead_edesc_alloc(req, true);
17021 + if (IS_ERR(edesc))
17022 + return PTR_ERR(edesc);
17024 + caam_req->flc = &ctx->flc[ENCRYPT];
17025 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
17026 + caam_req->op_type = ENCRYPT;
17027 + caam_req->cbk = aead_encrypt_done;
17028 + caam_req->ctx = &req->base;
17029 + caam_req->edesc = edesc;
17030 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17031 + if (ret != -EINPROGRESS &&
17032 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17033 + aead_unmap(ctx->dev, edesc, req);
17034 + qi_cache_free(edesc);
17040 +static int aead_decrypt(struct aead_request *req)
17042 + struct aead_edesc *edesc;
17043 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17044 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17045 + struct caam_request *caam_req = aead_request_ctx(req);
17048 + /* allocate extended descriptor */
17049 + edesc = aead_edesc_alloc(req, false);
17050 + if (IS_ERR(edesc))
17051 + return PTR_ERR(edesc);
17053 + caam_req->flc = &ctx->flc[DECRYPT];
17054 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
17055 + caam_req->op_type = DECRYPT;
17056 + caam_req->cbk = aead_decrypt_done;
17057 + caam_req->ctx = &req->base;
17058 + caam_req->edesc = edesc;
17059 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17060 + if (ret != -EINPROGRESS &&
17061 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17062 + aead_unmap(ctx->dev, edesc, req);
17063 + qi_cache_free(edesc);
17069 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
17071 + struct crypto_async_request *areq = cbk_ctx;
17072 + struct aead_request *req = container_of(areq, struct aead_request,
17074 + struct caam_request *req_ctx = to_caam_req(areq);
17075 + struct tls_edesc *edesc = req_ctx->edesc;
17076 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17077 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17081 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17084 + if (unlikely(status)) {
17085 + caam_qi2_strstatus(ctx->dev, status);
17089 + tls_unmap(ctx->dev, edesc, req);
17090 + qi_cache_free(edesc);
17091 + aead_request_complete(req, ecode);
17094 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
17096 + struct crypto_async_request *areq = cbk_ctx;
17097 + struct aead_request *req = container_of(areq, struct aead_request,
17099 + struct caam_request *req_ctx = to_caam_req(areq);
17100 + struct tls_edesc *edesc = req_ctx->edesc;
17101 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17102 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17106 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17109 + if (unlikely(status)) {
17110 + caam_qi2_strstatus(ctx->dev, status);
17112 + * verify hw auth check passed else return -EBADMSG
17114 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17115 + JRSTA_CCBERR_ERRID_ICVCHK)
17116 + ecode = -EBADMSG;
17121 + tls_unmap(ctx->dev, edesc, req);
17122 + qi_cache_free(edesc);
17123 + aead_request_complete(req, ecode);
17126 +static int tls_encrypt(struct aead_request *req)
17128 + struct tls_edesc *edesc;
17129 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17130 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17131 + struct caam_request *caam_req = aead_request_ctx(req);
17134 + /* allocate extended descriptor */
17135 + edesc = tls_edesc_alloc(req, true);
17136 + if (IS_ERR(edesc))
17137 + return PTR_ERR(edesc);
17139 + caam_req->flc = &ctx->flc[ENCRYPT];
17140 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
17141 + caam_req->op_type = ENCRYPT;
17142 + caam_req->cbk = tls_encrypt_done;
17143 + caam_req->ctx = &req->base;
17144 + caam_req->edesc = edesc;
17145 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17146 + if (ret != -EINPROGRESS &&
17147 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17148 + tls_unmap(ctx->dev, edesc, req);
17149 + qi_cache_free(edesc);
17155 +static int tls_decrypt(struct aead_request *req)
17157 + struct tls_edesc *edesc;
17158 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17159 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17160 + struct caam_request *caam_req = aead_request_ctx(req);
17163 + /* allocate extended descriptor */
17164 + edesc = tls_edesc_alloc(req, false);
17165 + if (IS_ERR(edesc))
17166 + return PTR_ERR(edesc);
17168 + caam_req->flc = &ctx->flc[DECRYPT];
17169 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
17170 + caam_req->op_type = DECRYPT;
17171 + caam_req->cbk = tls_decrypt_done;
17172 + caam_req->ctx = &req->base;
17173 + caam_req->edesc = edesc;
17174 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17175 + if (ret != -EINPROGRESS &&
17176 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17177 + tls_unmap(ctx->dev, edesc, req);
17178 + qi_cache_free(edesc);
17184 +static int ipsec_gcm_encrypt(struct aead_request *req)
17186 + if (req->assoclen < 8)
17189 + return aead_encrypt(req);
17192 +static int ipsec_gcm_decrypt(struct aead_request *req)
17194 + if (req->assoclen < 8)
17197 + return aead_decrypt(req);
17200 +static void ablkcipher_done(void *cbk_ctx, u32 status)
17202 + struct crypto_async_request *areq = cbk_ctx;
17203 + struct ablkcipher_request *req = ablkcipher_request_cast(areq);
17204 + struct caam_request *req_ctx = to_caam_req(areq);
17205 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17206 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17207 + struct ablkcipher_edesc *edesc = req_ctx->edesc;
17209 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17212 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17215 + if (unlikely(status)) {
17216 + caam_qi2_strstatus(ctx->dev, status);
17221 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
17222 + DUMP_PREFIX_ADDRESS, 16, 4, req->info,
17223 + edesc->src_nents > 1 ? 100 : ivsize, 1);
17224 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
17225 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
17226 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
17229 + ablkcipher_unmap(ctx->dev, edesc, req);
17230 + qi_cache_free(edesc);
17233 + * The crypto API expects us to set the IV (req->info) to the last
17234 + * ciphertext block. This is used e.g. by the CTS mode.
17236 + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
17239 + ablkcipher_request_complete(req, ecode);
17242 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
17244 + struct ablkcipher_edesc *edesc;
17245 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17246 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17247 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17250 + /* allocate extended descriptor */
17251 + edesc = ablkcipher_edesc_alloc(req, true);
17252 + if (IS_ERR(edesc))
17253 + return PTR_ERR(edesc);
17255 + caam_req->flc = &ctx->flc[ENCRYPT];
17256 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
17257 + caam_req->op_type = ENCRYPT;
17258 + caam_req->cbk = ablkcipher_done;
17259 + caam_req->ctx = &req->base;
17260 + caam_req->edesc = edesc;
17261 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17262 + if (ret != -EINPROGRESS &&
17263 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17264 + ablkcipher_unmap(ctx->dev, edesc, req);
17265 + qi_cache_free(edesc);
17271 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *greq)
17273 + struct ablkcipher_request *req = &greq->creq;
17274 + struct ablkcipher_edesc *edesc;
17275 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17276 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17277 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17280 + /* allocate extended descriptor */
17281 + edesc = ablkcipher_giv_edesc_alloc(greq);
17282 + if (IS_ERR(edesc))
17283 + return PTR_ERR(edesc);
17285 + caam_req->flc = &ctx->flc[GIVENCRYPT];
17286 + caam_req->flc_dma = ctx->flc_dma[GIVENCRYPT];
17287 + caam_req->op_type = GIVENCRYPT;
17288 + caam_req->cbk = ablkcipher_done;
17289 + caam_req->ctx = &req->base;
17290 + caam_req->edesc = edesc;
17291 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17292 + if (ret != -EINPROGRESS &&
17293 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17294 + ablkcipher_unmap(ctx->dev, edesc, req);
17295 + qi_cache_free(edesc);
17301 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
17303 + struct ablkcipher_edesc *edesc;
17304 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17305 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17306 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17309 + /* allocate extended descriptor */
17310 + edesc = ablkcipher_edesc_alloc(req, false);
17311 + if (IS_ERR(edesc))
17312 + return PTR_ERR(edesc);
17314 + caam_req->flc = &ctx->flc[DECRYPT];
17315 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
17316 + caam_req->op_type = DECRYPT;
17317 + caam_req->cbk = ablkcipher_done;
17318 + caam_req->ctx = &req->base;
17319 + caam_req->edesc = edesc;
17320 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17321 + if (ret != -EINPROGRESS &&
17322 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17323 + ablkcipher_unmap(ctx->dev, edesc, req);
17324 + qi_cache_free(edesc);
17330 +struct caam_crypto_alg {
17331 + struct list_head entry;
17332 + struct crypto_alg crypto_alg;
17333 + struct caam_alg_entry caam;
17336 +static int caam_cra_init(struct crypto_tfm *tfm)
17338 + struct crypto_alg *alg = tfm->__crt_alg;
17339 + struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
17341 + struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
17342 + dma_addr_t dma_addr;
17345 + /* copy descriptor header template value */
17346 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
17347 + caam_alg->caam.class1_alg_type;
17348 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG |
17349 + caam_alg->caam.class2_alg_type;
17351 + ctx->dev = caam_alg->caam.dev;
17353 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
17354 + offsetof(struct caam_ctx, flc_dma),
17355 + DMA_BIDIRECTIONAL,
17356 + DMA_ATTR_SKIP_CPU_SYNC);
17357 + if (dma_mapping_error(ctx->dev, dma_addr)) {
17358 + dev_err(ctx->dev, "unable to map key, shared descriptors\n");
17362 + for (i = 0; i < NUM_OP; i++)
17363 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
17364 + ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
17369 +static int caam_cra_init_ablkcipher(struct crypto_tfm *tfm)
17371 + struct ablkcipher_tfm *ablkcipher_tfm =
17372 + crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
17374 + ablkcipher_tfm->reqsize = sizeof(struct caam_request);
17375 + return caam_cra_init(tfm);
17378 +static int caam_cra_init_aead(struct crypto_aead *tfm)
17380 + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
17381 + return caam_cra_init(crypto_aead_tfm(tfm));
17384 +static void caam_exit_common(struct caam_ctx *ctx)
17386 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
17387 + offsetof(struct caam_ctx, flc_dma),
17388 + DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
17391 +static void caam_cra_exit(struct crypto_tfm *tfm)
17393 + caam_exit_common(crypto_tfm_ctx(tfm));
17396 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
17398 + caam_exit_common(crypto_aead_ctx(tfm));
17401 +#define template_ablkcipher template_u.ablkcipher
17402 +struct caam_alg_template {
17403 + char name[CRYPTO_MAX_ALG_NAME];
17404 + char driver_name[CRYPTO_MAX_ALG_NAME];
17405 + unsigned int blocksize;
17408 + struct ablkcipher_alg ablkcipher;
17410 + u32 class1_alg_type;
17411 + u32 class2_alg_type;
17414 +static struct caam_alg_template driver_algs[] = {
17415 + /* ablkcipher descriptor */
17417 + .name = "cbc(aes)",
17418 + .driver_name = "cbc-aes-caam-qi2",
17419 + .blocksize = AES_BLOCK_SIZE,
17420 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17421 + .template_ablkcipher = {
17422 + .setkey = ablkcipher_setkey,
17423 + .encrypt = ablkcipher_encrypt,
17424 + .decrypt = ablkcipher_decrypt,
17425 + .givencrypt = ablkcipher_givencrypt,
17426 + .geniv = "<built-in>",
17427 + .min_keysize = AES_MIN_KEY_SIZE,
17428 + .max_keysize = AES_MAX_KEY_SIZE,
17429 + .ivsize = AES_BLOCK_SIZE,
17431 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17434 + .name = "cbc(des3_ede)",
17435 + .driver_name = "cbc-3des-caam-qi2",
17436 + .blocksize = DES3_EDE_BLOCK_SIZE,
17437 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17438 + .template_ablkcipher = {
17439 + .setkey = ablkcipher_setkey,
17440 + .encrypt = ablkcipher_encrypt,
17441 + .decrypt = ablkcipher_decrypt,
17442 + .givencrypt = ablkcipher_givencrypt,
17443 + .geniv = "<built-in>",
17444 + .min_keysize = DES3_EDE_KEY_SIZE,
17445 + .max_keysize = DES3_EDE_KEY_SIZE,
17446 + .ivsize = DES3_EDE_BLOCK_SIZE,
17448 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17451 + .name = "cbc(des)",
17452 + .driver_name = "cbc-des-caam-qi2",
17453 + .blocksize = DES_BLOCK_SIZE,
17454 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17455 + .template_ablkcipher = {
17456 + .setkey = ablkcipher_setkey,
17457 + .encrypt = ablkcipher_encrypt,
17458 + .decrypt = ablkcipher_decrypt,
17459 + .givencrypt = ablkcipher_givencrypt,
17460 + .geniv = "<built-in>",
17461 + .min_keysize = DES_KEY_SIZE,
17462 + .max_keysize = DES_KEY_SIZE,
17463 + .ivsize = DES_BLOCK_SIZE,
17465 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
17468 + .name = "ctr(aes)",
17469 + .driver_name = "ctr-aes-caam-qi2",
17471 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17472 + .template_ablkcipher = {
17473 + .setkey = ablkcipher_setkey,
17474 + .encrypt = ablkcipher_encrypt,
17475 + .decrypt = ablkcipher_decrypt,
17476 + .geniv = "chainiv",
17477 + .min_keysize = AES_MIN_KEY_SIZE,
17478 + .max_keysize = AES_MAX_KEY_SIZE,
17479 + .ivsize = AES_BLOCK_SIZE,
17481 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17484 + .name = "rfc3686(ctr(aes))",
17485 + .driver_name = "rfc3686-ctr-aes-caam-qi2",
17487 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17488 + .template_ablkcipher = {
17489 + .setkey = ablkcipher_setkey,
17490 + .encrypt = ablkcipher_encrypt,
17491 + .decrypt = ablkcipher_decrypt,
17492 + .givencrypt = ablkcipher_givencrypt,
17493 + .geniv = "<built-in>",
17494 + .min_keysize = AES_MIN_KEY_SIZE +
17495 + CTR_RFC3686_NONCE_SIZE,
17496 + .max_keysize = AES_MAX_KEY_SIZE +
17497 + CTR_RFC3686_NONCE_SIZE,
17498 + .ivsize = CTR_RFC3686_IV_SIZE,
17500 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17503 + .name = "xts(aes)",
17504 + .driver_name = "xts-aes-caam-qi2",
17505 + .blocksize = AES_BLOCK_SIZE,
17506 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17507 + .template_ablkcipher = {
17508 + .setkey = xts_ablkcipher_setkey,
17509 + .encrypt = ablkcipher_encrypt,
17510 + .decrypt = ablkcipher_decrypt,
17511 + .geniv = "eseqiv",
17512 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
17513 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
17514 + .ivsize = AES_BLOCK_SIZE,
17516 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
17520 +static struct caam_aead_alg driver_aeads[] = {
17524 + .cra_name = "rfc4106(gcm(aes))",
17525 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
17526 + .cra_blocksize = 1,
17528 + .setkey = rfc4106_setkey,
17529 + .setauthsize = rfc4106_setauthsize,
17530 + .encrypt = ipsec_gcm_encrypt,
17531 + .decrypt = ipsec_gcm_decrypt,
17533 + .maxauthsize = AES_BLOCK_SIZE,
17536 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17542 + .cra_name = "rfc4543(gcm(aes))",
17543 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
17544 + .cra_blocksize = 1,
17546 + .setkey = rfc4543_setkey,
17547 + .setauthsize = rfc4543_setauthsize,
17548 + .encrypt = ipsec_gcm_encrypt,
17549 + .decrypt = ipsec_gcm_decrypt,
17551 + .maxauthsize = AES_BLOCK_SIZE,
17554 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17557 + /* Galois Counter Mode */
17561 + .cra_name = "gcm(aes)",
17562 + .cra_driver_name = "gcm-aes-caam-qi2",
17563 + .cra_blocksize = 1,
17565 + .setkey = gcm_setkey,
17566 + .setauthsize = gcm_setauthsize,
17567 + .encrypt = aead_encrypt,
17568 + .decrypt = aead_decrypt,
17570 + .maxauthsize = AES_BLOCK_SIZE,
17573 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17576 + /* single-pass ipsec_esp descriptor */
17580 + .cra_name = "authenc(hmac(md5),cbc(aes))",
17581 + .cra_driver_name = "authenc-hmac-md5-"
17582 + "cbc-aes-caam-qi2",
17583 + .cra_blocksize = AES_BLOCK_SIZE,
17585 + .setkey = aead_setkey,
17586 + .setauthsize = aead_setauthsize,
17587 + .encrypt = aead_encrypt,
17588 + .decrypt = aead_decrypt,
17589 + .ivsize = AES_BLOCK_SIZE,
17590 + .maxauthsize = MD5_DIGEST_SIZE,
17593 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17594 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17595 + OP_ALG_AAI_HMAC_PRECOMP,
17601 + .cra_name = "echainiv(authenc(hmac(md5),"
17603 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
17604 + "cbc-aes-caam-qi2",
17605 + .cra_blocksize = AES_BLOCK_SIZE,
17607 + .setkey = aead_setkey,
17608 + .setauthsize = aead_setauthsize,
17609 + .encrypt = aead_encrypt,
17610 + .decrypt = aead_decrypt,
17611 + .ivsize = AES_BLOCK_SIZE,
17612 + .maxauthsize = MD5_DIGEST_SIZE,
17615 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17616 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17617 + OP_ALG_AAI_HMAC_PRECOMP,
17624 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
17625 + .cra_driver_name = "authenc-hmac-sha1-"
17626 + "cbc-aes-caam-qi2",
17627 + .cra_blocksize = AES_BLOCK_SIZE,
17629 + .setkey = aead_setkey,
17630 + .setauthsize = aead_setauthsize,
17631 + .encrypt = aead_encrypt,
17632 + .decrypt = aead_decrypt,
17633 + .ivsize = AES_BLOCK_SIZE,
17634 + .maxauthsize = SHA1_DIGEST_SIZE,
17637 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17638 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17639 + OP_ALG_AAI_HMAC_PRECOMP,
17645 + .cra_name = "echainiv(authenc(hmac(sha1),"
17647 + .cra_driver_name = "echainiv-authenc-"
17648 + "hmac-sha1-cbc-aes-caam-qi2",
17649 + .cra_blocksize = AES_BLOCK_SIZE,
17651 + .setkey = aead_setkey,
17652 + .setauthsize = aead_setauthsize,
17653 + .encrypt = aead_encrypt,
17654 + .decrypt = aead_decrypt,
17655 + .ivsize = AES_BLOCK_SIZE,
17656 + .maxauthsize = SHA1_DIGEST_SIZE,
17659 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17660 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17661 + OP_ALG_AAI_HMAC_PRECOMP,
17668 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
17669 + .cra_driver_name = "authenc-hmac-sha224-"
17670 + "cbc-aes-caam-qi2",
17671 + .cra_blocksize = AES_BLOCK_SIZE,
17673 + .setkey = aead_setkey,
17674 + .setauthsize = aead_setauthsize,
17675 + .encrypt = aead_encrypt,
17676 + .decrypt = aead_decrypt,
17677 + .ivsize = AES_BLOCK_SIZE,
17678 + .maxauthsize = SHA224_DIGEST_SIZE,
17681 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17682 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17683 + OP_ALG_AAI_HMAC_PRECOMP,
17689 + .cra_name = "echainiv(authenc(hmac(sha224),"
17691 + .cra_driver_name = "echainiv-authenc-"
17692 + "hmac-sha224-cbc-aes-caam-qi2",
17693 + .cra_blocksize = AES_BLOCK_SIZE,
17695 + .setkey = aead_setkey,
17696 + .setauthsize = aead_setauthsize,
17697 + .encrypt = aead_encrypt,
17698 + .decrypt = aead_decrypt,
17699 + .ivsize = AES_BLOCK_SIZE,
17700 + .maxauthsize = SHA224_DIGEST_SIZE,
17703 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17704 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17705 + OP_ALG_AAI_HMAC_PRECOMP,
17712 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
17713 + .cra_driver_name = "authenc-hmac-sha256-"
17714 + "cbc-aes-caam-qi2",
17715 + .cra_blocksize = AES_BLOCK_SIZE,
17717 + .setkey = aead_setkey,
17718 + .setauthsize = aead_setauthsize,
17719 + .encrypt = aead_encrypt,
17720 + .decrypt = aead_decrypt,
17721 + .ivsize = AES_BLOCK_SIZE,
17722 + .maxauthsize = SHA256_DIGEST_SIZE,
17725 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17726 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17727 + OP_ALG_AAI_HMAC_PRECOMP,
17733 + .cra_name = "echainiv(authenc(hmac(sha256),"
17735 + .cra_driver_name = "echainiv-authenc-"
17736 + "hmac-sha256-cbc-aes-"
17738 + .cra_blocksize = AES_BLOCK_SIZE,
17740 + .setkey = aead_setkey,
17741 + .setauthsize = aead_setauthsize,
17742 + .encrypt = aead_encrypt,
17743 + .decrypt = aead_decrypt,
17744 + .ivsize = AES_BLOCK_SIZE,
17745 + .maxauthsize = SHA256_DIGEST_SIZE,
17748 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17749 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17750 + OP_ALG_AAI_HMAC_PRECOMP,
17757 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
17758 + .cra_driver_name = "authenc-hmac-sha384-"
17759 + "cbc-aes-caam-qi2",
17760 + .cra_blocksize = AES_BLOCK_SIZE,
17762 + .setkey = aead_setkey,
17763 + .setauthsize = aead_setauthsize,
17764 + .encrypt = aead_encrypt,
17765 + .decrypt = aead_decrypt,
17766 + .ivsize = AES_BLOCK_SIZE,
17767 + .maxauthsize = SHA384_DIGEST_SIZE,
17770 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17771 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17772 + OP_ALG_AAI_HMAC_PRECOMP,
17778 + .cra_name = "echainiv(authenc(hmac(sha384),"
17780 + .cra_driver_name = "echainiv-authenc-"
17781 + "hmac-sha384-cbc-aes-"
17783 + .cra_blocksize = AES_BLOCK_SIZE,
17785 + .setkey = aead_setkey,
17786 + .setauthsize = aead_setauthsize,
17787 + .encrypt = aead_encrypt,
17788 + .decrypt = aead_decrypt,
17789 + .ivsize = AES_BLOCK_SIZE,
17790 + .maxauthsize = SHA384_DIGEST_SIZE,
17793 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17794 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17795 + OP_ALG_AAI_HMAC_PRECOMP,
17802 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
17803 + .cra_driver_name = "authenc-hmac-sha512-"
17804 + "cbc-aes-caam-qi2",
17805 + .cra_blocksize = AES_BLOCK_SIZE,
17807 + .setkey = aead_setkey,
17808 + .setauthsize = aead_setauthsize,
17809 + .encrypt = aead_encrypt,
17810 + .decrypt = aead_decrypt,
17811 + .ivsize = AES_BLOCK_SIZE,
17812 + .maxauthsize = SHA512_DIGEST_SIZE,
17815 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17816 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17817 + OP_ALG_AAI_HMAC_PRECOMP,
17823 + .cra_name = "echainiv(authenc(hmac(sha512),"
17825 + .cra_driver_name = "echainiv-authenc-"
17826 + "hmac-sha512-cbc-aes-"
17828 + .cra_blocksize = AES_BLOCK_SIZE,
17830 + .setkey = aead_setkey,
17831 + .setauthsize = aead_setauthsize,
17832 + .encrypt = aead_encrypt,
17833 + .decrypt = aead_decrypt,
17834 + .ivsize = AES_BLOCK_SIZE,
17835 + .maxauthsize = SHA512_DIGEST_SIZE,
17838 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17839 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17840 + OP_ALG_AAI_HMAC_PRECOMP,
17847 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
17848 + .cra_driver_name = "authenc-hmac-md5-"
17849 + "cbc-des3_ede-caam-qi2",
17850 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17852 + .setkey = aead_setkey,
17853 + .setauthsize = aead_setauthsize,
17854 + .encrypt = aead_encrypt,
17855 + .decrypt = aead_decrypt,
17856 + .ivsize = DES3_EDE_BLOCK_SIZE,
17857 + .maxauthsize = MD5_DIGEST_SIZE,
17860 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17861 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17862 + OP_ALG_AAI_HMAC_PRECOMP,
17868 + .cra_name = "echainiv(authenc(hmac(md5),"
17869 + "cbc(des3_ede)))",
17870 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
17871 + "cbc-des3_ede-caam-qi2",
17872 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17874 + .setkey = aead_setkey,
17875 + .setauthsize = aead_setauthsize,
17876 + .encrypt = aead_encrypt,
17877 + .decrypt = aead_decrypt,
17878 + .ivsize = DES3_EDE_BLOCK_SIZE,
17879 + .maxauthsize = MD5_DIGEST_SIZE,
17882 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17883 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17884 + OP_ALG_AAI_HMAC_PRECOMP,
17891 + .cra_name = "authenc(hmac(sha1),"
17892 + "cbc(des3_ede))",
17893 + .cra_driver_name = "authenc-hmac-sha1-"
17894 + "cbc-des3_ede-caam-qi2",
17895 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17897 + .setkey = aead_setkey,
17898 + .setauthsize = aead_setauthsize,
17899 + .encrypt = aead_encrypt,
17900 + .decrypt = aead_decrypt,
17901 + .ivsize = DES3_EDE_BLOCK_SIZE,
17902 + .maxauthsize = SHA1_DIGEST_SIZE,
17905 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17906 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17907 + OP_ALG_AAI_HMAC_PRECOMP,
17913 + .cra_name = "echainiv(authenc(hmac(sha1),"
17914 + "cbc(des3_ede)))",
17915 + .cra_driver_name = "echainiv-authenc-"
17917 + "cbc-des3_ede-caam-qi2",
17918 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17920 + .setkey = aead_setkey,
17921 + .setauthsize = aead_setauthsize,
17922 + .encrypt = aead_encrypt,
17923 + .decrypt = aead_decrypt,
17924 + .ivsize = DES3_EDE_BLOCK_SIZE,
17925 + .maxauthsize = SHA1_DIGEST_SIZE,
17928 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17929 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17930 + OP_ALG_AAI_HMAC_PRECOMP,
17937 + .cra_name = "authenc(hmac(sha224),"
17938 + "cbc(des3_ede))",
17939 + .cra_driver_name = "authenc-hmac-sha224-"
17940 + "cbc-des3_ede-caam-qi2",
17941 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17943 + .setkey = aead_setkey,
17944 + .setauthsize = aead_setauthsize,
17945 + .encrypt = aead_encrypt,
17946 + .decrypt = aead_decrypt,
17947 + .ivsize = DES3_EDE_BLOCK_SIZE,
17948 + .maxauthsize = SHA224_DIGEST_SIZE,
17951 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17952 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17953 + OP_ALG_AAI_HMAC_PRECOMP,
17959 + .cra_name = "echainiv(authenc(hmac(sha224),"
17960 + "cbc(des3_ede)))",
17961 + .cra_driver_name = "echainiv-authenc-"
17963 + "cbc-des3_ede-caam-qi2",
17964 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17966 + .setkey = aead_setkey,
17967 + .setauthsize = aead_setauthsize,
17968 + .encrypt = aead_encrypt,
17969 + .decrypt = aead_decrypt,
17970 + .ivsize = DES3_EDE_BLOCK_SIZE,
17971 + .maxauthsize = SHA224_DIGEST_SIZE,
17974 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17975 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17976 + OP_ALG_AAI_HMAC_PRECOMP,
17983 + .cra_name = "authenc(hmac(sha256),"
17984 + "cbc(des3_ede))",
17985 + .cra_driver_name = "authenc-hmac-sha256-"
17986 + "cbc-des3_ede-caam-qi2",
17987 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17989 + .setkey = aead_setkey,
17990 + .setauthsize = aead_setauthsize,
17991 + .encrypt = aead_encrypt,
17992 + .decrypt = aead_decrypt,
17993 + .ivsize = DES3_EDE_BLOCK_SIZE,
17994 + .maxauthsize = SHA256_DIGEST_SIZE,
17997 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17998 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17999 + OP_ALG_AAI_HMAC_PRECOMP,
18005 + .cra_name = "echainiv(authenc(hmac(sha256),"
18006 + "cbc(des3_ede)))",
18007 + .cra_driver_name = "echainiv-authenc-"
18009 + "cbc-des3_ede-caam-qi2",
18010 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18012 + .setkey = aead_setkey,
18013 + .setauthsize = aead_setauthsize,
18014 + .encrypt = aead_encrypt,
18015 + .decrypt = aead_decrypt,
18016 + .ivsize = DES3_EDE_BLOCK_SIZE,
18017 + .maxauthsize = SHA256_DIGEST_SIZE,
18020 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18021 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18022 + OP_ALG_AAI_HMAC_PRECOMP,
18029 + .cra_name = "authenc(hmac(sha384),"
18030 + "cbc(des3_ede))",
18031 + .cra_driver_name = "authenc-hmac-sha384-"
18032 + "cbc-des3_ede-caam-qi2",
18033 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18035 + .setkey = aead_setkey,
18036 + .setauthsize = aead_setauthsize,
18037 + .encrypt = aead_encrypt,
18038 + .decrypt = aead_decrypt,
18039 + .ivsize = DES3_EDE_BLOCK_SIZE,
18040 + .maxauthsize = SHA384_DIGEST_SIZE,
18043 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18044 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18045 + OP_ALG_AAI_HMAC_PRECOMP,
18051 + .cra_name = "echainiv(authenc(hmac(sha384),"
18052 + "cbc(des3_ede)))",
18053 + .cra_driver_name = "echainiv-authenc-"
18055 + "cbc-des3_ede-caam-qi2",
18056 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18058 + .setkey = aead_setkey,
18059 + .setauthsize = aead_setauthsize,
18060 + .encrypt = aead_encrypt,
18061 + .decrypt = aead_decrypt,
18062 + .ivsize = DES3_EDE_BLOCK_SIZE,
18063 + .maxauthsize = SHA384_DIGEST_SIZE,
18066 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18067 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18068 + OP_ALG_AAI_HMAC_PRECOMP,
18075 + .cra_name = "authenc(hmac(sha512),"
18076 + "cbc(des3_ede))",
18077 + .cra_driver_name = "authenc-hmac-sha512-"
18078 + "cbc-des3_ede-caam-qi2",
18079 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18081 + .setkey = aead_setkey,
18082 + .setauthsize = aead_setauthsize,
18083 + .encrypt = aead_encrypt,
18084 + .decrypt = aead_decrypt,
18085 + .ivsize = DES3_EDE_BLOCK_SIZE,
18086 + .maxauthsize = SHA512_DIGEST_SIZE,
18089 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18090 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18091 + OP_ALG_AAI_HMAC_PRECOMP,
18097 + .cra_name = "echainiv(authenc(hmac(sha512),"
18098 + "cbc(des3_ede)))",
18099 + .cra_driver_name = "echainiv-authenc-"
18101 + "cbc-des3_ede-caam-qi2",
18102 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18104 + .setkey = aead_setkey,
18105 + .setauthsize = aead_setauthsize,
18106 + .encrypt = aead_encrypt,
18107 + .decrypt = aead_decrypt,
18108 + .ivsize = DES3_EDE_BLOCK_SIZE,
18109 + .maxauthsize = SHA512_DIGEST_SIZE,
18112 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18113 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18114 + OP_ALG_AAI_HMAC_PRECOMP,
18121 + .cra_name = "authenc(hmac(md5),cbc(des))",
18122 + .cra_driver_name = "authenc-hmac-md5-"
18123 + "cbc-des-caam-qi2",
18124 + .cra_blocksize = DES_BLOCK_SIZE,
18126 + .setkey = aead_setkey,
18127 + .setauthsize = aead_setauthsize,
18128 + .encrypt = aead_encrypt,
18129 + .decrypt = aead_decrypt,
18130 + .ivsize = DES_BLOCK_SIZE,
18131 + .maxauthsize = MD5_DIGEST_SIZE,
18134 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18135 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18136 + OP_ALG_AAI_HMAC_PRECOMP,
18142 + .cra_name = "echainiv(authenc(hmac(md5),"
18144 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
18145 + "cbc-des-caam-qi2",
18146 + .cra_blocksize = DES_BLOCK_SIZE,
18148 + .setkey = aead_setkey,
18149 + .setauthsize = aead_setauthsize,
18150 + .encrypt = aead_encrypt,
18151 + .decrypt = aead_decrypt,
18152 + .ivsize = DES_BLOCK_SIZE,
18153 + .maxauthsize = MD5_DIGEST_SIZE,
18156 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18157 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18158 + OP_ALG_AAI_HMAC_PRECOMP,
18165 + .cra_name = "authenc(hmac(sha1),cbc(des))",
18166 + .cra_driver_name = "authenc-hmac-sha1-"
18167 + "cbc-des-caam-qi2",
18168 + .cra_blocksize = DES_BLOCK_SIZE,
18170 + .setkey = aead_setkey,
18171 + .setauthsize = aead_setauthsize,
18172 + .encrypt = aead_encrypt,
18173 + .decrypt = aead_decrypt,
18174 + .ivsize = DES_BLOCK_SIZE,
18175 + .maxauthsize = SHA1_DIGEST_SIZE,
18178 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18179 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18180 + OP_ALG_AAI_HMAC_PRECOMP,
18186 + .cra_name = "echainiv(authenc(hmac(sha1),"
18188 + .cra_driver_name = "echainiv-authenc-"
18189 + "hmac-sha1-cbc-des-caam-qi2",
18190 + .cra_blocksize = DES_BLOCK_SIZE,
18192 + .setkey = aead_setkey,
18193 + .setauthsize = aead_setauthsize,
18194 + .encrypt = aead_encrypt,
18195 + .decrypt = aead_decrypt,
18196 + .ivsize = DES_BLOCK_SIZE,
18197 + .maxauthsize = SHA1_DIGEST_SIZE,
18200 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18201 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18202 + OP_ALG_AAI_HMAC_PRECOMP,
18209 + .cra_name = "authenc(hmac(sha224),cbc(des))",
18210 + .cra_driver_name = "authenc-hmac-sha224-"
18211 + "cbc-des-caam-qi2",
18212 + .cra_blocksize = DES_BLOCK_SIZE,
18214 + .setkey = aead_setkey,
18215 + .setauthsize = aead_setauthsize,
18216 + .encrypt = aead_encrypt,
18217 + .decrypt = aead_decrypt,
18218 + .ivsize = DES_BLOCK_SIZE,
18219 + .maxauthsize = SHA224_DIGEST_SIZE,
18222 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18223 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18224 + OP_ALG_AAI_HMAC_PRECOMP,
18230 + .cra_name = "echainiv(authenc(hmac(sha224),"
18232 + .cra_driver_name = "echainiv-authenc-"
18233 + "hmac-sha224-cbc-des-"
18235 + .cra_blocksize = DES_BLOCK_SIZE,
18237 + .setkey = aead_setkey,
18238 + .setauthsize = aead_setauthsize,
18239 + .encrypt = aead_encrypt,
18240 + .decrypt = aead_decrypt,
18241 + .ivsize = DES_BLOCK_SIZE,
18242 + .maxauthsize = SHA224_DIGEST_SIZE,
18245 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18246 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18247 + OP_ALG_AAI_HMAC_PRECOMP,
18254 + .cra_name = "authenc(hmac(sha256),cbc(des))",
18255 + .cra_driver_name = "authenc-hmac-sha256-"
18256 + "cbc-des-caam-qi2",
18257 + .cra_blocksize = DES_BLOCK_SIZE,
18259 + .setkey = aead_setkey,
18260 + .setauthsize = aead_setauthsize,
18261 + .encrypt = aead_encrypt,
18262 + .decrypt = aead_decrypt,
18263 + .ivsize = DES_BLOCK_SIZE,
18264 + .maxauthsize = SHA256_DIGEST_SIZE,
18267 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18268 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18269 + OP_ALG_AAI_HMAC_PRECOMP,
18275 + .cra_name = "echainiv(authenc(hmac(sha256),"
18277 + .cra_driver_name = "echainiv-authenc-"
18278 +						     "hmac-sha256-cbc-des-"
18280 + .cra_blocksize = DES_BLOCK_SIZE,
18282 + .setkey = aead_setkey,
18283 + .setauthsize = aead_setauthsize,
18284 + .encrypt = aead_encrypt,
18285 + .decrypt = aead_decrypt,
18286 + .ivsize = DES_BLOCK_SIZE,
18287 + .maxauthsize = SHA256_DIGEST_SIZE,
18290 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18291 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18292 + OP_ALG_AAI_HMAC_PRECOMP,
18299 + .cra_name = "authenc(hmac(sha384),cbc(des))",
18300 + .cra_driver_name = "authenc-hmac-sha384-"
18301 + "cbc-des-caam-qi2",
18302 + .cra_blocksize = DES_BLOCK_SIZE,
18304 + .setkey = aead_setkey,
18305 + .setauthsize = aead_setauthsize,
18306 + .encrypt = aead_encrypt,
18307 + .decrypt = aead_decrypt,
18308 + .ivsize = DES_BLOCK_SIZE,
18309 + .maxauthsize = SHA384_DIGEST_SIZE,
18312 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18313 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18314 + OP_ALG_AAI_HMAC_PRECOMP,
18320 + .cra_name = "echainiv(authenc(hmac(sha384),"
18322 + .cra_driver_name = "echainiv-authenc-"
18323 + "hmac-sha384-cbc-des-"
18325 + .cra_blocksize = DES_BLOCK_SIZE,
18327 + .setkey = aead_setkey,
18328 + .setauthsize = aead_setauthsize,
18329 + .encrypt = aead_encrypt,
18330 + .decrypt = aead_decrypt,
18331 + .ivsize = DES_BLOCK_SIZE,
18332 + .maxauthsize = SHA384_DIGEST_SIZE,
18335 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18336 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18337 + OP_ALG_AAI_HMAC_PRECOMP,
18344 + .cra_name = "authenc(hmac(sha512),cbc(des))",
18345 + .cra_driver_name = "authenc-hmac-sha512-"
18346 + "cbc-des-caam-qi2",
18347 + .cra_blocksize = DES_BLOCK_SIZE,
18349 + .setkey = aead_setkey,
18350 + .setauthsize = aead_setauthsize,
18351 + .encrypt = aead_encrypt,
18352 + .decrypt = aead_decrypt,
18353 + .ivsize = DES_BLOCK_SIZE,
18354 + .maxauthsize = SHA512_DIGEST_SIZE,
18357 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18358 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18359 + OP_ALG_AAI_HMAC_PRECOMP,
18365 + .cra_name = "echainiv(authenc(hmac(sha512),"
18367 + .cra_driver_name = "echainiv-authenc-"
18368 + "hmac-sha512-cbc-des-"
18370 + .cra_blocksize = DES_BLOCK_SIZE,
18372 + .setkey = aead_setkey,
18373 + .setauthsize = aead_setauthsize,
18374 + .encrypt = aead_encrypt,
18375 + .decrypt = aead_decrypt,
18376 + .ivsize = DES_BLOCK_SIZE,
18377 + .maxauthsize = SHA512_DIGEST_SIZE,
18380 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18381 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18382 + OP_ALG_AAI_HMAC_PRECOMP,
18389 + .cra_name = "authenc(hmac(md5),"
18390 + "rfc3686(ctr(aes)))",
18391 + .cra_driver_name = "authenc-hmac-md5-"
18392 + "rfc3686-ctr-aes-caam-qi2",
18393 + .cra_blocksize = 1,
18395 + .setkey = aead_setkey,
18396 + .setauthsize = aead_setauthsize,
18397 + .encrypt = aead_encrypt,
18398 + .decrypt = aead_decrypt,
18399 + .ivsize = CTR_RFC3686_IV_SIZE,
18400 + .maxauthsize = MD5_DIGEST_SIZE,
18403 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18404 + OP_ALG_AAI_CTR_MOD128,
18405 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18406 + OP_ALG_AAI_HMAC_PRECOMP,
18413 + .cra_name = "seqiv(authenc("
18414 + "hmac(md5),rfc3686(ctr(aes))))",
18415 + .cra_driver_name = "seqiv-authenc-hmac-md5-"
18416 + "rfc3686-ctr-aes-caam-qi2",
18417 + .cra_blocksize = 1,
18419 + .setkey = aead_setkey,
18420 + .setauthsize = aead_setauthsize,
18421 + .encrypt = aead_encrypt,
18422 + .decrypt = aead_decrypt,
18423 + .ivsize = CTR_RFC3686_IV_SIZE,
18424 + .maxauthsize = MD5_DIGEST_SIZE,
18427 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18428 + OP_ALG_AAI_CTR_MOD128,
18429 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18430 + OP_ALG_AAI_HMAC_PRECOMP,
18438 + .cra_name = "authenc(hmac(sha1),"
18439 + "rfc3686(ctr(aes)))",
18440 + .cra_driver_name = "authenc-hmac-sha1-"
18441 + "rfc3686-ctr-aes-caam-qi2",
18442 + .cra_blocksize = 1,
18444 + .setkey = aead_setkey,
18445 + .setauthsize = aead_setauthsize,
18446 + .encrypt = aead_encrypt,
18447 + .decrypt = aead_decrypt,
18448 + .ivsize = CTR_RFC3686_IV_SIZE,
18449 + .maxauthsize = SHA1_DIGEST_SIZE,
18452 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18453 + OP_ALG_AAI_CTR_MOD128,
18454 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18455 + OP_ALG_AAI_HMAC_PRECOMP,
18462 + .cra_name = "seqiv(authenc("
18463 + "hmac(sha1),rfc3686(ctr(aes))))",
18464 + .cra_driver_name = "seqiv-authenc-hmac-sha1-"
18465 + "rfc3686-ctr-aes-caam-qi2",
18466 + .cra_blocksize = 1,
18468 + .setkey = aead_setkey,
18469 + .setauthsize = aead_setauthsize,
18470 + .encrypt = aead_encrypt,
18471 + .decrypt = aead_decrypt,
18472 + .ivsize = CTR_RFC3686_IV_SIZE,
18473 + .maxauthsize = SHA1_DIGEST_SIZE,
18476 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18477 + OP_ALG_AAI_CTR_MOD128,
18478 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18479 + OP_ALG_AAI_HMAC_PRECOMP,
18487 + .cra_name = "authenc(hmac(sha224),"
18488 + "rfc3686(ctr(aes)))",
18489 + .cra_driver_name = "authenc-hmac-sha224-"
18490 + "rfc3686-ctr-aes-caam-qi2",
18491 + .cra_blocksize = 1,
18493 + .setkey = aead_setkey,
18494 + .setauthsize = aead_setauthsize,
18495 + .encrypt = aead_encrypt,
18496 + .decrypt = aead_decrypt,
18497 + .ivsize = CTR_RFC3686_IV_SIZE,
18498 + .maxauthsize = SHA224_DIGEST_SIZE,
18501 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18502 + OP_ALG_AAI_CTR_MOD128,
18503 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18504 + OP_ALG_AAI_HMAC_PRECOMP,
18511 + .cra_name = "seqiv(authenc("
18512 + "hmac(sha224),rfc3686(ctr(aes))))",
18513 + .cra_driver_name = "seqiv-authenc-hmac-sha224-"
18514 + "rfc3686-ctr-aes-caam-qi2",
18515 + .cra_blocksize = 1,
18517 + .setkey = aead_setkey,
18518 + .setauthsize = aead_setauthsize,
18519 + .encrypt = aead_encrypt,
18520 + .decrypt = aead_decrypt,
18521 + .ivsize = CTR_RFC3686_IV_SIZE,
18522 + .maxauthsize = SHA224_DIGEST_SIZE,
18525 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18526 + OP_ALG_AAI_CTR_MOD128,
18527 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18528 + OP_ALG_AAI_HMAC_PRECOMP,
18536 + .cra_name = "authenc(hmac(sha256),"
18537 + "rfc3686(ctr(aes)))",
18538 + .cra_driver_name = "authenc-hmac-sha256-"
18539 + "rfc3686-ctr-aes-caam-qi2",
18540 + .cra_blocksize = 1,
18542 + .setkey = aead_setkey,
18543 + .setauthsize = aead_setauthsize,
18544 + .encrypt = aead_encrypt,
18545 + .decrypt = aead_decrypt,
18546 + .ivsize = CTR_RFC3686_IV_SIZE,
18547 + .maxauthsize = SHA256_DIGEST_SIZE,
18550 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18551 + OP_ALG_AAI_CTR_MOD128,
18552 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18553 + OP_ALG_AAI_HMAC_PRECOMP,
18560 + .cra_name = "seqiv(authenc(hmac(sha256),"
18561 + "rfc3686(ctr(aes))))",
18562 + .cra_driver_name = "seqiv-authenc-hmac-sha256-"
18563 + "rfc3686-ctr-aes-caam-qi2",
18564 + .cra_blocksize = 1,
18566 + .setkey = aead_setkey,
18567 + .setauthsize = aead_setauthsize,
18568 + .encrypt = aead_encrypt,
18569 + .decrypt = aead_decrypt,
18570 + .ivsize = CTR_RFC3686_IV_SIZE,
18571 + .maxauthsize = SHA256_DIGEST_SIZE,
18574 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18575 + OP_ALG_AAI_CTR_MOD128,
18576 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18577 + OP_ALG_AAI_HMAC_PRECOMP,
18585 + .cra_name = "authenc(hmac(sha384),"
18586 + "rfc3686(ctr(aes)))",
18587 + .cra_driver_name = "authenc-hmac-sha384-"
18588 + "rfc3686-ctr-aes-caam-qi2",
18589 + .cra_blocksize = 1,
18591 + .setkey = aead_setkey,
18592 + .setauthsize = aead_setauthsize,
18593 + .encrypt = aead_encrypt,
18594 + .decrypt = aead_decrypt,
18595 + .ivsize = CTR_RFC3686_IV_SIZE,
18596 + .maxauthsize = SHA384_DIGEST_SIZE,
18599 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18600 + OP_ALG_AAI_CTR_MOD128,
18601 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18602 + OP_ALG_AAI_HMAC_PRECOMP,
18609 + .cra_name = "seqiv(authenc(hmac(sha384),"
18610 + "rfc3686(ctr(aes))))",
18611 + .cra_driver_name = "seqiv-authenc-hmac-sha384-"
18612 + "rfc3686-ctr-aes-caam-qi2",
18613 + .cra_blocksize = 1,
18615 + .setkey = aead_setkey,
18616 + .setauthsize = aead_setauthsize,
18617 + .encrypt = aead_encrypt,
18618 + .decrypt = aead_decrypt,
18619 + .ivsize = CTR_RFC3686_IV_SIZE,
18620 + .maxauthsize = SHA384_DIGEST_SIZE,
18623 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18624 + OP_ALG_AAI_CTR_MOD128,
18625 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18626 + OP_ALG_AAI_HMAC_PRECOMP,
18634 + .cra_name = "authenc(hmac(sha512),"
18635 + "rfc3686(ctr(aes)))",
18636 + .cra_driver_name = "authenc-hmac-sha512-"
18637 + "rfc3686-ctr-aes-caam-qi2",
18638 + .cra_blocksize = 1,
18640 + .setkey = aead_setkey,
18641 + .setauthsize = aead_setauthsize,
18642 + .encrypt = aead_encrypt,
18643 + .decrypt = aead_decrypt,
18644 + .ivsize = CTR_RFC3686_IV_SIZE,
18645 + .maxauthsize = SHA512_DIGEST_SIZE,
18648 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18649 + OP_ALG_AAI_CTR_MOD128,
18650 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18651 + OP_ALG_AAI_HMAC_PRECOMP,
18658 + .cra_name = "seqiv(authenc(hmac(sha512),"
18659 + "rfc3686(ctr(aes))))",
18660 + .cra_driver_name = "seqiv-authenc-hmac-sha512-"
18661 + "rfc3686-ctr-aes-caam-qi2",
18662 + .cra_blocksize = 1,
18664 + .setkey = aead_setkey,
18665 + .setauthsize = aead_setauthsize,
18666 + .encrypt = aead_encrypt,
18667 + .decrypt = aead_decrypt,
18668 + .ivsize = CTR_RFC3686_IV_SIZE,
18669 + .maxauthsize = SHA512_DIGEST_SIZE,
18672 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18673 + OP_ALG_AAI_CTR_MOD128,
18674 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18675 + OP_ALG_AAI_HMAC_PRECOMP,
18683 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
18684 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
18685 + .cra_blocksize = AES_BLOCK_SIZE,
18687 + .setkey = tls_setkey,
18688 + .setauthsize = tls_setauthsize,
18689 + .encrypt = tls_encrypt,
18690 + .decrypt = tls_decrypt,
18691 + .ivsize = AES_BLOCK_SIZE,
18692 + .maxauthsize = SHA1_DIGEST_SIZE,
18695 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18696 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18697 + OP_ALG_AAI_HMAC_PRECOMP,
18702 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
18705 + struct caam_crypto_alg *t_alg;
18706 + struct crypto_alg *alg;
18708 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
18710 + return ERR_PTR(-ENOMEM);
18712 + alg = &t_alg->crypto_alg;
18714 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
18715 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
18716 + template->driver_name);
18717 + alg->cra_module = THIS_MODULE;
18718 + alg->cra_exit = caam_cra_exit;
18719 + alg->cra_priority = CAAM_CRA_PRIORITY;
18720 + alg->cra_blocksize = template->blocksize;
18721 + alg->cra_alignmask = 0;
18722 + alg->cra_ctxsize = sizeof(struct caam_ctx);
18723 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
18725 + switch (template->type) {
18726 + case CRYPTO_ALG_TYPE_GIVCIPHER:
18727 + alg->cra_init = caam_cra_init_ablkcipher;
18728 + alg->cra_type = &crypto_givcipher_type;
18729 + alg->cra_ablkcipher = template->template_ablkcipher;
18731 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
18732 + alg->cra_init = caam_cra_init_ablkcipher;
18733 + alg->cra_type = &crypto_ablkcipher_type;
18734 + alg->cra_ablkcipher = template->template_ablkcipher;
18738 + t_alg->caam.class1_alg_type = template->class1_alg_type;
18739 + t_alg->caam.class2_alg_type = template->class2_alg_type;
18744 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
18746 + struct aead_alg *alg = &t_alg->aead;
18748 + alg->base.cra_module = THIS_MODULE;
18749 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
18750 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
18751 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
18753 + alg->init = caam_cra_init_aead;
18754 + alg->exit = caam_cra_exit_aead;
18757 +/* max hash key is max split key size */
18758 +#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
18760 +#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
18761 +#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
18763 +#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
18764 + CAAM_MAX_HASH_KEY_SIZE)
18765 +#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
18767 +/* caam context sizes for hashes: running digest + 8 */
18768 +#define HASH_MSG_LEN 8
18769 +#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
18771 +enum hash_optype {
18780 + * caam_hash_ctx - ahash per-session context
18781 + * @flc: Flow Contexts array
18782 + * @flc_dma: I/O virtual addresses of the Flow Contexts
18783 + * @key: virtual address of the authentication key
18784 + * @dev: dpseci device
18785 + * @ctx_len: size of Context Register
18786 + * @adata: hashing algorithm details
18788 +struct caam_hash_ctx {
18789 + struct caam_flc flc[HASH_NUM_OP];
18790 + dma_addr_t flc_dma[HASH_NUM_OP];
18791 + u8 key[CAAM_MAX_HASH_KEY_SIZE];
18792 + struct device *dev;
18794 + struct alginfo adata;
18798 +struct caam_hash_state {
18799 + struct caam_request caam_req;
18800 + dma_addr_t buf_dma;
18801 + dma_addr_t ctx_dma;
18802 + u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
18804 + u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
18806 + u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
18807 + int (*update)(struct ahash_request *req);
18808 + int (*final)(struct ahash_request *req);
18809 + int (*finup)(struct ahash_request *req);
18813 +struct caam_export_state {
18814 + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
18815 + u8 caam_ctx[MAX_CTX_LEN];
18817 + int (*update)(struct ahash_request *req);
18818 + int (*final)(struct ahash_request *req);
18819 + int (*finup)(struct ahash_request *req);
18822 +static inline void switch_buf(struct caam_hash_state *state)
18824 + state->current_buf ^= 1;
18827 +static inline u8 *current_buf(struct caam_hash_state *state)
18829 + return state->current_buf ? state->buf_1 : state->buf_0;
18832 +static inline u8 *alt_buf(struct caam_hash_state *state)
18834 + return state->current_buf ? state->buf_0 : state->buf_1;
18837 +static inline int *current_buflen(struct caam_hash_state *state)
18839 + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
18842 +static inline int *alt_buflen(struct caam_hash_state *state)
18844 + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
18847 +/* Map current buffer in state (if length > 0) and put it in link table */
18848 +static inline int buf_map_to_qm_sg(struct device *dev,
18849 + struct dpaa2_sg_entry *qm_sg,
18850 + struct caam_hash_state *state)
18852 + int buflen = *current_buflen(state);
18857 + state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
18859 + if (dma_mapping_error(dev, state->buf_dma)) {
18860 + dev_err(dev, "unable to map buf\n");
18861 + state->buf_dma = 0;
18865 + dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
18870 +/* Map state->caam_ctx, and add it to link table */
18871 +static inline int ctx_map_to_qm_sg(struct device *dev,
18872 + struct caam_hash_state *state, int ctx_len,
18873 + struct dpaa2_sg_entry *qm_sg, u32 flag)
18875 + state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
18876 + if (dma_mapping_error(dev, state->ctx_dma)) {
18877 + dev_err(dev, "unable to map ctx\n");
18878 + state->ctx_dma = 0;
18882 + dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
18887 +static int ahash_set_sh_desc(struct crypto_ahash *ahash)
18889 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
18890 + int digestsize = crypto_ahash_digestsize(ahash);
18891 + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
18892 + struct caam_flc *flc;
18895 + ctx->adata.key_virt = ctx->key;
18896 + ctx->adata.key_inline = true;
18898 + /* ahash_update shared descriptor */
18899 + flc = &ctx->flc[UPDATE];
18900 + desc = flc->sh_desc;
18901 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
18902 + ctx->ctx_len, true, priv->sec_attr.era);
18903 + flc->flc[1] = desc_len(desc); /* SDL */
18904 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
18905 + desc_bytes(desc), DMA_BIDIRECTIONAL);
18907 + print_hex_dump(KERN_ERR,
18908 + "ahash update shdesc@" __stringify(__LINE__)": ",
18909 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
18912 + /* ahash_update_first shared descriptor */
18913 + flc = &ctx->flc[UPDATE_FIRST];
18914 + desc = flc->sh_desc;
18915 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
18916 + ctx->ctx_len, false, priv->sec_attr.era);
18917 + flc->flc[1] = desc_len(desc); /* SDL */
18918 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
18919 + desc_bytes(desc), DMA_BIDIRECTIONAL);
18921 + print_hex_dump(KERN_ERR,
18922 + "ahash update first shdesc@" __stringify(__LINE__)": ",
18923 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
18926 + /* ahash_final shared descriptor */
18927 + flc = &ctx->flc[FINALIZE];
18928 + desc = flc->sh_desc;
18929 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
18930 + ctx->ctx_len, true, priv->sec_attr.era);
18931 + flc->flc[1] = desc_len(desc); /* SDL */
18932 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
18933 + desc_bytes(desc), DMA_BIDIRECTIONAL);
18935 + print_hex_dump(KERN_ERR,
18936 + "ahash final shdesc@" __stringify(__LINE__)": ",
18937 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
18940 + /* ahash_digest shared descriptor */
18941 + flc = &ctx->flc[DIGEST];
18942 + desc = flc->sh_desc;
18943 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
18944 + ctx->ctx_len, false, priv->sec_attr.era);
18945 + flc->flc[1] = desc_len(desc); /* SDL */
18946 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
18947 + desc_bytes(desc), DMA_BIDIRECTIONAL);
18949 + print_hex_dump(KERN_ERR,
18950 + "ahash digest shdesc@" __stringify(__LINE__)": ",
18951 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
18957 +/* Digest hash size if it is too large */
18958 +static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
18959 + u32 *keylen, u8 *key_out, u32 digestsize)
18961 + struct caam_request *req_ctx;
18963 + struct split_key_sh_result result;
18964 + dma_addr_t src_dma, dst_dma;
18965 + struct caam_flc *flc;
18966 + dma_addr_t flc_dma;
18967 + int ret = -ENOMEM;
18968 + struct dpaa2_fl_entry *in_fle, *out_fle;
18970 + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
18974 + in_fle = &req_ctx->fd_flt[1];
18975 + out_fle = &req_ctx->fd_flt[0];
18977 + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
18981 + src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
18983 + if (dma_mapping_error(ctx->dev, src_dma)) {
18984 + dev_err(ctx->dev, "unable to map key input memory\n");
18985 + goto err_src_dma;
18987 + dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
18988 + DMA_FROM_DEVICE);
18989 + if (dma_mapping_error(ctx->dev, dst_dma)) {
18990 + dev_err(ctx->dev, "unable to map key output memory\n");
18991 + goto err_dst_dma;
18994 + desc = flc->sh_desc;
18996 + init_sh_desc(desc, 0);
18998 + /* descriptor to perform unkeyed hash on key_in */
18999 + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
19000 + OP_ALG_AS_INITFINAL);
19001 + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
19002 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
19003 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
19004 + LDST_SRCDST_BYTE_CONTEXT);
19006 + flc->flc[1] = desc_len(desc); /* SDL */
19007 + flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
19008 + desc_bytes(desc), DMA_TO_DEVICE);
19009 + if (dma_mapping_error(ctx->dev, flc_dma)) {
19010 + dev_err(ctx->dev, "unable to map shared descriptor\n");
19011 + goto err_flc_dma;
19014 + dpaa2_fl_set_final(in_fle, true);
19015 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
19016 + dpaa2_fl_set_addr(in_fle, src_dma);
19017 + dpaa2_fl_set_len(in_fle, *keylen);
19018 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19019 + dpaa2_fl_set_addr(out_fle, dst_dma);
19020 + dpaa2_fl_set_len(out_fle, digestsize);
19023 + print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
19024 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
19025 + print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
19026 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
19030 + init_completion(&result.completion);
19031 + result.dev = ctx->dev;
19033 + req_ctx->flc = flc;
19034 + req_ctx->flc_dma = flc_dma;
19035 + req_ctx->cbk = split_key_sh_done;
19036 + req_ctx->ctx = &result;
19038 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19039 + if (ret == -EINPROGRESS) {
19040 + /* in progress */
19041 + wait_for_completion(&result.completion);
19042 + ret = result.err;
19044 + print_hex_dump(KERN_ERR,
19045 + "digested key@" __stringify(__LINE__)": ",
19046 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize,
19051 + dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
19054 + dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
19056 + dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
19062 + *keylen = digestsize;
19067 +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
19068 + unsigned int keylen)
19070 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19071 + unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
19072 + unsigned int digestsize = crypto_ahash_digestsize(ahash);
19074 + u8 *hashed_key = NULL;
19077 + dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
19080 + if (keylen > blocksize) {
19081 + hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
19082 + GFP_KERNEL | GFP_DMA);
19085 + ret = hash_digest_key(ctx, key, &keylen, hashed_key,
19088 + goto bad_free_key;
19089 + key = hashed_key;
19092 + ctx->adata.keylen = keylen;
19093 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
19094 + OP_ALG_ALGSEL_MASK);
19095 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
19096 + goto bad_free_key;
19098 + memcpy(ctx->key, key, keylen);
19100 + kfree(hashed_key);
19101 + return ahash_set_sh_desc(ahash);
19103 + kfree(hashed_key);
19104 + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
19108 +static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
19109 + struct ahash_request *req, int dst_len)
19111 + struct caam_hash_state *state = ahash_request_ctx(req);
19113 + if (edesc->src_nents)
19114 + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
19115 + if (edesc->dst_dma)
19116 + dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
19118 + if (edesc->qm_sg_bytes)
19119 + dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
19122 + if (state->buf_dma) {
19123 + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
19125 + state->buf_dma = 0;
19129 +static inline void ahash_unmap_ctx(struct device *dev,
19130 + struct ahash_edesc *edesc,
19131 + struct ahash_request *req, int dst_len,
19134 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19135 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19136 + struct caam_hash_state *state = ahash_request_ctx(req);
19138 + if (state->ctx_dma) {
19139 + dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
19140 + state->ctx_dma = 0;
19142 + ahash_unmap(dev, edesc, req, dst_len);
19145 +static void ahash_done(void *cbk_ctx, u32 status)
19147 + struct crypto_async_request *areq = cbk_ctx;
19148 + struct ahash_request *req = ahash_request_cast(areq);
19149 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19150 + struct caam_hash_state *state = ahash_request_ctx(req);
19151 + struct ahash_edesc *edesc = state->caam_req.edesc;
19152 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19153 + int digestsize = crypto_ahash_digestsize(ahash);
19157 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19160 + if (unlikely(status)) {
19161 + caam_qi2_strstatus(ctx->dev, status);
19165 + ahash_unmap(ctx->dev, edesc, req, digestsize);
19166 + qi_cache_free(edesc);
19169 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19170 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19171 + ctx->ctx_len, 1);
19173 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19174 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19178 + req->base.complete(&req->base, ecode);
19181 +static void ahash_done_bi(void *cbk_ctx, u32 status)
19183 + struct crypto_async_request *areq = cbk_ctx;
19184 + struct ahash_request *req = ahash_request_cast(areq);
19185 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19186 + struct caam_hash_state *state = ahash_request_ctx(req);
19187 + struct ahash_edesc *edesc = state->caam_req.edesc;
19188 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19191 + int digestsize = crypto_ahash_digestsize(ahash);
19193 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19196 + if (unlikely(status)) {
19197 + caam_qi2_strstatus(ctx->dev, status);
19201 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
19202 + switch_buf(state);
19203 + qi_cache_free(edesc);
19206 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19207 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19208 + ctx->ctx_len, 1);
19210 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19211 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19215 + req->base.complete(&req->base, ecode);
19218 +static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
19220 + struct crypto_async_request *areq = cbk_ctx;
19221 + struct ahash_request *req = ahash_request_cast(areq);
19222 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19223 + struct caam_hash_state *state = ahash_request_ctx(req);
19224 + struct ahash_edesc *edesc = state->caam_req.edesc;
19225 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19226 + int digestsize = crypto_ahash_digestsize(ahash);
19230 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19233 + if (unlikely(status)) {
19234 + caam_qi2_strstatus(ctx->dev, status);
19238 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
19239 + qi_cache_free(edesc);
19242 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19243 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19244 + ctx->ctx_len, 1);
19246 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19247 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19251 + req->base.complete(&req->base, ecode);
19254 +static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
19256 + struct crypto_async_request *areq = cbk_ctx;
19257 + struct ahash_request *req = ahash_request_cast(areq);
19258 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19259 + struct caam_hash_state *state = ahash_request_ctx(req);
19260 + struct ahash_edesc *edesc = state->caam_req.edesc;
19261 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19264 + int digestsize = crypto_ahash_digestsize(ahash);
19266 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19269 + if (unlikely(status)) {
19270 + caam_qi2_strstatus(ctx->dev, status);
19274 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
19275 + switch_buf(state);
19276 + qi_cache_free(edesc);
19279 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19280 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19281 + ctx->ctx_len, 1);
19283 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19284 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19288 + req->base.complete(&req->base, ecode);
19291 +static int ahash_update_ctx(struct ahash_request *req)
19293 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19294 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19295 + struct caam_hash_state *state = ahash_request_ctx(req);
19296 + struct caam_request *req_ctx = &state->caam_req;
19297 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19298 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19299 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19300 + GFP_KERNEL : GFP_ATOMIC;
19301 + u8 *buf = current_buf(state);
19302 + int *buflen = current_buflen(state);
19303 + u8 *next_buf = alt_buf(state);
19304 + int *next_buflen = alt_buflen(state), last_buflen;
19305 + int in_len = *buflen + req->nbytes, to_hash;
19306 + int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
19307 + struct ahash_edesc *edesc;
19310 + last_buflen = *next_buflen;
19311 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
19312 + to_hash = in_len - *next_buflen;
19315 + struct dpaa2_sg_entry *sg_table;
19317 + src_nents = sg_nents_for_len(req->src,
19318 + req->nbytes - (*next_buflen));
19319 + if (src_nents < 0) {
19320 + dev_err(ctx->dev, "Invalid number of src SG.\n");
19321 + return src_nents;
19325 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19327 + if (!mapped_nents) {
19328 + dev_err(ctx->dev, "unable to DMA map source\n");
19332 + mapped_nents = 0;
19335 + /* allocate space for base edesc and link tables */
19336 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19338 + dma_unmap_sg(ctx->dev, req->src, src_nents,
19343 + edesc->src_nents = src_nents;
19344 + qm_sg_src_index = 1 + (*buflen ? 1 : 0);
19345 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
19346 + sizeof(*sg_table);
19347 + sg_table = &edesc->sgt[0];
19349 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
19350 + DMA_BIDIRECTIONAL);
19354 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
19358 + if (mapped_nents) {
19359 + sg_to_qm_sg_last(req->src, mapped_nents,
19360 + sg_table + qm_sg_src_index, 0);
19361 + if (*next_buflen)
19362 + scatterwalk_map_and_copy(next_buf, req->src,
19363 + to_hash - *buflen,
19364 + *next_buflen, 0);
19366 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
19370 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
19371 + qm_sg_bytes, DMA_TO_DEVICE);
19372 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19373 + dev_err(ctx->dev, "unable to map S/G table\n");
19377 + edesc->qm_sg_bytes = qm_sg_bytes;
19379 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19380 + dpaa2_fl_set_final(in_fle, true);
19381 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19382 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19383 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
19384 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19385 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
19386 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
19388 + req_ctx->flc = &ctx->flc[UPDATE];
19389 + req_ctx->flc_dma = ctx->flc_dma[UPDATE];
19390 + req_ctx->cbk = ahash_done_bi;
19391 + req_ctx->ctx = &req->base;
19392 + req_ctx->edesc = edesc;
19394 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19395 + if (ret != -EINPROGRESS &&
19396 + !(ret == -EBUSY &&
19397 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19399 + } else if (*next_buflen) {
19400 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
19402 + *buflen = *next_buflen;
19403 + *next_buflen = last_buflen;
19406 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
19407 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
19408 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
19409 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
19410 + *next_buflen, 1);
19415 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
19416 + qi_cache_free(edesc);
19420 +static int ahash_final_ctx(struct ahash_request *req)
19422 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19423 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19424 + struct caam_hash_state *state = ahash_request_ctx(req);
19425 + struct caam_request *req_ctx = &state->caam_req;
19426 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19427 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19428 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19429 + GFP_KERNEL : GFP_ATOMIC;
19430 + int buflen = *current_buflen(state);
19431 + int qm_sg_bytes, qm_sg_src_index;
19432 + int digestsize = crypto_ahash_digestsize(ahash);
19433 + struct ahash_edesc *edesc;
19434 + struct dpaa2_sg_entry *sg_table;
19437 + /* allocate space for base edesc and link tables */
19438 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19442 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
19443 + qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
19444 + sg_table = &edesc->sgt[0];
19446 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
19451 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
19455 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
19457 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
19459 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19460 + dev_err(ctx->dev, "unable to map S/G table\n");
19464 + edesc->qm_sg_bytes = qm_sg_bytes;
19466 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19467 + DMA_FROM_DEVICE);
19468 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19469 + dev_err(ctx->dev, "unable to map dst\n");
19470 + edesc->dst_dma = 0;
19475 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19476 + dpaa2_fl_set_final(in_fle, true);
19477 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19478 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19479 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
19480 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19481 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19482 + dpaa2_fl_set_len(out_fle, digestsize);
19484 + req_ctx->flc = &ctx->flc[FINALIZE];
19485 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
19486 + req_ctx->cbk = ahash_done_ctx_src;
19487 + req_ctx->ctx = &req->base;
19488 + req_ctx->edesc = edesc;
19490 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19491 + if (ret == -EINPROGRESS ||
19492 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19496 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
19497 + qi_cache_free(edesc);
19501 +static int ahash_finup_ctx(struct ahash_request *req)
19503 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19504 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19505 + struct caam_hash_state *state = ahash_request_ctx(req);
19506 + struct caam_request *req_ctx = &state->caam_req;
19507 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19508 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19509 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19510 + GFP_KERNEL : GFP_ATOMIC;
19511 + int buflen = *current_buflen(state);
19512 + int qm_sg_bytes, qm_sg_src_index;
19513 + int src_nents, mapped_nents;
19514 + int digestsize = crypto_ahash_digestsize(ahash);
19515 + struct ahash_edesc *edesc;
19516 + struct dpaa2_sg_entry *sg_table;
19519 + src_nents = sg_nents_for_len(req->src, req->nbytes);
19520 + if (src_nents < 0) {
19521 + dev_err(ctx->dev, "Invalid number of src SG.\n");
19522 + return src_nents;
19526 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19528 + if (!mapped_nents) {
19529 + dev_err(ctx->dev, "unable to DMA map source\n");
19533 + mapped_nents = 0;
19536 + /* allocate space for base edesc and link tables */
19537 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19539 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
19543 + edesc->src_nents = src_nents;
19544 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
19545 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
19546 + sg_table = &edesc->sgt[0];
19548 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
19553 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
19557 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
19559 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
19561 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19562 + dev_err(ctx->dev, "unable to map S/G table\n");
19566 + edesc->qm_sg_bytes = qm_sg_bytes;
19568 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19569 + DMA_FROM_DEVICE);
19570 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19571 + dev_err(ctx->dev, "unable to map dst\n");
19572 + edesc->dst_dma = 0;
19577 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19578 + dpaa2_fl_set_final(in_fle, true);
19579 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19580 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19581 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
19582 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19583 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19584 + dpaa2_fl_set_len(out_fle, digestsize);
19586 + req_ctx->flc = &ctx->flc[FINALIZE];
19587 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
19588 + req_ctx->cbk = ahash_done_ctx_src;
19589 + req_ctx->ctx = &req->base;
19590 + req_ctx->edesc = edesc;
19592 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19593 + if (ret == -EINPROGRESS ||
19594 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19598 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
19599 + qi_cache_free(edesc);
19603 +static int ahash_digest(struct ahash_request *req)
19605 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19606 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19607 + struct caam_hash_state *state = ahash_request_ctx(req);
19608 + struct caam_request *req_ctx = &state->caam_req;
19609 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19610 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19611 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19612 + GFP_KERNEL : GFP_ATOMIC;
19613 + int digestsize = crypto_ahash_digestsize(ahash);
19614 + int src_nents, mapped_nents;
19615 + struct ahash_edesc *edesc;
19616 + int ret = -ENOMEM;
19618 + state->buf_dma = 0;
19620 + src_nents = sg_nents_for_len(req->src, req->nbytes);
19621 + if (src_nents < 0) {
19622 + dev_err(ctx->dev, "Invalid number of src SG.\n");
19623 + return src_nents;
19627 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19629 + if (!mapped_nents) {
19630 + dev_err(ctx->dev, "unable to map source for DMA\n");
19634 + mapped_nents = 0;
19637 + /* allocate space for base edesc and link tables */
19638 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19640 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
19644 + edesc->src_nents = src_nents;
19645 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19647 + if (mapped_nents > 1) {
19649 + struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
19651 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
19652 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
19653 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
19654 + qm_sg_bytes, DMA_TO_DEVICE);
19655 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19656 + dev_err(ctx->dev, "unable to map S/G table\n");
19659 + edesc->qm_sg_bytes = qm_sg_bytes;
19660 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19661 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19663 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
19664 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
19667 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19668 + DMA_FROM_DEVICE);
19669 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19670 + dev_err(ctx->dev, "unable to map dst\n");
19671 + edesc->dst_dma = 0;
19675 + dpaa2_fl_set_final(in_fle, true);
19676 + dpaa2_fl_set_len(in_fle, req->nbytes);
19677 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19678 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19679 + dpaa2_fl_set_len(out_fle, digestsize);
19681 + req_ctx->flc = &ctx->flc[DIGEST];
19682 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
19683 + req_ctx->cbk = ahash_done;
19684 + req_ctx->ctx = &req->base;
19685 + req_ctx->edesc = edesc;
19686 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19687 + if (ret == -EINPROGRESS ||
19688 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19692 + ahash_unmap(ctx->dev, edesc, req, digestsize);
19693 + qi_cache_free(edesc);
19697 +static int ahash_final_no_ctx(struct ahash_request *req)
19699 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19700 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19701 + struct caam_hash_state *state = ahash_request_ctx(req);
19702 + struct caam_request *req_ctx = &state->caam_req;
19703 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19704 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19705 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19706 + GFP_KERNEL : GFP_ATOMIC;
19707 + u8 *buf = current_buf(state);
19708 + int buflen = *current_buflen(state);
19709 + int digestsize = crypto_ahash_digestsize(ahash);
19710 + struct ahash_edesc *edesc;
19711 + int ret = -ENOMEM;
19713 + /* allocate space for base edesc and link tables */
19714 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19718 + state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
19719 + if (dma_mapping_error(ctx->dev, state->buf_dma)) {
19720 + dev_err(ctx->dev, "unable to map src\n");
19724 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19725 + DMA_FROM_DEVICE);
19726 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19727 + dev_err(ctx->dev, "unable to map dst\n");
19728 + edesc->dst_dma = 0;
19732 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19733 + dpaa2_fl_set_final(in_fle, true);
19734 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
19735 + dpaa2_fl_set_addr(in_fle, state->buf_dma);
19736 + dpaa2_fl_set_len(in_fle, buflen);
19737 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19738 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19739 + dpaa2_fl_set_len(out_fle, digestsize);
19741 + req_ctx->flc = &ctx->flc[DIGEST];
19742 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
19743 + req_ctx->cbk = ahash_done;
19744 + req_ctx->ctx = &req->base;
19745 + req_ctx->edesc = edesc;
19747 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19748 + if (ret == -EINPROGRESS ||
19749 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19753 + ahash_unmap(ctx->dev, edesc, req, digestsize);
19754 + qi_cache_free(edesc);
19758 +static int ahash_update_no_ctx(struct ahash_request *req)
19760 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19761 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19762 + struct caam_hash_state *state = ahash_request_ctx(req);
19763 + struct caam_request *req_ctx = &state->caam_req;
19764 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19765 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19766 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19767 + GFP_KERNEL : GFP_ATOMIC;
19768 + u8 *buf = current_buf(state);
19769 + int *buflen = current_buflen(state);
19770 + u8 *next_buf = alt_buf(state);
19771 + int *next_buflen = alt_buflen(state);
19772 + int in_len = *buflen + req->nbytes, to_hash;
19773 + int qm_sg_bytes, src_nents, mapped_nents;
19774 + struct ahash_edesc *edesc;
19777 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
19778 + to_hash = in_len - *next_buflen;
19781 + struct dpaa2_sg_entry *sg_table;
19783 + src_nents = sg_nents_for_len(req->src,
19784 + req->nbytes - *next_buflen);
19785 + if (src_nents < 0) {
19786 + dev_err(ctx->dev, "Invalid number of src SG.\n");
19787 + return src_nents;
19791 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19793 + if (!mapped_nents) {
19794 + dev_err(ctx->dev, "unable to DMA map source\n");
19798 + mapped_nents = 0;
19801 + /* allocate space for base edesc and link tables */
19802 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19804 + dma_unmap_sg(ctx->dev, req->src, src_nents,
19809 + edesc->src_nents = src_nents;
19810 + qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
19811 + sg_table = &edesc->sgt[0];
19813 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
19817 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
19819 + if (*next_buflen)
19820 + scatterwalk_map_and_copy(next_buf, req->src,
19821 + to_hash - *buflen,
19822 + *next_buflen, 0);
19824 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
19825 + qm_sg_bytes, DMA_TO_DEVICE);
19826 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19827 + dev_err(ctx->dev, "unable to map S/G table\n");
19831 + edesc->qm_sg_bytes = qm_sg_bytes;
19833 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
19834 + ctx->ctx_len, DMA_FROM_DEVICE);
19835 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
19836 + dev_err(ctx->dev, "unable to map ctx\n");
19837 + state->ctx_dma = 0;
19842 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19843 + dpaa2_fl_set_final(in_fle, true);
19844 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19845 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19846 + dpaa2_fl_set_len(in_fle, to_hash);
19847 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19848 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
19849 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
19851 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
19852 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
19853 + req_ctx->cbk = ahash_done_ctx_dst;
19854 + req_ctx->ctx = &req->base;
19855 + req_ctx->edesc = edesc;
19857 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19858 + if (ret != -EINPROGRESS &&
19859 + !(ret == -EBUSY &&
19860 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19863 + state->update = ahash_update_ctx;
19864 + state->finup = ahash_finup_ctx;
19865 + state->final = ahash_final_ctx;
19866 + } else if (*next_buflen) {
19867 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
19869 + *buflen = *next_buflen;
19870 + *next_buflen = 0;
19873 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
19874 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
19875 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
19876 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
19877 + *next_buflen, 1);
19882 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
19883 + qi_cache_free(edesc);
19887 +static int ahash_finup_no_ctx(struct ahash_request *req)
19889 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19890 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19891 + struct caam_hash_state *state = ahash_request_ctx(req);
19892 + struct caam_request *req_ctx = &state->caam_req;
19893 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19894 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19895 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19896 + GFP_KERNEL : GFP_ATOMIC;
19897 + int buflen = *current_buflen(state);
19898 + int qm_sg_bytes, src_nents, mapped_nents;
19899 + int digestsize = crypto_ahash_digestsize(ahash);
19900 + struct ahash_edesc *edesc;
19901 + struct dpaa2_sg_entry *sg_table;
19904 + src_nents = sg_nents_for_len(req->src, req->nbytes);
19905 + if (src_nents < 0) {
19906 + dev_err(ctx->dev, "Invalid number of src SG.\n");
19907 + return src_nents;
19911 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19913 + if (!mapped_nents) {
19914 + dev_err(ctx->dev, "unable to DMA map source\n");
19918 + mapped_nents = 0;
19921 + /* allocate space for base edesc and link tables */
19922 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19924 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
19928 + edesc->src_nents = src_nents;
19929 + qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
19930 + sg_table = &edesc->sgt[0];
19932 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
19936 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
19938 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
19940 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19941 + dev_err(ctx->dev, "unable to map S/G table\n");
19945 + edesc->qm_sg_bytes = qm_sg_bytes;
19947 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19948 + DMA_FROM_DEVICE);
19949 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19950 + dev_err(ctx->dev, "unable to map dst\n");
19951 + edesc->dst_dma = 0;
19956 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19957 + dpaa2_fl_set_final(in_fle, true);
19958 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19959 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19960 + dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
19961 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19962 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19963 + dpaa2_fl_set_len(out_fle, digestsize);
19965 + req_ctx->flc = &ctx->flc[DIGEST];
19966 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
19967 + req_ctx->cbk = ahash_done;
19968 + req_ctx->ctx = &req->base;
19969 + req_ctx->edesc = edesc;
19970 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19971 + if (ret != -EINPROGRESS &&
19972 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19977 + ahash_unmap(ctx->dev, edesc, req, digestsize);
19978 + qi_cache_free(edesc);
19982 +static int ahash_update_first(struct ahash_request *req)
19984 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19985 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19986 + struct caam_hash_state *state = ahash_request_ctx(req);
19987 + struct caam_request *req_ctx = &state->caam_req;
19988 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19989 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19990 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19991 + GFP_KERNEL : GFP_ATOMIC;
19992 + u8 *next_buf = alt_buf(state);
19993 + int *next_buflen = alt_buflen(state);
19995 + int src_nents, mapped_nents;
19996 + struct ahash_edesc *edesc;
19999 + *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
20001 + to_hash = req->nbytes - *next_buflen;
20004 + struct dpaa2_sg_entry *sg_table;
20006 + src_nents = sg_nents_for_len(req->src,
20007 + req->nbytes - (*next_buflen));
20008 + if (src_nents < 0) {
20009 + dev_err(ctx->dev, "Invalid number of src SG.\n");
20010 + return src_nents;
20014 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
20016 + if (!mapped_nents) {
20017 + dev_err(ctx->dev, "unable to map source for DMA\n");
20021 + mapped_nents = 0;
20024 + /* allocate space for base edesc and link tables */
20025 + edesc = qi_cache_zalloc(GFP_DMA | flags);
20027 + dma_unmap_sg(ctx->dev, req->src, src_nents,
20032 + edesc->src_nents = src_nents;
20033 + sg_table = &edesc->sgt[0];
20035 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
20036 + dpaa2_fl_set_final(in_fle, true);
20037 + dpaa2_fl_set_len(in_fle, to_hash);
20039 + if (mapped_nents > 1) {
20042 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
20043 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
20044 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
20047 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
20048 + dev_err(ctx->dev, "unable to map S/G table\n");
20052 + edesc->qm_sg_bytes = qm_sg_bytes;
20053 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
20054 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
20056 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
20057 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
20060 + if (*next_buflen)
20061 + scatterwalk_map_and_copy(next_buf, req->src, to_hash,
20062 + *next_buflen, 0);
20064 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
20065 + ctx->ctx_len, DMA_FROM_DEVICE);
20066 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
20067 + dev_err(ctx->dev, "unable to map ctx\n");
20068 + state->ctx_dma = 0;
20073 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
20074 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
20075 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
20077 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
20078 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
20079 + req_ctx->cbk = ahash_done_ctx_dst;
20080 + req_ctx->ctx = &req->base;
20081 + req_ctx->edesc = edesc;
20083 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
20084 + if (ret != -EINPROGRESS &&
20085 + !(ret == -EBUSY && req->base.flags &
20086 + CRYPTO_TFM_REQ_MAY_BACKLOG))
20089 + state->update = ahash_update_ctx;
20090 + state->finup = ahash_finup_ctx;
20091 + state->final = ahash_final_ctx;
20092 + } else if (*next_buflen) {
20093 + state->update = ahash_update_no_ctx;
20094 + state->finup = ahash_finup_no_ctx;
20095 + state->final = ahash_final_no_ctx;
20096 + scatterwalk_map_and_copy(next_buf, req->src, 0,
20098 + switch_buf(state);
20101 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
20102 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
20107 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
20108 + qi_cache_free(edesc);
20112 +static int ahash_finup_first(struct ahash_request *req)
20114 + return ahash_digest(req);
20117 +static int ahash_init(struct ahash_request *req)
20119 + struct caam_hash_state *state = ahash_request_ctx(req);
20121 + state->update = ahash_update_first;
20122 + state->finup = ahash_finup_first;
20123 + state->final = ahash_final_no_ctx;
20125 + state->ctx_dma = 0;
20126 + state->current_buf = 0;
20127 + state->buf_dma = 0;
20128 + state->buflen_0 = 0;
20129 + state->buflen_1 = 0;
20134 +static int ahash_update(struct ahash_request *req)
20136 + struct caam_hash_state *state = ahash_request_ctx(req);
20138 + return state->update(req);
20141 +static int ahash_finup(struct ahash_request *req)
20143 + struct caam_hash_state *state = ahash_request_ctx(req);
20145 + return state->finup(req);
20148 +static int ahash_final(struct ahash_request *req)
20150 + struct caam_hash_state *state = ahash_request_ctx(req);
20152 + return state->final(req);
20155 +static int ahash_export(struct ahash_request *req, void *out)
20157 + struct caam_hash_state *state = ahash_request_ctx(req);
20158 + struct caam_export_state *export = out;
20162 + if (state->current_buf) {
20163 + buf = state->buf_1;
20164 + len = state->buflen_1;
20166 + buf = state->buf_0;
20167 + len = state->buflen_0;
20170 + memcpy(export->buf, buf, len);
20171 + memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
20172 + export->buflen = len;
20173 + export->update = state->update;
20174 + export->final = state->final;
20175 + export->finup = state->finup;
20180 +static int ahash_import(struct ahash_request *req, const void *in)
20182 + struct caam_hash_state *state = ahash_request_ctx(req);
20183 + const struct caam_export_state *export = in;
20185 + memset(state, 0, sizeof(*state));
20186 + memcpy(state->buf_0, export->buf, export->buflen);
20187 + memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
20188 + state->buflen_0 = export->buflen;
20189 + state->update = export->update;
20190 + state->final = export->final;
20191 + state->finup = export->finup;
20196 +struct caam_hash_template {
20197 + char name[CRYPTO_MAX_ALG_NAME];
20198 + char driver_name[CRYPTO_MAX_ALG_NAME];
20199 + char hmac_name[CRYPTO_MAX_ALG_NAME];
20200 + char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
20201 + unsigned int blocksize;
20202 + struct ahash_alg template_ahash;
20206 +/* ahash descriptors */
20207 +static struct caam_hash_template driver_hash[] = {
20210 + .driver_name = "sha1-caam-qi2",
20211 + .hmac_name = "hmac(sha1)",
20212 + .hmac_driver_name = "hmac-sha1-caam-qi2",
20213 + .blocksize = SHA1_BLOCK_SIZE,
20214 + .template_ahash = {
20215 + .init = ahash_init,
20216 + .update = ahash_update,
20217 + .final = ahash_final,
20218 + .finup = ahash_finup,
20219 + .digest = ahash_digest,
20220 + .export = ahash_export,
20221 + .import = ahash_import,
20222 + .setkey = ahash_setkey,
20224 + .digestsize = SHA1_DIGEST_SIZE,
20225 + .statesize = sizeof(struct caam_export_state),
20228 + .alg_type = OP_ALG_ALGSEL_SHA1,
20230 + .name = "sha224",
20231 + .driver_name = "sha224-caam-qi2",
20232 + .hmac_name = "hmac(sha224)",
20233 + .hmac_driver_name = "hmac-sha224-caam-qi2",
20234 + .blocksize = SHA224_BLOCK_SIZE,
20235 + .template_ahash = {
20236 + .init = ahash_init,
20237 + .update = ahash_update,
20238 + .final = ahash_final,
20239 + .finup = ahash_finup,
20240 + .digest = ahash_digest,
20241 + .export = ahash_export,
20242 + .import = ahash_import,
20243 + .setkey = ahash_setkey,
20245 + .digestsize = SHA224_DIGEST_SIZE,
20246 + .statesize = sizeof(struct caam_export_state),
20249 + .alg_type = OP_ALG_ALGSEL_SHA224,
20251 + .name = "sha256",
20252 + .driver_name = "sha256-caam-qi2",
20253 + .hmac_name = "hmac(sha256)",
20254 + .hmac_driver_name = "hmac-sha256-caam-qi2",
20255 + .blocksize = SHA256_BLOCK_SIZE,
20256 + .template_ahash = {
20257 + .init = ahash_init,
20258 + .update = ahash_update,
20259 + .final = ahash_final,
20260 + .finup = ahash_finup,
20261 + .digest = ahash_digest,
20262 + .export = ahash_export,
20263 + .import = ahash_import,
20264 + .setkey = ahash_setkey,
20266 + .digestsize = SHA256_DIGEST_SIZE,
20267 + .statesize = sizeof(struct caam_export_state),
20270 + .alg_type = OP_ALG_ALGSEL_SHA256,
20272 + .name = "sha384",
20273 + .driver_name = "sha384-caam-qi2",
20274 + .hmac_name = "hmac(sha384)",
20275 + .hmac_driver_name = "hmac-sha384-caam-qi2",
20276 + .blocksize = SHA384_BLOCK_SIZE,
20277 + .template_ahash = {
20278 + .init = ahash_init,
20279 + .update = ahash_update,
20280 + .final = ahash_final,
20281 + .finup = ahash_finup,
20282 + .digest = ahash_digest,
20283 + .export = ahash_export,
20284 + .import = ahash_import,
20285 + .setkey = ahash_setkey,
20287 + .digestsize = SHA384_DIGEST_SIZE,
20288 + .statesize = sizeof(struct caam_export_state),
20291 + .alg_type = OP_ALG_ALGSEL_SHA384,
20293 + .name = "sha512",
20294 + .driver_name = "sha512-caam-qi2",
20295 + .hmac_name = "hmac(sha512)",
20296 + .hmac_driver_name = "hmac-sha512-caam-qi2",
20297 + .blocksize = SHA512_BLOCK_SIZE,
20298 + .template_ahash = {
20299 + .init = ahash_init,
20300 + .update = ahash_update,
20301 + .final = ahash_final,
20302 + .finup = ahash_finup,
20303 + .digest = ahash_digest,
20304 + .export = ahash_export,
20305 + .import = ahash_import,
20306 + .setkey = ahash_setkey,
20308 + .digestsize = SHA512_DIGEST_SIZE,
20309 + .statesize = sizeof(struct caam_export_state),
20312 + .alg_type = OP_ALG_ALGSEL_SHA512,
20315 + .driver_name = "md5-caam-qi2",
20316 + .hmac_name = "hmac(md5)",
20317 + .hmac_driver_name = "hmac-md5-caam-qi2",
20318 + .blocksize = MD5_BLOCK_WORDS * 4,
20319 + .template_ahash = {
20320 + .init = ahash_init,
20321 + .update = ahash_update,
20322 + .final = ahash_final,
20323 + .finup = ahash_finup,
20324 + .digest = ahash_digest,
20325 + .export = ahash_export,
20326 + .import = ahash_import,
20327 + .setkey = ahash_setkey,
20329 + .digestsize = MD5_DIGEST_SIZE,
20330 + .statesize = sizeof(struct caam_export_state),
20333 + .alg_type = OP_ALG_ALGSEL_MD5,
20337 +struct caam_hash_alg {
20338 + struct list_head entry;
20339 + struct device *dev;
20341 + struct ahash_alg ahash_alg;
20344 +static int caam_hash_cra_init(struct crypto_tfm *tfm)
20346 + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
20347 + struct crypto_alg *base = tfm->__crt_alg;
20348 + struct hash_alg_common *halg =
20349 + container_of(base, struct hash_alg_common, base);
20350 + struct ahash_alg *alg =
20351 + container_of(halg, struct ahash_alg, halg);
20352 + struct caam_hash_alg *caam_hash =
20353 + container_of(alg, struct caam_hash_alg, ahash_alg);
20354 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
20355 + /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
20356 + static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
20357 + HASH_MSG_LEN + SHA1_DIGEST_SIZE,
20358 + HASH_MSG_LEN + 32,
20359 + HASH_MSG_LEN + SHA256_DIGEST_SIZE,
20360 + HASH_MSG_LEN + 64,
20361 + HASH_MSG_LEN + SHA512_DIGEST_SIZE };
20362 + dma_addr_t dma_addr;
20365 + ctx->dev = caam_hash->dev;
20367 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
20368 + DMA_BIDIRECTIONAL,
20369 + DMA_ATTR_SKIP_CPU_SYNC);
20370 + if (dma_mapping_error(ctx->dev, dma_addr)) {
20371 + dev_err(ctx->dev, "unable to map shared descriptors\n");
20375 + for (i = 0; i < HASH_NUM_OP; i++)
20376 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
20378 + /* copy descriptor header template value */
20379 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20381 + ctx->ctx_len = runninglen[(ctx->adata.algtype &
20382 + OP_ALG_ALGSEL_SUBMASK) >>
20383 + OP_ALG_ALGSEL_SHIFT];
20385 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
20386 + sizeof(struct caam_hash_state));
20388 + return ahash_set_sh_desc(ahash);
20391 +static void caam_hash_cra_exit(struct crypto_tfm *tfm)
20393 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
20395 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
20396 + DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
20399 +static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
20400 + struct caam_hash_template *template, bool keyed)
20402 + struct caam_hash_alg *t_alg;
20403 + struct ahash_alg *halg;
20404 + struct crypto_alg *alg;
20406 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
20408 + return ERR_PTR(-ENOMEM);
20410 + t_alg->ahash_alg = template->template_ahash;
20411 + halg = &t_alg->ahash_alg;
20412 + alg = &halg->halg.base;
20415 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
20416 + template->hmac_name);
20417 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
20418 + template->hmac_driver_name);
20420 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
20422 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
20423 + template->driver_name);
20425 + alg->cra_module = THIS_MODULE;
20426 + alg->cra_init = caam_hash_cra_init;
20427 + alg->cra_exit = caam_hash_cra_exit;
20428 + alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
20429 + alg->cra_priority = CAAM_CRA_PRIORITY;
20430 + alg->cra_blocksize = template->blocksize;
20431 + alg->cra_alignmask = 0;
20432 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
20433 + alg->cra_type = &crypto_ahash_type;
20435 + t_alg->alg_type = template->alg_type;
20436 + t_alg->dev = dev;
20441 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
20443 + struct dpaa2_caam_priv_per_cpu *ppriv;
20445 + ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
20446 + napi_schedule_irqoff(&ppriv->napi);
20449 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
20451 + struct device *dev = priv->dev;
20452 + struct dpaa2_io_notification_ctx *nctx;
20453 + struct dpaa2_caam_priv_per_cpu *ppriv;
20454 + int err, i = 0, cpu;
20456 + for_each_online_cpu(cpu) {
20457 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20458 + ppriv->priv = priv;
20459 + nctx = &ppriv->nctx;
20460 + nctx->is_cdan = 0;
20461 + nctx->id = ppriv->rsp_fqid;
20462 + nctx->desired_cpu = cpu;
20463 + nctx->cb = dpaa2_caam_fqdan_cb;
20465 + /* Register notification callbacks */
20466 + err = dpaa2_io_service_register(NULL, nctx);
20467 + if (unlikely(err)) {
20468 + dev_err(dev, "notification register failed\n");
20473 + ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
20475 + if (unlikely(!ppriv->store)) {
20476 + dev_err(dev, "dpaa2_io_store_create() failed\n");
20480 + if (++i == priv->num_pairs)
20487 + for_each_online_cpu(cpu) {
20488 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20489 + if (!ppriv->nctx.cb)
20491 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
20494 + for_each_online_cpu(cpu) {
20495 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20496 + if (!ppriv->store)
20498 + dpaa2_io_store_destroy(ppriv->store);
20504 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
20506 + struct dpaa2_caam_priv_per_cpu *ppriv;
20509 + for_each_online_cpu(cpu) {
20510 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20511 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
20512 + dpaa2_io_store_destroy(ppriv->store);
20514 + if (++i == priv->num_pairs)
20519 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
20521 + struct dpseci_rx_queue_cfg rx_queue_cfg;
20522 + struct device *dev = priv->dev;
20523 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20524 + struct dpaa2_caam_priv_per_cpu *ppriv;
20525 + int err = 0, i = 0, cpu;
20527 + /* Configure Rx queues */
20528 + for_each_online_cpu(cpu) {
20529 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20531 + rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
20532 + DPSECI_QUEUE_OPT_USER_CTX;
20533 + rx_queue_cfg.order_preservation_en = 0;
20534 + rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
20535 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
20537 + * Rx priority (WQ) doesn't really matter, since we use
20538 + * pull mode, i.e. volatile dequeues from specific FQs
20540 + rx_queue_cfg.dest_cfg.priority = 0;
20541 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
20543 + err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
20546 + dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
20551 + if (++i == priv->num_pairs)
20558 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
20560 + struct device *dev = priv->dev;
20562 + if (!priv->cscn_mem)
20565 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
20566 + kfree(priv->cscn_mem);
20569 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
20571 + struct device *dev = priv->dev;
20572 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20574 + dpaa2_dpseci_congestion_free(priv);
20575 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
20578 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
20579 + const struct dpaa2_fd *fd)
20581 + struct caam_request *req;
20584 + if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
20585 + dev_err(priv->dev, "Only Frame List FD format is supported!\n");
20589 + fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
20590 + if (unlikely(fd_err))
20591 + dev_err(priv->dev, "FD error: %08x\n", fd_err);
20594 + * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
20595 + * in FD[ERR] or FD[FRC].
20597 + req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
20598 + dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
20599 + DMA_BIDIRECTIONAL);
20600 + req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
20603 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
20607 + /* Retry while portal is busy */
20609 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
20611 + } while (err == -EBUSY);
20613 + if (unlikely(err))
20614 + dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
20619 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
20621 + struct dpaa2_dq *dq;
20622 + int cleaned = 0, is_last;
20625 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
20626 + if (unlikely(!dq)) {
20627 + if (unlikely(!is_last)) {
20628 + dev_dbg(ppriv->priv->dev,
20629 + "FQ %d returned no valid frames\n",
20630 + ppriv->rsp_fqid);
20632 + * MUST retry until we get some sort of
20633 + * valid response token (be it "empty dequeue"
20634 + * or a valid frame).
20642 + dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
20644 + } while (!is_last);
20649 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
20651 + struct dpaa2_caam_priv_per_cpu *ppriv;
20652 + struct dpaa2_caam_priv *priv;
20653 + int err, cleaned = 0, store_cleaned;
20655 + ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
20656 + priv = ppriv->priv;
20658 + if (unlikely(dpaa2_caam_pull_fq(ppriv)))
20662 + store_cleaned = dpaa2_caam_store_consume(ppriv);
20663 + cleaned += store_cleaned;
20665 + if (store_cleaned == 0 ||
20666 + cleaned > budget - DPAA2_CAAM_STORE_SIZE)
20669 + /* Try to dequeue some more */
20670 + err = dpaa2_caam_pull_fq(ppriv);
20671 + if (unlikely(err))
20675 + if (cleaned < budget) {
20676 + napi_complete_done(napi, cleaned);
20677 + err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
20678 + if (unlikely(err))
20679 + dev_err(priv->dev, "Notification rearm failed: %d\n",
20686 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
20689 + struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
20690 + struct device *dev = priv->dev;
20694 + * Congestion group feature supported starting with DPSECI API v5.1
20695 + * and only when object has been created with this capability.
20697 + if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
20698 + !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
20701 + priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
20702 + GFP_KERNEL | GFP_DMA);
20703 + if (!priv->cscn_mem)
20706 + priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
20707 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
20708 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
20709 + if (dma_mapping_error(dev, priv->cscn_dma)) {
20710 + dev_err(dev, "Error mapping CSCN memory area\n");
20712 + goto err_dma_map;
20715 + cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
20716 + cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
20717 + cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
20718 + cong_notif_cfg.message_ctx = (u64)priv;
20719 + cong_notif_cfg.message_iova = priv->cscn_dma;
20720 + cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
20721 + DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
20722 + DPSECI_CGN_MODE_COHERENT_WRITE;
20724 + err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
20725 + &cong_notif_cfg);
20727 + dev_err(dev, "dpseci_set_congestion_notification failed\n");
20728 + goto err_set_cong;
20734 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
20736 + kfree(priv->cscn_mem);
20741 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
20743 + struct device *dev = &ls_dev->dev;
20744 + struct dpaa2_caam_priv *priv;
20745 + struct dpaa2_caam_priv_per_cpu *ppriv;
20749 + priv = dev_get_drvdata(dev);
20752 + priv->dpsec_id = ls_dev->obj_desc.id;
20754 + /* Get a handle for the DPSECI this interface is associate with */
20755 + err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
20757 + dev_err(dev, "dpsec_open() failed: %d\n", err);
20761 + dev_info(dev, "Opened dpseci object successfully\n");
20763 + err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
20764 + &priv->minor_ver);
20766 + dev_err(dev, "dpseci_get_api_version() failed\n");
20767 + goto err_get_vers;
20770 + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
20771 + &priv->dpseci_attr);
20773 + dev_err(dev, "dpseci_get_attributes() failed\n");
20774 + goto err_get_vers;
20777 + err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
20778 + &priv->sec_attr);
20780 + dev_err(dev, "dpseci_get_sec_attr() failed\n");
20781 + goto err_get_vers;
20784 + err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
20786 + dev_err(dev, "setup_congestion() failed\n");
20787 + goto err_get_vers;
20790 + priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
20791 + priv->dpseci_attr.num_tx_queues);
20792 + if (priv->num_pairs > num_online_cpus()) {
20793 + dev_warn(dev, "%d queues won't be used\n",
20794 + priv->num_pairs - num_online_cpus());
20795 + priv->num_pairs = num_online_cpus();
20798 + for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
20799 + err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
20800 + &priv->rx_queue_attr[i]);
20802 + dev_err(dev, "dpseci_get_rx_queue() failed\n");
20803 + goto err_get_rx_queue;
20807 + for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
20808 + err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
20809 + &priv->tx_queue_attr[i]);
20811 + dev_err(dev, "dpseci_get_tx_queue() failed\n");
20812 + goto err_get_rx_queue;
20817 + for_each_online_cpu(cpu) {
20818 + dev_info(dev, "prio %d: rx queue %d, tx queue %d\n", i,
20819 + priv->rx_queue_attr[i].fqid,
20820 + priv->tx_queue_attr[i].fqid);
20822 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20823 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
20824 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
20827 + ppriv->net_dev.dev = *dev;
20828 + INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
20829 + netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
20830 + DPAA2_CAAM_NAPI_WEIGHT);
20831 + if (++i == priv->num_pairs)
20838 + dpaa2_dpseci_congestion_free(priv);
20840 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
20845 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
20847 + struct device *dev = priv->dev;
20848 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20849 + struct dpaa2_caam_priv_per_cpu *ppriv;
20852 + for (i = 0; i < priv->num_pairs; i++) {
20853 + ppriv = per_cpu_ptr(priv->ppriv, i);
20854 + napi_enable(&ppriv->napi);
20857 + err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
20859 + dev_err(dev, "dpseci_enable() failed\n");
20863 + dev_info(dev, "DPSECI version %d.%d\n",
20865 + priv->minor_ver);
20870 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
20872 + struct device *dev = priv->dev;
20873 + struct dpaa2_caam_priv_per_cpu *ppriv;
20874 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20875 + int i, err = 0, enabled;
20877 + err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
20879 + dev_err(dev, "dpseci_disable() failed\n");
20883 + err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
20885 + dev_err(dev, "dpseci_is_enabled() failed\n");
20889 + dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
20891 + for (i = 0; i < priv->num_pairs; i++) {
20892 + ppriv = per_cpu_ptr(priv->ppriv, i);
20893 + napi_disable(&ppriv->napi);
20894 + netif_napi_del(&ppriv->napi);
20900 +static struct list_head alg_list;
20901 +static struct list_head hash_list;
20903 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
20905 + struct device *dev;
20906 + struct dpaa2_caam_priv *priv;
20908 + bool registered = false;
20911 + * There is no way to get CAAM endianness - there is no direct register
20912 + * space access and MC f/w does not provide this attribute.
20913 + * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
20916 + caam_little_end = true;
20918 + caam_imx = false;
20920 + dev = &dpseci_dev->dev;
20922 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
20926 + dev_set_drvdata(dev, priv);
20928 + priv->domain = iommu_get_domain_for_dev(dev);
20930 + qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
20931 + 0, SLAB_CACHE_DMA, NULL);
20933 + dev_err(dev, "Can't allocate SEC cache\n");
20935 + goto err_qicache;
20938 + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
20940 + dev_err(dev, "dma_set_mask_and_coherent() failed\n");
20941 + goto err_dma_mask;
20944 + /* Obtain a MC portal */
20945 + err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
20947 + dev_err(dev, "MC portal allocation failed\n");
20948 + goto err_dma_mask;
20951 + priv->ppriv = alloc_percpu(*priv->ppriv);
20952 + if (!priv->ppriv) {
20953 + dev_err(dev, "alloc_percpu() failed\n");
20954 + goto err_alloc_ppriv;
20957 + /* DPSECI initialization */
20958 + err = dpaa2_dpseci_setup(dpseci_dev);
20960 + dev_err(dev, "dpaa2_dpseci_setup() failed\n");
20961 + goto err_dpseci_setup;
20965 + err = dpaa2_dpseci_dpio_setup(priv);
20967 + dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
20968 + goto err_dpio_setup;
20971 + /* DPSECI binding to DPIO */
20972 + err = dpaa2_dpseci_bind(priv);
20974 + dev_err(dev, "dpaa2_dpseci_bind() failed\n");
20978 + /* DPSECI enable */
20979 + err = dpaa2_dpseci_enable(priv);
20981 + dev_err(dev, "dpaa2_dpseci_enable() failed");
20985 + /* register crypto algorithms the device supports */
20986 + INIT_LIST_HEAD(&alg_list);
20987 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
20988 + struct caam_crypto_alg *t_alg;
20989 + struct caam_alg_template *alg = driver_algs + i;
20990 + u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
20992 + /* Skip DES algorithms if not supported by device */
20993 + if (!priv->sec_attr.des_acc_num &&
20994 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
20995 + (alg_sel == OP_ALG_ALGSEL_DES)))
20998 + /* Skip AES algorithms if not supported by device */
20999 + if (!priv->sec_attr.aes_acc_num &&
21000 + (alg_sel == OP_ALG_ALGSEL_AES))
21003 + t_alg = caam_alg_alloc(alg);
21004 + if (IS_ERR(t_alg)) {
21005 + err = PTR_ERR(t_alg);
21006 + dev_warn(dev, "%s alg allocation failed: %d\n",
21007 + alg->driver_name, err);
21010 + t_alg->caam.dev = dev;
21012 + err = crypto_register_alg(&t_alg->crypto_alg);
21014 + dev_warn(dev, "%s alg registration failed: %d\n",
21015 + t_alg->crypto_alg.cra_driver_name, err);
21020 + list_add_tail(&t_alg->entry, &alg_list);
21021 + registered = true;
21024 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
21025 + struct caam_aead_alg *t_alg = driver_aeads + i;
21026 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
21027 + OP_ALG_ALGSEL_MASK;
21028 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
21029 + OP_ALG_ALGSEL_MASK;
21031 + /* Skip DES algorithms if not supported by device */
21032 + if (!priv->sec_attr.des_acc_num &&
21033 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
21034 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
21037 + /* Skip AES algorithms if not supported by device */
21038 + if (!priv->sec_attr.aes_acc_num &&
21039 + (c1_alg_sel == OP_ALG_ALGSEL_AES))
21043 + * Skip algorithms requiring message digests
21044 + * if MD not supported by device.
21046 + if (!priv->sec_attr.md_acc_num && c2_alg_sel)
21049 + t_alg->caam.dev = dev;
21050 + caam_aead_alg_init(t_alg);
21052 + err = crypto_register_aead(&t_alg->aead);
21054 + dev_warn(dev, "%s alg registration failed: %d\n",
21055 + t_alg->aead.base.cra_driver_name, err);
21059 + t_alg->registered = true;
21060 + registered = true;
21063 + dev_info(dev, "algorithms registered in /proc/crypto\n");
21065 + /* register hash algorithms the device supports */
21066 + INIT_LIST_HEAD(&hash_list);
21069 + * Skip registration of any hashing algorithms if MD block
21070 + * is not present.
21072 + if (!priv->sec_attr.md_acc_num)
21075 + for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
21076 + struct caam_hash_alg *t_alg;
21077 + struct caam_hash_template *alg = driver_hash + i;
21079 + /* register hmac version */
21080 + t_alg = caam_hash_alloc(dev, alg, true);
21081 + if (IS_ERR(t_alg)) {
21082 + err = PTR_ERR(t_alg);
21083 + dev_warn(dev, "%s hash alg allocation failed: %d\n",
21084 + alg->driver_name, err);
21088 + err = crypto_register_ahash(&t_alg->ahash_alg);
21090 + dev_warn(dev, "%s alg registration failed: %d\n",
21091 + t_alg->ahash_alg.halg.base.cra_driver_name,
21095 + list_add_tail(&t_alg->entry, &hash_list);
21098 + /* register unkeyed version */
21099 + t_alg = caam_hash_alloc(dev, alg, false);
21100 + if (IS_ERR(t_alg)) {
21101 + err = PTR_ERR(t_alg);
21102 + dev_warn(dev, "%s alg allocation failed: %d\n",
21103 + alg->driver_name, err);
21107 + err = crypto_register_ahash(&t_alg->ahash_alg);
21109 + dev_warn(dev, "%s alg registration failed: %d\n",
21110 + t_alg->ahash_alg.halg.base.cra_driver_name,
21114 + list_add_tail(&t_alg->entry, &hash_list);
21117 + if (!list_empty(&hash_list))
21118 + dev_info(dev, "hash algorithms registered in /proc/crypto\n");
21123 + dpaa2_dpseci_dpio_free(priv);
21125 + dpaa2_dpseci_free(priv);
21127 + free_percpu(priv->ppriv);
21129 + fsl_mc_portal_free(priv->mc_io);
21131 + kmem_cache_destroy(qi_cache);
21133 + dev_set_drvdata(dev, NULL);
21138 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
21140 + struct device *dev;
21141 + struct dpaa2_caam_priv *priv;
21144 + dev = &ls_dev->dev;
21145 + priv = dev_get_drvdata(dev);
21147 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
21148 + struct caam_aead_alg *t_alg = driver_aeads + i;
21150 + if (t_alg->registered)
21151 + crypto_unregister_aead(&t_alg->aead);
21154 + if (alg_list.next) {
21155 + struct caam_crypto_alg *t_alg, *n;
21157 + list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
21158 + crypto_unregister_alg(&t_alg->crypto_alg);
21159 + list_del(&t_alg->entry);
21164 + if (hash_list.next) {
21165 + struct caam_hash_alg *t_hash_alg, *p;
21167 + list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
21168 + crypto_unregister_ahash(&t_hash_alg->ahash_alg);
21169 + list_del(&t_hash_alg->entry);
21170 + kfree(t_hash_alg);
21174 + dpaa2_dpseci_disable(priv);
21175 + dpaa2_dpseci_dpio_free(priv);
21176 + dpaa2_dpseci_free(priv);
21177 + free_percpu(priv->ppriv);
21178 + fsl_mc_portal_free(priv->mc_io);
21179 + dev_set_drvdata(dev, NULL);
21180 + kmem_cache_destroy(qi_cache);
21185 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
21187 + struct dpaa2_fd fd;
21188 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
21189 + int err = 0, i, id;
21192 + return PTR_ERR(req);
21194 + if (priv->cscn_mem) {
21195 + dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
21197 + DMA_FROM_DEVICE);
21198 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
21199 + dev_dbg_ratelimited(dev, "Dropping request\n");
21204 + dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
21206 + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
21207 + DMA_BIDIRECTIONAL);
21208 + if (dma_mapping_error(dev, req->fd_flt_dma)) {
21209 + dev_err(dev, "DMA mapping error for QI enqueue request\n");
21213 + memset(&fd, 0, sizeof(fd));
21214 + dpaa2_fd_set_format(&fd, dpaa2_fd_list);
21215 + dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
21216 + dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
21217 + dpaa2_fd_set_flc(&fd, req->flc_dma);
21220 + * There is no guarantee that preemption is disabled here,
21221 + * thus take action.
21223 + preempt_disable();
21224 + id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
21225 + for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
21226 + err = dpaa2_io_service_enqueue_fq(NULL,
21227 + priv->tx_queue_attr[id].fqid,
21229 + if (err != -EBUSY)
21232 + preempt_enable();
21234 + if (unlikely(err < 0)) {
21235 + dev_err(dev, "Error enqueuing frame: %d\n", err);
21239 + return -EINPROGRESS;
21242 + dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
21243 + DMA_BIDIRECTIONAL);
21246 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
21248 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
21250 + .vendor = FSL_MC_VENDOR_FREESCALE,
21251 + .obj_type = "dpseci",
21253 + { .vendor = 0x0 }
21256 +static struct fsl_mc_driver dpaa2_caam_driver = {
21258 + .name = KBUILD_MODNAME,
21259 + .owner = THIS_MODULE,
21261 + .probe = dpaa2_caam_probe,
21262 + .remove = dpaa2_caam_remove,
21263 + .match_id_table = dpaa2_caam_match_id_table
21266 +MODULE_LICENSE("Dual BSD/GPL");
21267 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
21268 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
21270 +module_fsl_mc_driver(dpaa2_caam_driver);
21272 +++ b/drivers/crypto/caam/caamalg_qi2.h
21275 + * Copyright 2015-2016 Freescale Semiconductor Inc.
21276 + * Copyright 2017 NXP
21278 + * Redistribution and use in source and binary forms, with or without
21279 + * modification, are permitted provided that the following conditions are met:
21280 + * * Redistributions of source code must retain the above copyright
21281 + * notice, this list of conditions and the following disclaimer.
21282 + * * Redistributions in binary form must reproduce the above copyright
21283 + * notice, this list of conditions and the following disclaimer in the
21284 + * documentation and/or other materials provided with the distribution.
21285 + * * Neither the names of the above-listed copyright holders nor the
21286 + * names of any contributors may be used to endorse or promote products
21287 + * derived from this software without specific prior written permission.
21290 + * ALTERNATIVELY, this software may be distributed under the terms of the
21291 + * GNU General Public License ("GPL") as published by the Free Software
21292 + * Foundation, either version 2 of that License or (at your option) any
21295 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21296 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21297 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21298 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
21299 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21300 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21301 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21302 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21303 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21304 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21305 + * POSSIBILITY OF SUCH DAMAGE.
21308 +#ifndef _CAAMALG_QI2_H_
21309 +#define _CAAMALG_QI2_H_
21311 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
21312 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
21313 +#include <linux/threads.h>
21314 +#include "dpseci.h"
21315 +#include "desc_constr.h"
21317 +#define DPAA2_CAAM_STORE_SIZE 16
21318 +/* NAPI weight *must* be a multiple of the store size. */
21319 +#define DPAA2_CAAM_NAPI_WEIGHT 64
21321 +/* The congestion entrance threshold was chosen so that on LS2088
21322 + * we support the maximum throughput for the available memory
21324 +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
21325 +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
21328 + * dpaa2_caam_priv - driver private data
21329 + * @dpseci_id: DPSECI object unique ID
21330 + * @major_ver: DPSECI major version
21331 + * @minor_ver: DPSECI minor version
21332 + * @dpseci_attr: DPSECI attributes
21333 + * @sec_attr: SEC engine attributes
21334 + * @rx_queue_attr: array of Rx queue attributes
21335 + * @tx_queue_attr: array of Tx queue attributes
21336 + * @cscn_mem: pointer to memory region containing the
21337 + * dpaa2_cscn struct; its size is larger than
21338 + * sizeof(struct dpaa2_cscn) to accommodate alignment
21339 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
21340 + * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
21341 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
21342 + * @dev: device associated with the DPSECI object
21343 + * @mc_io: pointer to MC portal's I/O object
21344 + * @domain: IOMMU domain
21345 + * @ppriv: per CPU pointers to private data
21347 +struct dpaa2_caam_priv {
21353 + struct dpseci_attr dpseci_attr;
21354 + struct dpseci_sec_attr sec_attr;
21355 + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_PRIO_NUM];
21356 + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_PRIO_NUM];
21361 + void *cscn_mem_aligned;
21362 + dma_addr_t cscn_dma;
21364 + struct device *dev;
21365 + struct fsl_mc_io *mc_io;
21366 + struct iommu_domain *domain;
21368 + struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
21372 + * dpaa2_caam_priv_per_cpu - per CPU private data
21373 + * @napi: napi structure
21374 + * @net_dev: netdev used by napi
21375 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
21376 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
21377 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
21378 + * @nctx: notification context of response FQ
21379 + * @store: where dequeued frames are stored
21380 + * @priv: backpointer to dpaa2_caam_priv
21382 +struct dpaa2_caam_priv_per_cpu {
21383 + struct napi_struct napi;
21384 + struct net_device net_dev;
21388 + struct dpaa2_io_notification_ctx nctx;
21389 + struct dpaa2_io_store *store;
21390 + struct dpaa2_caam_priv *priv;
21394 + * The CAAM QI hardware constructs a job descriptor which points
21395 + * to shared descriptor (as pointed by context_a of FQ to CAAM).
21396 + * When the job descriptor is executed by deco, the whole job
21397 + * descriptor together with shared descriptor gets loaded in
21398 + * deco buffer which is 64 words long (each 32-bit).
21400 + * The job descriptor constructed by QI hardware has layout:
21402 + * HEADER (1 word)
21403 + * Shdesc ptr (1 or 2 words)
21404 + * SEQ_OUT_PTR (1 word)
21405 + * Out ptr (1 or 2 words)
21406 + * Out length (1 word)
21407 + * SEQ_IN_PTR (1 word)
21408 + * In ptr (1 or 2 words)
21409 + * In length (1 word)
21411 + * The shdesc ptr is used to fetch shared descriptor contents
21412 + * into deco buffer.
21414 + * Apart from shdesc contents, the total number of words that
21415 + * get loaded in deco buffer are '8' or '11'. The remaining words
21416 + * in deco buffer can be used for storing shared descriptor.
21418 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
21420 +/* Length of a single buffer in the QI driver memory cache */
21421 +#define CAAM_QI_MEMCACHE_SIZE 512
21424 + * aead_edesc - s/w-extended aead descriptor
21425 + * @src_nents: number of segments in input scatterlist
21426 + * @dst_nents: number of segments in output scatterlist
21427 + * @iv_dma: dma address of iv for checking continuity and link table
21428 + * @qm_sg_bytes: length of dma mapped h/w link table
21429 + * @qm_sg_dma: bus physical mapped address of h/w link table
21430 + * @assoclen_dma: bus physical mapped address of req->assoclen
21431 + * @sgt: the h/w link table
21433 +struct aead_edesc {
21436 + dma_addr_t iv_dma;
21438 + dma_addr_t qm_sg_dma;
21439 + dma_addr_t assoclen_dma;
21440 +#define CAAM_QI_MAX_AEAD_SG \
21441 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
21442 + sizeof(struct dpaa2_sg_entry))
21443 + struct dpaa2_sg_entry sgt[0];
21447 + * tls_edesc - s/w-extended tls descriptor
21448 + * @src_nents: number of segments in input scatterlist
21449 + * @dst_nents: number of segments in output scatterlist
21450 + * @iv_dma: dma address of iv for checking continuity and link table
21451 + * @qm_sg_bytes: length of dma mapped h/w link table
21452 + * @qm_sg_dma: bus physical mapped address of h/w link table
21453 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
21454 + * @dst: pointer to output scatterlist, useful for unmapping
21455 + * @sgt: the h/w link table
21457 +struct tls_edesc {
21460 + dma_addr_t iv_dma;
21462 + dma_addr_t qm_sg_dma;
21463 + struct scatterlist tmp[2];
21464 + struct scatterlist *dst;
21465 + struct dpaa2_sg_entry sgt[0];
21469 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
21470 + * @src_nents: number of segments in input scatterlist
21471 + * @dst_nents: number of segments in output scatterlist
21472 + * @iv_dma: dma address of iv for checking continuity and link table
21473 + * @qm_sg_bytes: length of dma mapped qm_sg space
21474 + * @qm_sg_dma: I/O virtual address of h/w link table
21475 + * @sgt: the h/w link table
21477 +struct ablkcipher_edesc {
21480 + dma_addr_t iv_dma;
21482 + dma_addr_t qm_sg_dma;
21483 +#define CAAM_QI_MAX_ABLKCIPHER_SG \
21484 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
21485 + sizeof(struct dpaa2_sg_entry))
21486 + struct dpaa2_sg_entry sgt[0];
21490 + * ahash_edesc - s/w-extended ahash descriptor
21491 + * @dst_dma: I/O virtual address of req->result
21492 + * @qm_sg_dma: I/O virtual address of h/w link table
21493 + * @src_nents: number of segments in input scatterlist
21494 + * @qm_sg_bytes: length of dma mapped qm_sg space
21495 + * @sgt: pointer to h/w link table
21497 +struct ahash_edesc {
21498 + dma_addr_t dst_dma;
21499 + dma_addr_t qm_sg_dma;
21502 + struct dpaa2_sg_entry sgt[0];
21506 + * caam_flc - Flow Context (FLC)
21507 + * @flc: Flow Context options
21508 + * @sh_desc: Shared Descriptor
21512 + u32 sh_desc[MAX_SDLEN];
21513 +} ____cacheline_aligned;
21523 + * caam_request - the request structure the driver application should fill while
21524 + * submitting a job to driver.
21525 + * @fd_flt: Frame list table defining input and output
21526 + * fd_flt[0] - FLE pointing to output buffer
21527 + * fd_flt[1] - FLE pointing to input buffer
21528 + * @fd_flt_dma: DMA address for the frame list table
21529 + * @flc: Flow Context
21530 + * @flc_dma: I/O virtual address of Flow Context
21531 + * @op_type: operation type
21532 + * @cbk: Callback function to invoke when job is completed
21533 + * @ctx: arbitrary context attached with request by the application
21534 + * @edesc: extended descriptor; points to one of {ablkcipher,aead}_edesc
21536 +struct caam_request {
21537 + struct dpaa2_fl_entry fd_flt[2];
21538 + dma_addr_t fd_flt_dma;
21539 + struct caam_flc *flc;
21540 + dma_addr_t flc_dma;
21541 + enum optype op_type;
21542 + void (*cbk)(void *ctx, u32 err);
21548 + * dpaa2_caam_enqueue() - enqueue a crypto request
21549 + * @dev: device associated with the DPSECI object
21550 + * @req: pointer to caam_request
21552 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
21554 +#endif /* _CAAMALG_QI2_H_ */
21555 --- a/drivers/crypto/caam/caamhash.c
21556 +++ b/drivers/crypto/caam/caamhash.c
21559 #include "sg_sw_sec4.h"
21560 #include "key_gen.h"
21561 +#include "caamhash_desc.h"
21563 #define CAAM_CRA_PRIORITY 3000
21566 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
21567 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
21569 -/* length of descriptors text */
21570 -#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
21571 -#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
21572 -#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
21573 -#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
21574 -#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
21575 -#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
21577 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
21578 CAAM_MAX_HASH_KEY_SIZE)
21579 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
21580 @@ -103,20 +96,14 @@ struct caam_hash_ctx {
21581 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
21582 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
21583 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
21584 - u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
21585 dma_addr_t sh_desc_update_dma ____cacheline_aligned;
21586 dma_addr_t sh_desc_update_first_dma;
21587 dma_addr_t sh_desc_fin_dma;
21588 dma_addr_t sh_desc_digest_dma;
21589 - dma_addr_t sh_desc_finup_dma;
21590 struct device *jrdev;
21593 u8 key[CAAM_MAX_HASH_KEY_SIZE];
21594 - dma_addr_t key_dma;
21596 - unsigned int split_key_len;
21597 - unsigned int split_key_pad_len;
21598 + struct alginfo adata;
21602 @@ -143,6 +130,31 @@ struct caam_export_state {
21603 int (*finup)(struct ahash_request *req);
21606 +static inline void switch_buf(struct caam_hash_state *state)
21608 + state->current_buf ^= 1;
21611 +static inline u8 *current_buf(struct caam_hash_state *state)
21613 + return state->current_buf ? state->buf_1 : state->buf_0;
21616 +static inline u8 *alt_buf(struct caam_hash_state *state)
21618 + return state->current_buf ? state->buf_0 : state->buf_1;
21621 +static inline int *current_buflen(struct caam_hash_state *state)
21623 + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
21626 +static inline int *alt_buflen(struct caam_hash_state *state)
21628 + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
21631 /* Common job descriptor seq in/out ptr routines */
21633 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
21634 @@ -175,40 +187,31 @@ static inline dma_addr_t map_seq_out_ptr
21638 -/* Map current buffer in state and put it in link table */
21639 -static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
21640 - struct sec4_sg_entry *sec4_sg,
21641 - u8 *buf, int buflen)
21643 - dma_addr_t buf_dma;
21644 +/* Map current buffer in state (if length > 0) and put it in link table */
21645 +static inline int buf_map_to_sec4_sg(struct device *jrdev,
21646 + struct sec4_sg_entry *sec4_sg,
21647 + struct caam_hash_state *state)
21649 + int buflen = *current_buflen(state);
21654 + state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
21656 + if (dma_mapping_error(jrdev, state->buf_dma)) {
21657 + dev_err(jrdev, "unable to map buf\n");
21658 + state->buf_dma = 0;
21662 - buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
21663 - dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
21664 + dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
21670 - * Only put buffer in link table if it contains data, which is possible,
21671 - * since a buffer has previously been used, and needs to be unmapped,
21673 -static inline dma_addr_t
21674 -try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
21675 - u8 *buf, dma_addr_t buf_dma, int buflen,
21678 - if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
21679 - dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
21681 - buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
21689 /* Map state->caam_ctx, and add it to link table */
21690 -static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
21691 +static inline int ctx_map_to_sec4_sg(struct device *jrdev,
21692 struct caam_hash_state *state, int ctx_len,
21693 struct sec4_sg_entry *sec4_sg, u32 flag)
21695 @@ -224,124 +227,22 @@ static inline int ctx_map_to_sec4_sg(u32
21699 -/* Common shared descriptor commands */
21700 -static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
21702 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
21703 - ctx->split_key_len, CLASS_2 |
21704 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
21707 -/* Append key if it has been set */
21708 -static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
21710 - u32 *key_jump_cmd;
21712 - init_sh_desc(desc, HDR_SHARE_SERIAL);
21714 - if (ctx->split_key_len) {
21715 - /* Skip if already shared */
21716 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
21719 - append_key_ahash(desc, ctx);
21721 - set_jump_tgt_here(desc, key_jump_cmd);
21724 - /* Propagate errors from shared to job descriptor */
21725 - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
21729 - * For ahash read data from seqin following state->caam_ctx,
21730 - * and write resulting class2 context to seqout, which may be state->caam_ctx
21733 -static inline void ahash_append_load_str(u32 *desc, int digestsize)
21735 - /* Calculate remaining bytes to read */
21736 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
21738 - /* Read remaining bytes */
21739 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
21740 - FIFOLD_TYPE_MSG | KEY_VLF);
21742 - /* Store class2 context bytes */
21743 - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
21744 - LDST_SRCDST_BYTE_CONTEXT);
21748 - * For ahash update, final and finup, import context, read and write to seqout
21750 -static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
21752 - struct caam_hash_ctx *ctx)
21754 - init_sh_desc_key_ahash(desc, ctx);
21756 - /* Import context from software */
21757 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
21758 - LDST_CLASS_2_CCB | ctx->ctx_len);
21760 - /* Class 2 operation */
21761 - append_operation(desc, op | state | OP_ALG_ENCRYPT);
21764 - * Load from buf and/or src and write to req->result or state->context
21766 - ahash_append_load_str(desc, digestsize);
21769 -/* For ahash firsts and digest, read and write to seqout */
21770 -static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
21771 - int digestsize, struct caam_hash_ctx *ctx)
21773 - init_sh_desc_key_ahash(desc, ctx);
21775 - /* Class 2 operation */
21776 - append_operation(desc, op | state | OP_ALG_ENCRYPT);
21779 - * Load from buf and/or src and write to req->result or state->context
21781 - ahash_append_load_str(desc, digestsize);
21784 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
21786 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
21787 int digestsize = crypto_ahash_digestsize(ahash);
21788 struct device *jrdev = ctx->jrdev;
21789 - u32 have_key = 0;
21790 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
21793 - if (ctx->split_key_len)
21794 - have_key = OP_ALG_AAI_HMAC_PRECOMP;
21795 + ctx->adata.key_virt = ctx->key;
21797 /* ahash_update shared descriptor */
21798 desc = ctx->sh_desc_update;
21800 - init_sh_desc(desc, HDR_SHARE_SERIAL);
21802 - /* Import context from software */
21803 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
21804 - LDST_CLASS_2_CCB | ctx->ctx_len);
21806 - /* Class 2 operation */
21807 - append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
21810 - /* Load data and write to result or context */
21811 - ahash_append_load_str(desc, ctx->ctx_len);
21813 - ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
21815 - if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
21816 - dev_err(jrdev, "unable to map shared descriptor\n");
21819 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
21820 + ctx->ctx_len, true, ctrlpriv->era);
21821 + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
21822 + desc_bytes(desc), DMA_TO_DEVICE);
21824 print_hex_dump(KERN_ERR,
21825 "ahash update shdesc@"__stringify(__LINE__)": ",
21826 @@ -350,17 +251,10 @@ static int ahash_set_sh_desc(struct cryp
21828 /* ahash_update_first shared descriptor */
21829 desc = ctx->sh_desc_update_first;
21831 - ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
21832 - ctx->ctx_len, ctx);
21834 - ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
21835 - desc_bytes(desc),
21837 - if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
21838 - dev_err(jrdev, "unable to map shared descriptor\n");
21841 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
21842 + ctx->ctx_len, false, ctrlpriv->era);
21843 + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
21844 + desc_bytes(desc), DMA_TO_DEVICE);
21846 print_hex_dump(KERN_ERR,
21847 "ahash update first shdesc@"__stringify(__LINE__)": ",
21848 @@ -369,53 +263,22 @@ static int ahash_set_sh_desc(struct cryp
21850 /* ahash_final shared descriptor */
21851 desc = ctx->sh_desc_fin;
21853 - ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
21854 - OP_ALG_AS_FINALIZE, digestsize, ctx);
21856 - ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
21858 - if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
21859 - dev_err(jrdev, "unable to map shared descriptor\n");
21862 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
21863 + ctx->ctx_len, true, ctrlpriv->era);
21864 + dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
21865 + desc_bytes(desc), DMA_TO_DEVICE);
21867 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
21868 DUMP_PREFIX_ADDRESS, 16, 4, desc,
21869 desc_bytes(desc), 1);
21872 - /* ahash_finup shared descriptor */
21873 - desc = ctx->sh_desc_finup;
21875 - ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
21876 - OP_ALG_AS_FINALIZE, digestsize, ctx);
21878 - ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
21880 - if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
21881 - dev_err(jrdev, "unable to map shared descriptor\n");
21885 - print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
21886 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
21887 - desc_bytes(desc), 1);
21890 /* ahash_digest shared descriptor */
21891 desc = ctx->sh_desc_digest;
21893 - ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
21894 - digestsize, ctx);
21896 - ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
21897 - desc_bytes(desc),
21899 - if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
21900 - dev_err(jrdev, "unable to map shared descriptor\n");
21903 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
21904 + ctx->ctx_len, false, ctrlpriv->era);
21905 + dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
21906 + desc_bytes(desc), DMA_TO_DEVICE);
21908 print_hex_dump(KERN_ERR,
21909 "ahash digest shdesc@"__stringify(__LINE__)": ",
21910 @@ -426,14 +289,6 @@ static int ahash_set_sh_desc(struct cryp
21914 -static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
21917 - return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
21918 - ctx->split_key_pad_len, key_in, keylen,
21922 /* Digest hash size if it is too large */
21923 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
21924 u32 *keylen, u8 *key_out, u32 digestsize)
21925 @@ -469,7 +324,7 @@ static int hash_digest_key(struct caam_h
21928 /* Job descriptor to perform unkeyed hash on key_in */
21929 - append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
21930 + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
21931 OP_ALG_AS_INITFINAL);
21932 append_seq_in_ptr(desc, src_dma, *keylen, 0);
21933 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
21934 @@ -513,12 +368,10 @@ static int hash_digest_key(struct caam_h
21935 static int ahash_setkey(struct crypto_ahash *ahash,
21936 const u8 *key, unsigned int keylen)
21938 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
21939 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
21940 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
21941 - struct device *jrdev = ctx->jrdev;
21942 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
21943 int digestsize = crypto_ahash_digestsize(ahash);
21944 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
21946 u8 *hashed_key = NULL;
21948 @@ -539,43 +392,29 @@ static int ahash_setkey(struct crypto_ah
21952 - /* Pick class 2 key length from algorithm submask */
21953 - ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
21954 - OP_ALG_ALGSEL_SHIFT] * 2;
21955 - ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
21958 - printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
21959 - ctx->split_key_len, ctx->split_key_pad_len);
21960 - print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
21961 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
21964 + * If DKP is supported, use it in the shared descriptor to generate
21967 + if (ctrlpriv->era >= 6) {
21968 + ctx->adata.key_inline = true;
21969 + ctx->adata.keylen = keylen;
21970 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
21971 + OP_ALG_ALGSEL_MASK);
21973 - ret = gen_split_hash_key(ctx, key, keylen);
21975 - goto bad_free_key;
21976 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
21977 + goto bad_free_key;
21979 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
21981 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
21982 - dev_err(jrdev, "unable to map key i/o memory\n");
21984 - goto error_free_key;
21985 + memcpy(ctx->key, key, keylen);
21987 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
21988 + keylen, CAAM_MAX_HASH_KEY_SIZE);
21990 + goto bad_free_key;
21993 - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
21994 - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
21995 - ctx->split_key_pad_len, 1);
21998 - ret = ahash_set_sh_desc(ahash);
22000 - dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
22006 + return ahash_set_sh_desc(ahash);
22009 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
22010 @@ -604,6 +443,8 @@ static inline void ahash_unmap(struct de
22011 struct ahash_edesc *edesc,
22012 struct ahash_request *req, int dst_len)
22014 + struct caam_hash_state *state = ahash_request_ctx(req);
22016 if (edesc->src_nents)
22017 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
22018 if (edesc->dst_dma)
22019 @@ -612,6 +453,12 @@ static inline void ahash_unmap(struct de
22020 if (edesc->sec4_sg_bytes)
22021 dma_unmap_single(dev, edesc->sec4_sg_dma,
22022 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
22024 + if (state->buf_dma) {
22025 + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
22027 + state->buf_dma = 0;
22031 static inline void ahash_unmap_ctx(struct device *dev,
22032 @@ -643,8 +490,7 @@ static void ahash_done(struct device *jr
22033 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22036 - edesc = (struct ahash_edesc *)((char *)desc -
22037 - offsetof(struct ahash_edesc, hw_desc));
22038 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22040 caam_jr_strstatus(jrdev, err);
22042 @@ -671,19 +517,19 @@ static void ahash_done_bi(struct device
22043 struct ahash_edesc *edesc;
22044 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
22045 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22047 struct caam_hash_state *state = ahash_request_ctx(req);
22049 int digestsize = crypto_ahash_digestsize(ahash);
22051 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22054 - edesc = (struct ahash_edesc *)((char *)desc -
22055 - offsetof(struct ahash_edesc, hw_desc));
22056 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22058 caam_jr_strstatus(jrdev, err);
22060 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
22061 + switch_buf(state);
22065 @@ -713,8 +559,7 @@ static void ahash_done_ctx_src(struct de
22066 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22069 - edesc = (struct ahash_edesc *)((char *)desc -
22070 - offsetof(struct ahash_edesc, hw_desc));
22071 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22073 caam_jr_strstatus(jrdev, err);
22075 @@ -741,19 +586,19 @@ static void ahash_done_ctx_dst(struct de
22076 struct ahash_edesc *edesc;
22077 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
22078 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22080 struct caam_hash_state *state = ahash_request_ctx(req);
22082 int digestsize = crypto_ahash_digestsize(ahash);
22084 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22087 - edesc = (struct ahash_edesc *)((char *)desc -
22088 - offsetof(struct ahash_edesc, hw_desc));
22089 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22091 caam_jr_strstatus(jrdev, err);
22093 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
22094 + switch_buf(state);
22098 @@ -835,13 +680,12 @@ static int ahash_update_ctx(struct ahash
22099 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22100 struct caam_hash_state *state = ahash_request_ctx(req);
22101 struct device *jrdev = ctx->jrdev;
22102 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22103 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22104 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22105 - int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
22106 - u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
22107 - int *next_buflen = state->current_buf ? &state->buflen_0 :
22108 - &state->buflen_1, last_buflen;
22109 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22110 + GFP_KERNEL : GFP_ATOMIC;
22111 + u8 *buf = current_buf(state);
22112 + int *buflen = current_buflen(state);
22113 + u8 *next_buf = alt_buf(state);
22114 + int *next_buflen = alt_buflen(state), last_buflen;
22115 int in_len = *buflen + req->nbytes, to_hash;
22117 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
22118 @@ -890,15 +734,14 @@ static int ahash_update_ctx(struct ahash
22119 edesc->src_nents = src_nents;
22120 edesc->sec4_sg_bytes = sec4_sg_bytes;
22122 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
22123 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
22124 edesc->sec4_sg, DMA_BIDIRECTIONAL);
22128 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
22129 - edesc->sec4_sg + 1,
22130 - buf, state->buf_dma,
22131 - *buflen, last_buflen);
22132 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
22136 if (mapped_nents) {
22137 sg_to_sec4_sg_last(req->src, mapped_nents,
22138 @@ -909,12 +752,10 @@ static int ahash_update_ctx(struct ahash
22142 - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
22143 - cpu_to_caam32(SEC4_SG_LEN_FIN);
22144 + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
22148 - state->current_buf = !state->current_buf;
22150 desc = edesc->hw_desc;
22152 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
22153 @@ -969,12 +810,9 @@ static int ahash_final_ctx(struct ahash_
22154 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22155 struct caam_hash_state *state = ahash_request_ctx(req);
22156 struct device *jrdev = ctx->jrdev;
22157 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22158 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22159 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22160 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22161 - int last_buflen = state->current_buf ? state->buflen_0 :
22163 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22164 + GFP_KERNEL : GFP_ATOMIC;
22165 + int buflen = *current_buflen(state);
22167 int sec4_sg_bytes, sec4_sg_src_index;
22168 int digestsize = crypto_ahash_digestsize(ahash);
22169 @@ -994,18 +832,17 @@ static int ahash_final_ctx(struct ahash_
22170 desc = edesc->hw_desc;
22172 edesc->sec4_sg_bytes = sec4_sg_bytes;
22173 - edesc->src_nents = 0;
22175 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
22176 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
22177 edesc->sec4_sg, DMA_TO_DEVICE);
22181 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
22182 - buf, state->buf_dma, buflen,
22184 - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
22185 - cpu_to_caam32(SEC4_SG_LEN_FIN);
22186 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
22190 + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
22192 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
22193 sec4_sg_bytes, DMA_TO_DEVICE);
22194 @@ -1048,12 +885,9 @@ static int ahash_finup_ctx(struct ahash_
22195 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22196 struct caam_hash_state *state = ahash_request_ctx(req);
22197 struct device *jrdev = ctx->jrdev;
22198 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22199 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22200 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22201 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22202 - int last_buflen = state->current_buf ? state->buflen_0 :
22204 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22205 + GFP_KERNEL : GFP_ATOMIC;
22206 + int buflen = *current_buflen(state);
22208 int sec4_sg_src_index;
22209 int src_nents, mapped_nents;
22210 @@ -1082,7 +916,7 @@ static int ahash_finup_ctx(struct ahash_
22212 /* allocate space for base edesc and hw desc commands, link tables */
22213 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
22214 - ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
22215 + ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
22218 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
22219 @@ -1093,14 +927,14 @@ static int ahash_finup_ctx(struct ahash_
22221 edesc->src_nents = src_nents;
22223 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
22224 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
22225 edesc->sec4_sg, DMA_TO_DEVICE);
22229 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
22230 - buf, state->buf_dma, buflen,
22232 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
22236 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
22237 sec4_sg_src_index, ctx->ctx_len + buflen,
22238 @@ -1136,15 +970,18 @@ static int ahash_digest(struct ahash_req
22240 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
22241 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22242 + struct caam_hash_state *state = ahash_request_ctx(req);
22243 struct device *jrdev = ctx->jrdev;
22244 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22245 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22246 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22247 + GFP_KERNEL : GFP_ATOMIC;
22249 int digestsize = crypto_ahash_digestsize(ahash);
22250 int src_nents, mapped_nents;
22251 struct ahash_edesc *edesc;
22254 + state->buf_dma = 0;
22256 src_nents = sg_nents_for_len(req->src, req->nbytes);
22257 if (src_nents < 0) {
22258 dev_err(jrdev, "Invalid number of src SG.\n");
22259 @@ -1215,10 +1052,10 @@ static int ahash_final_no_ctx(struct aha
22260 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22261 struct caam_hash_state *state = ahash_request_ctx(req);
22262 struct device *jrdev = ctx->jrdev;
22263 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22264 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22265 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22266 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22267 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22268 + GFP_KERNEL : GFP_ATOMIC;
22269 + u8 *buf = current_buf(state);
22270 + int buflen = *current_buflen(state);
22272 int digestsize = crypto_ahash_digestsize(ahash);
22273 struct ahash_edesc *edesc;
22274 @@ -1249,7 +1086,6 @@ static int ahash_final_no_ctx(struct aha
22275 dev_err(jrdev, "unable to map dst\n");
22278 - edesc->src_nents = 0;
22281 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
22282 @@ -1279,13 +1115,12 @@ static int ahash_update_no_ctx(struct ah
22283 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22284 struct caam_hash_state *state = ahash_request_ctx(req);
22285 struct device *jrdev = ctx->jrdev;
22286 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22287 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22288 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22289 - int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
22290 - u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
22291 - int *next_buflen = state->current_buf ? &state->buflen_0 :
22292 - &state->buflen_1;
22293 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22294 + GFP_KERNEL : GFP_ATOMIC;
22295 + u8 *buf = current_buf(state);
22296 + int *buflen = current_buflen(state);
22297 + u8 *next_buf = alt_buf(state);
22298 + int *next_buflen = alt_buflen(state);
22299 int in_len = *buflen + req->nbytes, to_hash;
22300 int sec4_sg_bytes, src_nents, mapped_nents;
22301 struct ahash_edesc *edesc;
22302 @@ -1332,10 +1167,11 @@ static int ahash_update_no_ctx(struct ah
22304 edesc->src_nents = src_nents;
22305 edesc->sec4_sg_bytes = sec4_sg_bytes;
22306 - edesc->dst_dma = 0;
22308 - state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
22310 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
22314 sg_to_sec4_sg_last(req->src, mapped_nents,
22315 edesc->sec4_sg + 1, 0);
22317 @@ -1345,8 +1181,6 @@ static int ahash_update_no_ctx(struct ah
22321 - state->current_buf = !state->current_buf;
22323 desc = edesc->hw_desc;
22325 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
22326 @@ -1406,12 +1240,9 @@ static int ahash_finup_no_ctx(struct aha
22327 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22328 struct caam_hash_state *state = ahash_request_ctx(req);
22329 struct device *jrdev = ctx->jrdev;
22330 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22331 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22332 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22333 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22334 - int last_buflen = state->current_buf ? state->buflen_0 :
22336 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22337 + GFP_KERNEL : GFP_ATOMIC;
22338 + int buflen = *current_buflen(state);
22340 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
22341 int digestsize = crypto_ahash_digestsize(ahash);
22342 @@ -1453,9 +1284,9 @@ static int ahash_finup_no_ctx(struct aha
22343 edesc->src_nents = src_nents;
22344 edesc->sec4_sg_bytes = sec4_sg_bytes;
22346 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
22347 - state->buf_dma, buflen,
22349 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
22353 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
22355 @@ -1499,11 +1330,10 @@ static int ahash_update_first(struct aha
22356 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22357 struct caam_hash_state *state = ahash_request_ctx(req);
22358 struct device *jrdev = ctx->jrdev;
22359 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22360 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22361 - u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
22362 - int *next_buflen = state->current_buf ?
22363 - &state->buflen_1 : &state->buflen_0;
22364 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22365 + GFP_KERNEL : GFP_ATOMIC;
22366 + u8 *next_buf = alt_buf(state);
22367 + int *next_buflen = alt_buflen(state);
22370 int src_nents, mapped_nents;
22371 @@ -1548,7 +1378,6 @@ static int ahash_update_first(struct aha
22374 edesc->src_nents = src_nents;
22375 - edesc->dst_dma = 0;
22377 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
22379 @@ -1585,6 +1414,7 @@ static int ahash_update_first(struct aha
22380 state->final = ahash_final_no_ctx;
22381 scatterwalk_map_and_copy(next_buf, req->src, 0,
22383 + switch_buf(state);
22386 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
22387 @@ -1691,7 +1521,6 @@ struct caam_hash_template {
22388 unsigned int blocksize;
22389 struct ahash_alg template_ahash;
22394 /* ahash descriptors */
22395 @@ -1717,7 +1546,6 @@ static struct caam_hash_template driver_
22398 .alg_type = OP_ALG_ALGSEL_SHA1,
22399 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
22402 .driver_name = "sha224-caam",
22403 @@ -1739,7 +1567,6 @@ static struct caam_hash_template driver_
22406 .alg_type = OP_ALG_ALGSEL_SHA224,
22407 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
22410 .driver_name = "sha256-caam",
22411 @@ -1761,7 +1588,6 @@ static struct caam_hash_template driver_
22414 .alg_type = OP_ALG_ALGSEL_SHA256,
22415 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
22418 .driver_name = "sha384-caam",
22419 @@ -1783,7 +1609,6 @@ static struct caam_hash_template driver_
22422 .alg_type = OP_ALG_ALGSEL_SHA384,
22423 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
22426 .driver_name = "sha512-caam",
22427 @@ -1805,7 +1630,6 @@ static struct caam_hash_template driver_
22430 .alg_type = OP_ALG_ALGSEL_SHA512,
22431 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
22434 .driver_name = "md5-caam",
22435 @@ -1827,14 +1651,12 @@ static struct caam_hash_template driver_
22438 .alg_type = OP_ALG_ALGSEL_MD5,
22439 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
22443 struct caam_hash_alg {
22444 struct list_head entry;
22447 struct ahash_alg ahash_alg;
22450 @@ -1856,6 +1678,7 @@ static int caam_hash_cra_init(struct cry
22451 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
22453 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
22454 + dma_addr_t dma_addr;
22457 * Get a Job ring from Job Ring driver to ensure in-order
22458 @@ -1866,11 +1689,31 @@ static int caam_hash_cra_init(struct cry
22459 pr_err("Job Ring Device allocation for transform failed\n");
22460 return PTR_ERR(ctx->jrdev);
22463 + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
22464 + offsetof(struct caam_hash_ctx,
22465 + sh_desc_update_dma),
22466 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
22467 + if (dma_mapping_error(ctx->jrdev, dma_addr)) {
22468 + dev_err(ctx->jrdev, "unable to map shared descriptors\n");
22469 + caam_jr_free(ctx->jrdev);
22473 + ctx->sh_desc_update_dma = dma_addr;
22474 + ctx->sh_desc_update_first_dma = dma_addr +
22475 + offsetof(struct caam_hash_ctx,
22476 + sh_desc_update_first);
22477 + ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
22479 + ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
22482 /* copy descriptor header template value */
22483 - ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
22484 - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
22485 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
22487 - ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
22488 + ctx->ctx_len = runninglen[(ctx->adata.algtype &
22489 + OP_ALG_ALGSEL_SUBMASK) >>
22490 OP_ALG_ALGSEL_SHIFT];
22492 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
22493 @@ -1882,30 +1725,10 @@ static void caam_hash_cra_exit(struct cr
22495 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
22497 - if (ctx->sh_desc_update_dma &&
22498 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
22499 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
22500 - desc_bytes(ctx->sh_desc_update),
22502 - if (ctx->sh_desc_update_first_dma &&
22503 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
22504 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
22505 - desc_bytes(ctx->sh_desc_update_first),
22507 - if (ctx->sh_desc_fin_dma &&
22508 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
22509 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
22510 - desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
22511 - if (ctx->sh_desc_digest_dma &&
22512 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
22513 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
22514 - desc_bytes(ctx->sh_desc_digest),
22516 - if (ctx->sh_desc_finup_dma &&
22517 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
22518 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
22519 - desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
22521 + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
22522 + offsetof(struct caam_hash_ctx,
22523 + sh_desc_update_dma),
22524 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
22525 caam_jr_free(ctx->jrdev);
22528 @@ -1964,7 +1787,6 @@ caam_hash_alloc(struct caam_hash_templat
22529 alg->cra_type = &crypto_ahash_type;
22531 t_alg->alg_type = template->alg_type;
22532 - t_alg->alg_op = template->alg_op;
22537 +++ b/drivers/crypto/caam/caamhash_desc.c
22540 + * Shared descriptors for ahash algorithms
22542 + * Copyright 2017 NXP
22544 + * Redistribution and use in source and binary forms, with or without
22545 + * modification, are permitted provided that the following conditions are met:
22546 + * * Redistributions of source code must retain the above copyright
22547 + * notice, this list of conditions and the following disclaimer.
22548 + * * Redistributions in binary form must reproduce the above copyright
22549 + * notice, this list of conditions and the following disclaimer in the
22550 + * documentation and/or other materials provided with the distribution.
22551 + * * Neither the names of the above-listed copyright holders nor the
22552 + * names of any contributors may be used to endorse or promote products
22553 + * derived from this software without specific prior written permission.
22556 + * ALTERNATIVELY, this software may be distributed under the terms of the
22557 + * GNU General Public License ("GPL") as published by the Free Software
22558 + * Foundation, either version 2 of that License or (at your option) any
22561 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22562 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22563 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22564 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22565 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22566 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22567 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22568 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22569 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22570 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22571 + * POSSIBILITY OF SUCH DAMAGE.
22574 +#include "compat.h"
22575 +#include "desc_constr.h"
22576 +#include "caamhash_desc.h"
22579 + * cnstr_shdsc_ahash - ahash shared descriptor
22580 + * @desc: pointer to buffer used for descriptor construction
22581 + * @adata: pointer to authentication transform definitions.
22582 + * A split key is required for SEC Era < 6; the size of the split key
22583 + * is specified in this case.
22584 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
22585 + * SHA256, SHA384, SHA512}.
22586 + * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
22587 + * @digestsize: algorithm's digest size
22588 + * @ctx_len: size of Context Register
22589 + * @import_ctx: true if previous Context Register needs to be restored
22590 + * must be true for ahash update and final
22591 + * must be false for for ahash first and digest
22594 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
22595 + int digestsize, int ctx_len, bool import_ctx, int era)
22597 + u32 op = adata->algtype;
22599 + init_sh_desc(desc, HDR_SHARE_SERIAL);
22601 + /* Append key if it has been set; ahash update excluded */
22602 + if (state != OP_ALG_AS_UPDATE && adata->keylen) {
22603 + u32 *skip_key_load;
22605 + /* Skip key loading if already shared */
22606 + skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
22610 + append_key_as_imm(desc, adata->key_virt,
22611 + adata->keylen_pad,
22612 + adata->keylen, CLASS_2 |
22613 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
22615 + append_proto_dkp(desc, adata);
22617 + set_jump_tgt_here(desc, skip_key_load);
22619 + op |= OP_ALG_AAI_HMAC_PRECOMP;
22622 + /* If needed, import context from software */
22624 + append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
22625 + LDST_SRCDST_BYTE_CONTEXT);
22627 + /* Class 2 operation */
22628 + append_operation(desc, op | state | OP_ALG_ENCRYPT);
22631 + * Load from buf and/or src and write to req->result or state->context
22632 + * Calculate remaining bytes to read
22634 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
22635 + /* Read remaining bytes */
22636 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
22637 + FIFOLD_TYPE_MSG | KEY_VLF);
22638 + /* Store class2 context bytes */
22639 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
22640 + LDST_SRCDST_BYTE_CONTEXT);
22642 +EXPORT_SYMBOL(cnstr_shdsc_ahash);
22644 +MODULE_LICENSE("Dual BSD/GPL");
22645 +MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
22646 +MODULE_AUTHOR("NXP Semiconductors");
22648 +++ b/drivers/crypto/caam/caamhash_desc.h
22651 + * Shared descriptors for ahash algorithms
22653 + * Copyright 2017 NXP
22655 + * Redistribution and use in source and binary forms, with or without
22656 + * modification, are permitted provided that the following conditions are met:
22657 + * * Redistributions of source code must retain the above copyright
22658 + * notice, this list of conditions and the following disclaimer.
22659 + * * Redistributions in binary form must reproduce the above copyright
22660 + * notice, this list of conditions and the following disclaimer in the
22661 + * documentation and/or other materials provided with the distribution.
22662 + * * Neither the names of the above-listed copyright holders nor the
22663 + * names of any contributors may be used to endorse or promote products
22664 + * derived from this software without specific prior written permission.
22667 + * ALTERNATIVELY, this software may be distributed under the terms of the
22668 + * GNU General Public License ("GPL") as published by the Free Software
22669 + * Foundation, either version 2 of that License or (at your option) any
22672 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22673 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22674 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22675 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22676 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22677 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22678 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22679 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22680 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22681 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22682 + * POSSIBILITY OF SUCH DAMAGE.
22685 +#ifndef _CAAMHASH_DESC_H_
22686 +#define _CAAMHASH_DESC_H_
22688 +/* length of descriptors text */
22689 +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
22690 +#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
22691 +#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
22692 +#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
22693 +#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
22695 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
22696 + int digestsize, int ctx_len, bool import_ctx, int era);
22698 +#endif /* _CAAMHASH_DESC_H_ */
22699 --- a/drivers/crypto/caam/caampkc.c
22700 +++ b/drivers/crypto/caam/caampkc.c
22702 #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
22703 #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
22704 sizeof(struct rsa_priv_f1_pdb))
22705 +#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
22706 + sizeof(struct rsa_priv_f2_pdb))
22707 +#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
22708 + sizeof(struct rsa_priv_f3_pdb))
22710 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
22711 struct akcipher_request *req)
22712 @@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct dev
22713 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
22716 +static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
22717 + struct akcipher_request *req)
22719 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22720 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22721 + struct caam_rsa_key *key = &ctx->key;
22722 + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
22723 + size_t p_sz = key->p_sz;
22724 + size_t q_sz = key->q_sz;
22726 + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
22727 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
22728 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
22729 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
22730 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
22733 +static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
22734 + struct akcipher_request *req)
22736 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22737 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22738 + struct caam_rsa_key *key = &ctx->key;
22739 + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
22740 + size_t p_sz = key->p_sz;
22741 + size_t q_sz = key->q_sz;
22743 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
22744 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
22745 + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
22746 + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
22747 + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
22748 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
22749 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
22752 /* RSA Job Completion handler */
22753 static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
22755 @@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct devi
22756 akcipher_request_complete(req, err);
22759 +static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
22762 + struct akcipher_request *req = context;
22763 + struct rsa_edesc *edesc;
22766 + caam_jr_strstatus(dev, err);
22768 + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
22770 + rsa_priv_f2_unmap(dev, edesc, req);
22771 + rsa_io_unmap(dev, edesc, req);
22774 + akcipher_request_complete(req, err);
22777 +static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
22780 + struct akcipher_request *req = context;
22781 + struct rsa_edesc *edesc;
22784 + caam_jr_strstatus(dev, err);
22786 + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
22788 + rsa_priv_f3_unmap(dev, edesc, req);
22789 + rsa_io_unmap(dev, edesc, req);
22792 + akcipher_request_complete(req, err);
22795 static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
22798 @@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc
22799 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22800 struct device *dev = ctx->dev;
22801 struct rsa_edesc *edesc;
22802 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22803 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22804 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22805 + GFP_KERNEL : GFP_ATOMIC;
22807 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
22808 int src_nents, dst_nents;
22809 @@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct ak
22813 +static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
22814 + struct rsa_edesc *edesc)
22816 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22817 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22818 + struct caam_rsa_key *key = &ctx->key;
22819 + struct device *dev = ctx->dev;
22820 + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
22821 + int sec4_sg_index = 0;
22822 + size_t p_sz = key->p_sz;
22823 +	size_t q_sz = key->q_sz;
22825 + pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
22826 + if (dma_mapping_error(dev, pdb->d_dma)) {
22827 + dev_err(dev, "Unable to map RSA private exponent memory\n");
22831 + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
22832 + if (dma_mapping_error(dev, pdb->p_dma)) {
22833 + dev_err(dev, "Unable to map RSA prime factor p memory\n");
22837 + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
22838 + if (dma_mapping_error(dev, pdb->q_dma)) {
22839 + dev_err(dev, "Unable to map RSA prime factor q memory\n");
22843 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
22844 + if (dma_mapping_error(dev, pdb->tmp1_dma)) {
22845 + dev_err(dev, "Unable to map RSA tmp1 memory\n");
22849 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
22850 + if (dma_mapping_error(dev, pdb->tmp2_dma)) {
22851 + dev_err(dev, "Unable to map RSA tmp2 memory\n");
22855 + if (edesc->src_nents > 1) {
22856 + pdb->sgf |= RSA_PRIV_PDB_SGF_G;
22857 + pdb->g_dma = edesc->sec4_sg_dma;
22858 + sec4_sg_index += edesc->src_nents;
22860 + pdb->g_dma = sg_dma_address(req->src);
22863 + if (edesc->dst_nents > 1) {
22864 + pdb->sgf |= RSA_PRIV_PDB_SGF_F;
22865 + pdb->f_dma = edesc->sec4_sg_dma +
22866 + sec4_sg_index * sizeof(struct sec4_sg_entry);
22868 + pdb->f_dma = sg_dma_address(req->dst);
22871 + pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
22872 + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
22877 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
22879 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
22881 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
22883 + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
22888 +static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
22889 + struct rsa_edesc *edesc)
22891 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22892 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22893 + struct caam_rsa_key *key = &ctx->key;
22894 + struct device *dev = ctx->dev;
22895 + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
22896 + int sec4_sg_index = 0;
22897 + size_t p_sz = key->p_sz;
22898 +	size_t q_sz = key->q_sz;
22900 + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
22901 + if (dma_mapping_error(dev, pdb->p_dma)) {
22902 + dev_err(dev, "Unable to map RSA prime factor p memory\n");
22906 + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
22907 + if (dma_mapping_error(dev, pdb->q_dma)) {
22908 + dev_err(dev, "Unable to map RSA prime factor q memory\n");
22912 + pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
22913 + if (dma_mapping_error(dev, pdb->dp_dma)) {
22914 + dev_err(dev, "Unable to map RSA exponent dp memory\n");
22918 + pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
22919 + if (dma_mapping_error(dev, pdb->dq_dma)) {
22920 + dev_err(dev, "Unable to map RSA exponent dq memory\n");
22924 + pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
22925 + if (dma_mapping_error(dev, pdb->c_dma)) {
22926 + dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
22930 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
22931 + if (dma_mapping_error(dev, pdb->tmp1_dma)) {
22932 + dev_err(dev, "Unable to map RSA tmp1 memory\n");
22936 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
22937 + if (dma_mapping_error(dev, pdb->tmp2_dma)) {
22938 + dev_err(dev, "Unable to map RSA tmp2 memory\n");
22942 + if (edesc->src_nents > 1) {
22943 + pdb->sgf |= RSA_PRIV_PDB_SGF_G;
22944 + pdb->g_dma = edesc->sec4_sg_dma;
22945 + sec4_sg_index += edesc->src_nents;
22947 + pdb->g_dma = sg_dma_address(req->src);
22950 + if (edesc->dst_nents > 1) {
22951 + pdb->sgf |= RSA_PRIV_PDB_SGF_F;
22952 + pdb->f_dma = edesc->sec4_sg_dma +
22953 + sec4_sg_index * sizeof(struct sec4_sg_entry);
22955 + pdb->f_dma = sg_dma_address(req->dst);
22958 + pdb->sgf |= key->n_sz;
22959 + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
22964 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
22966 + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
22968 + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
22970 + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
22972 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
22974 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
22979 static int caam_rsa_enc(struct akcipher_request *req)
22981 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22982 @@ -301,24 +543,14 @@ init_fail:
22986 -static int caam_rsa_dec(struct akcipher_request *req)
22987 +static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
22989 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22990 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22991 - struct caam_rsa_key *key = &ctx->key;
22992 struct device *jrdev = ctx->dev;
22993 struct rsa_edesc *edesc;
22996 - if (unlikely(!key->n || !key->d))
22999 - if (req->dst_len < key->n_sz) {
23000 - req->dst_len = key->n_sz;
23001 - dev_err(jrdev, "Output buffer length less than parameter n\n");
23002 - return -EOVERFLOW;
23005 /* Allocate extended descriptor */
23006 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
23008 @@ -344,17 +576,147 @@ init_fail:
23012 +static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
23014 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23015 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23016 + struct device *jrdev = ctx->dev;
23017 + struct rsa_edesc *edesc;
23020 + /* Allocate extended descriptor */
23021 + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
23022 + if (IS_ERR(edesc))
23023 + return PTR_ERR(edesc);
23025 + /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
23026 + ret = set_rsa_priv_f2_pdb(req, edesc);
23030 + /* Initialize Job Descriptor */
23031 + init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
23033 + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
23035 + return -EINPROGRESS;
23037 + rsa_priv_f2_unmap(jrdev, edesc, req);
23040 + rsa_io_unmap(jrdev, edesc, req);
23045 +static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
23047 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23048 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23049 + struct device *jrdev = ctx->dev;
23050 + struct rsa_edesc *edesc;
23053 + /* Allocate extended descriptor */
23054 + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
23055 + if (IS_ERR(edesc))
23056 + return PTR_ERR(edesc);
23058 + /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
23059 + ret = set_rsa_priv_f3_pdb(req, edesc);
23063 + /* Initialize Job Descriptor */
23064 + init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
23066 + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
23068 + return -EINPROGRESS;
23070 + rsa_priv_f3_unmap(jrdev, edesc, req);
23073 + rsa_io_unmap(jrdev, edesc, req);
23078 +static int caam_rsa_dec(struct akcipher_request *req)
23080 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23081 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23082 + struct caam_rsa_key *key = &ctx->key;
23085 + if (unlikely(!key->n || !key->d))
23088 + if (req->dst_len < key->n_sz) {
23089 + req->dst_len = key->n_sz;
23090 + dev_err(ctx->dev, "Output buffer length less than parameter n\n");
23091 + return -EOVERFLOW;
23094 + if (key->priv_form == FORM3)
23095 + ret = caam_rsa_dec_priv_f3(req);
23096 + else if (key->priv_form == FORM2)
23097 + ret = caam_rsa_dec_priv_f2(req);
23099 + ret = caam_rsa_dec_priv_f1(req);
23104 static void caam_rsa_free_key(struct caam_rsa_key *key)
23111 + kzfree(key->qinv);
23112 + kzfree(key->tmp1);
23113 + kzfree(key->tmp2);
23122 + memset(key, 0, sizeof(*key));
23125 +static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
23127 + while (!**ptr && *nbytes) {
23134 + * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
23135 + * dP, dQ and qInv could decode to less than corresponding p, q length, as the
23136 + * BER-encoding requires that the minimum number of bytes be used to encode the
23137 + * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate
23140 + * @ptr : pointer to {dP, dQ, qInv} CRT member
23141 + * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
23142 + * @dstlen: length in bytes of corresponding p or q prime factor
23144 +static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
23148 + caam_rsa_drop_leading_zeros(&ptr, &nbytes);
23152 + dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
23156 + memcpy(dst + (dstlen - nbytes), ptr, nbytes);
23162 @@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(con
23166 - while (!*buf && *nbytes) {
23170 + caam_rsa_drop_leading_zeros(&buf, nbytes);
23174 val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
23176 @@ -395,7 +756,7 @@ static int caam_rsa_set_pub_key(struct c
23177 unsigned int keylen)
23179 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23180 - struct rsa_key raw_key = {0};
23181 + struct rsa_key raw_key = {NULL};
23182 struct caam_rsa_key *rsa_key = &ctx->key;
23185 @@ -437,11 +798,69 @@ err:
23189 +static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
23190 + struct rsa_key *raw_key)
23192 + struct caam_rsa_key *rsa_key = &ctx->key;
23193 + size_t p_sz = raw_key->p_sz;
23194 + size_t q_sz = raw_key->q_sz;
23196 + rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
23199 + rsa_key->p_sz = p_sz;
23201 + rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
23204 + rsa_key->q_sz = q_sz;
23206 + rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
23207 + if (!rsa_key->tmp1)
23210 + rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
23211 + if (!rsa_key->tmp2)
23214 + rsa_key->priv_form = FORM2;
23216 + rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
23217 + if (!rsa_key->dp)
23220 + rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
23221 + if (!rsa_key->dq)
23224 + rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
23226 + if (!rsa_key->qinv)
23229 + rsa_key->priv_form = FORM3;
23234 + kzfree(rsa_key->dq);
23236 + kzfree(rsa_key->dp);
23238 + kzfree(rsa_key->tmp2);
23240 + kzfree(rsa_key->tmp1);
23242 + kzfree(rsa_key->q);
23244 + kzfree(rsa_key->p);
23247 static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
23248 unsigned int keylen)
23250 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23251 - struct rsa_key raw_key = {0};
23252 + struct rsa_key raw_key = {NULL};
23253 struct caam_rsa_key *rsa_key = &ctx->key;
23256 @@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct
23257 memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
23258 memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
23260 + caam_rsa_set_priv_key_form(ctx, &raw_key);
23265 --- a/drivers/crypto/caam/caampkc.h
23266 +++ b/drivers/crypto/caam/caampkc.h
23267 @@ -13,21 +13,75 @@
23271 + * caam_priv_key_form - CAAM RSA private key representation
23272 + * CAAM RSA private key may have either of three forms.
23274 + * 1. The first representation consists of the pair (n, d), where the
23275 + * components have the following meanings:
23276 + * n the RSA modulus
23277 + * d the RSA private exponent
23279 + * 2. The second representation consists of the triplet (p, q, d), where the
23280 + * components have the following meanings:
23281 + * p the first prime factor of the RSA modulus n
23282 + * q the second prime factor of the RSA modulus n
23283 + * d the RSA private exponent
23285 + * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
23286 + * where the components have the following meanings:
23287 + * p the first prime factor of the RSA modulus n
23288 + * q the second prime factor of the RSA modulus n
23289 + *                                      dP      the first factor's CRT exponent
23290 + *                                      dQ      the second factor's CRT exponent
23291 + * qInv the (first) CRT coefficient
23293 + * The benefit of using the third or the second key form is lower computational
23294 + * cost for the decryption and signature operations.
23296 +enum caam_priv_key_form {
23303 * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
23304 * @n : RSA modulus raw byte stream
23305 * @e : RSA public exponent raw byte stream
23306 * @d : RSA private exponent raw byte stream
23307 + * @p : RSA prime factor p of RSA modulus n
23308 + * @q : RSA prime factor q of RSA modulus n
23309 + * @dp : RSA CRT exponent of p
23310 + * @dq           : RSA CRT exponent of q
23311 + * @qinv : RSA CRT coefficient
23312 + * @tmp1 : CAAM uses this temporary buffer as internal state buffer.
23313 + * It is assumed to be as long as p.
23314 + * @tmp2 : CAAM uses this temporary buffer as internal state buffer.
23315 + * It is assumed to be as long as q.
23316 * @n_sz : length in bytes of RSA modulus n
23317 * @e_sz : length in bytes of RSA public exponent
23318 * @d_sz : length in bytes of RSA private exponent
23319 + * @p_sz : length in bytes of RSA prime factor p of RSA modulus n
23320 + * @q_sz : length in bytes of RSA prime factor q of RSA modulus n
23321 + * @priv_form : CAAM RSA private key representation
23323 struct caam_rsa_key {
23339 + enum caam_priv_key_form priv_form;
23343 @@ -59,6 +113,8 @@ struct rsa_edesc {
23345 struct rsa_pub_pdb pub;
23346 struct rsa_priv_f1_pdb priv_f1;
23347 + struct rsa_priv_f2_pdb priv_f2;
23348 + struct rsa_priv_f3_pdb priv_f3;
23352 @@ -66,5 +122,7 @@ struct rsa_edesc {
23353 /* Descriptor construction primitives. */
23354 void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
23355 void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
23356 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
23357 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
23360 --- a/drivers/crypto/caam/caamrng.c
23361 +++ b/drivers/crypto/caam/caamrng.c
23364 /* length of descriptors */
23365 #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
23366 -#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
23367 +#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
23369 /* Buffer, its dma address and lock */
23371 @@ -100,8 +100,7 @@ static void rng_done(struct device *jrde
23373 struct buf_data *bd;
23375 - bd = (struct buf_data *)((char *)desc -
23376 - offsetof(struct buf_data, hw_desc));
23377 + bd = container_of(desc, struct buf_data, hw_desc[0]);
23380 caam_jr_strstatus(jrdev, err);
23381 @@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(str
23383 init_sh_desc(desc, HDR_SHARE_SERIAL);
23385 - /* Propagate errors from shared to job descriptor */
23386 - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
23388 /* Generate random bytes */
23389 append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
23391 @@ -289,11 +285,7 @@ static int caam_init_rng(struct caam_rng
23395 - err = caam_init_buf(ctx, 1);
23400 + return caam_init_buf(ctx, 1);
23403 static struct hwrng caam_rng = {
23404 @@ -351,7 +343,7 @@ static int __init caam_rng_init(void)
23405 pr_err("Job Ring Device allocation for transform failed\n");
23406 return PTR_ERR(dev);
23408 - rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
23409 + rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
23412 goto free_caam_alloc;
23413 --- a/drivers/crypto/caam/compat.h
23414 +++ b/drivers/crypto/caam/compat.h
23416 #include <linux/of_platform.h>
23417 #include <linux/dma-mapping.h>
23418 #include <linux/io.h>
23419 +#include <linux/iommu.h>
23420 #include <linux/spinlock.h>
23421 #include <linux/rtnetlink.h>
23422 #include <linux/in.h>
23423 --- a/drivers/crypto/caam/ctrl.c
23424 +++ b/drivers/crypto/caam/ctrl.c
23426 * Controller-level driver, kernel property detection, initialization
23428 * Copyright 2008-2012 Freescale Semiconductor, Inc.
23429 + * Copyright 2017 NXP
23432 #include <linux/device.h>
23433 #include <linux/of_address.h>
23434 #include <linux/of_irq.h>
23435 +#include <linux/sys_soc.h>
23437 #include "compat.h"
23439 #include "intern.h"
23441 #include "desc_constr.h"
23442 -#include "error.h"
23445 bool caam_little_end;
23446 EXPORT_SYMBOL(caam_little_end);
23448 +EXPORT_SYMBOL(caam_imx);
23450 +EXPORT_SYMBOL(caam_dpaa2);
23452 +#ifdef CONFIG_CAAM_QI
23457 * i.MX targets tend to have clock control subsystems that can
23458 * enable/disable clocking to our device.
23460 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
23461 -static inline struct clk *caam_drv_identify_clk(struct device *dev,
23464 - return devm_clk_get(dev, clk_name);
23467 static inline struct clk *caam_drv_identify_clk(struct device *dev,
23471 + return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
23476 * Descriptor to instantiate RNG State Handle 0 in normal mode and
23477 @@ -274,7 +275,7 @@ static int deinstantiate_rng(struct devi
23479 * If the corresponding bit is set, then it means the state
23480 * handle was initialized by us, and thus it needs to be
23481 - * deintialized as well
23482 + * deinitialized as well
23484 if ((1 << sh_idx) & state_handle_mask) {
23486 @@ -307,20 +308,24 @@ static int caam_remove(struct platform_d
23487 struct device *ctrldev;
23488 struct caam_drv_private *ctrlpriv;
23489 struct caam_ctrl __iomem *ctrl;
23492 ctrldev = &pdev->dev;
23493 ctrlpriv = dev_get_drvdata(ctrldev);
23494 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
23496 - /* Remove platform devices for JobRs */
23497 - for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
23498 - if (ctrlpriv->jrpdev[ring])
23499 - of_device_unregister(ctrlpriv->jrpdev[ring]);
23501 + /* Remove platform devices under the crypto node */
23502 + of_platform_depopulate(ctrldev);
23504 +#ifdef CONFIG_CAAM_QI
23505 + if (ctrlpriv->qidev)
23506 + caam_qi_shutdown(ctrlpriv->qidev);
23509 - /* De-initialize RNG state handles initialized by this driver. */
23510 - if (ctrlpriv->rng4_sh_init)
23512 + * De-initialize RNG state handles initialized by this driver.
23513 + * In case of DPAA 2.x, RNG is managed by MC firmware.
23515 + if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
23516 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
23518 /* Shut down debug views */
23519 @@ -335,8 +340,8 @@ static int caam_remove(struct platform_d
23520 clk_disable_unprepare(ctrlpriv->caam_ipg);
23521 clk_disable_unprepare(ctrlpriv->caam_mem);
23522 clk_disable_unprepare(ctrlpriv->caam_aclk);
23523 - clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23525 + if (ctrlpriv->caam_emi_slow)
23526 + clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23530 @@ -370,11 +375,8 @@ static void kick_trng(struct platform_de
23532 val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
23533 >> RTSDCTL_ENT_DLY_SHIFT;
23534 - if (ent_delay <= val) {
23535 - /* put RNG4 into run mode */
23536 - clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
23539 + if (ent_delay <= val)
23542 val = rd_reg32(&r4tst->rtsdctl);
23543 val = (val & ~RTSDCTL_ENT_DLY_MASK) |
23544 @@ -386,15 +388,12 @@ static void kick_trng(struct platform_de
23545 wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
23546 /* read the control register */
23547 val = rd_reg32(&r4tst->rtmctl);
23550 * select raw sampling in both entropy shifter
23551 - * and statistical checker
23552 +	 * and statistical checker; put RNG4 into run mode
23554 - clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
23555 - /* put RNG4 into run mode */
23556 - clrsetbits_32(&val, RTMCTL_PRGM, 0);
23557 - /* write back the control register */
23558 - wr_reg32(&r4tst->rtmctl, val);
23559 + clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
23563 @@ -415,28 +414,26 @@ int caam_get_era(void)
23565 EXPORT_SYMBOL(caam_get_era);
23567 -#ifdef CONFIG_DEBUG_FS
23568 -static int caam_debugfs_u64_get(void *data, u64 *val)
23570 - *val = caam64_to_cpu(*(u64 *)data);
23574 -static int caam_debugfs_u32_get(void *data, u64 *val)
23576 - *val = caam32_to_cpu(*(u32 *)data);
23580 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
23581 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
23583 +static const struct of_device_id caam_match[] = {
23585 + .compatible = "fsl,sec-v4.0",
23588 + .compatible = "fsl,sec4.0",
23592 +MODULE_DEVICE_TABLE(of, caam_match);
23594 /* Probe routine for CAAM top (controller) level */
23595 static int caam_probe(struct platform_device *pdev)
23597 - int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
23598 + int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
23600 + static const struct soc_device_attribute imx_soc[] = {
23601 + {.family = "Freescale i.MX"},
23604 struct device *dev;
23605 struct device_node *nprop, *np;
23606 struct caam_ctrl __iomem *ctrl;
23607 @@ -456,9 +453,10 @@ static int caam_probe(struct platform_de
23610 dev_set_drvdata(dev, ctrlpriv);
23611 - ctrlpriv->pdev = pdev;
23612 nprop = pdev->dev.of_node;
23614 + caam_imx = (bool)soc_device_match(imx_soc);
23616 /* Enable clocking */
23617 clk = caam_drv_identify_clk(&pdev->dev, "ipg");
23619 @@ -487,14 +485,16 @@ static int caam_probe(struct platform_de
23621 ctrlpriv->caam_aclk = clk;
23623 - clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
23624 - if (IS_ERR(clk)) {
23625 - ret = PTR_ERR(clk);
23626 - dev_err(&pdev->dev,
23627 - "can't identify CAAM emi_slow clk: %d\n", ret);
23629 + if (!of_machine_is_compatible("fsl,imx6ul")) {
23630 + clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
23631 + if (IS_ERR(clk)) {
23632 + ret = PTR_ERR(clk);
23633 + dev_err(&pdev->dev,
23634 + "can't identify CAAM emi_slow clk: %d\n", ret);
23637 + ctrlpriv->caam_emi_slow = clk;
23639 - ctrlpriv->caam_emi_slow = clk;
23641 ret = clk_prepare_enable(ctrlpriv->caam_ipg);
23643 @@ -515,11 +515,13 @@ static int caam_probe(struct platform_de
23644 goto disable_caam_mem;
23647 - ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
23649 - dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
23651 - goto disable_caam_aclk;
23652 + if (ctrlpriv->caam_emi_slow) {
23653 + ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
23655 + dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
23657 + goto disable_caam_aclk;
23661 /* Get configuration properties from device tree */
23662 @@ -546,13 +548,13 @@ static int caam_probe(struct platform_de
23664 BLOCK_OFFSET = PG_SIZE_64K;
23666 - ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
23667 - ctrlpriv->assure = (struct caam_assurance __force *)
23668 - ((uint8_t *)ctrl +
23669 + ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
23670 + ctrlpriv->assure = (struct caam_assurance __iomem __force *)
23671 + ((__force uint8_t *)ctrl +
23672 BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
23674 - ctrlpriv->deco = (struct caam_deco __force *)
23675 - ((uint8_t *)ctrl +
23676 + ctrlpriv->deco = (struct caam_deco __iomem __force *)
23677 + ((__force uint8_t *)ctrl +
23678 BLOCK_OFFSET * DECO_BLOCK_NUMBER
23681 @@ -561,12 +563,17 @@ static int caam_probe(struct platform_de
23684 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
23685 - * long pointers in master configuration register
23686 + * long pointers in master configuration register.
23687 + * In case of DPAA 2.x, Management Complex firmware performs
23688 + * the configuration.
23690 - clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
23691 - MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
23692 - MCFGR_WDENABLE | MCFGR_LARGE_BURST |
23693 - (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
23694 + caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
23696 + clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
23697 + MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
23698 + MCFGR_WDENABLE | MCFGR_LARGE_BURST |
23699 + (sizeof(dma_addr_t) == sizeof(u64) ?
23700 + MCFGR_LONG_PTR : 0));
23703 * Read the Compile Time paramters and SCFGR to determine
23704 @@ -594,64 +601,69 @@ static int caam_probe(struct platform_de
23705 JRSTART_JR1_START | JRSTART_JR2_START |
23706 JRSTART_JR3_START);
23708 - if (sizeof(dma_addr_t) == sizeof(u64))
23709 - if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
23710 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
23711 + if (sizeof(dma_addr_t) == sizeof(u64)) {
23713 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
23714 + else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
23715 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
23717 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
23719 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
23720 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
23722 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
23725 + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
23726 + goto iounmap_ctrl;
23730 - * Detect and enable JobRs
23731 - * First, find out how many ring spec'ed, allocate references
23732 - * for all, then go probe each one.
23735 - for_each_available_child_of_node(nprop, np)
23736 - if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
23737 - of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
23739 + ctrlpriv->era = caam_get_era();
23741 - ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
23742 - sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
23743 - if (ctrlpriv->jrpdev == NULL) {
23745 + ret = of_platform_populate(nprop, caam_match, NULL, dev);
23747 + dev_err(dev, "JR platform devices creation error\n");
23751 +#ifdef CONFIG_DEBUG_FS
23753 + * FIXME: needs better naming distinction, as some amalgamation of
23754 + * "caam" and nprop->full_name. The OF name isn't distinctive,
23755 + * but does separate instances
23757 + perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
23759 + ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
23760 + ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
23763 - ctrlpriv->total_jobrs = 0;
23764 for_each_available_child_of_node(nprop, np)
23765 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
23766 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
23767 - ctrlpriv->jrpdev[ring] =
23768 - of_platform_device_create(np, NULL, dev);
23769 - if (!ctrlpriv->jrpdev[ring]) {
23770 - pr_warn("JR%d Platform device creation error\n",
23774 - ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
23775 - ((uint8_t *)ctrl +
23776 + ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
23777 + ((__force uint8_t *)ctrl +
23778 (ring + JR_BLOCK_NUMBER) *
23781 ctrlpriv->total_jobrs++;
23786 - /* Check to see if QI present. If so, enable */
23787 - ctrlpriv->qi_present =
23788 - !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
23789 - CTPR_MS_QI_MASK);
23790 - if (ctrlpriv->qi_present) {
23791 - ctrlpriv->qi = (struct caam_queue_if __force *)
23792 - ((uint8_t *)ctrl +
23793 + /* Check to see if (DPAA 1.x) QI present. If so, enable */
23794 + ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
23795 + if (ctrlpriv->qi_present && !caam_dpaa2) {
23796 + ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
23797 + ((__force uint8_t *)ctrl +
23798 BLOCK_OFFSET * QI_BLOCK_NUMBER
23800 /* This is all that's required to physically enable QI */
23801 wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
23803 + /* If QMAN driver is present, init CAAM-QI backend */
23804 +#ifdef CONFIG_CAAM_QI
23805 + ret = caam_qi_init(pdev);
23807 + dev_err(dev, "caam qi i/f init failed: %d\n", ret);
23811 /* If no QI and no rings specified, quit and go home */
23812 @@ -666,8 +678,10 @@ static int caam_probe(struct platform_de
23814 * If SEC has RNG version >= 4 and RNG state handle has not been
23815 * already instantiated, do RNG instantiation
23816 + * In case of DPAA 2.x, RNG is managed by MC firmware.
23818 - if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
23819 + if (!caam_dpaa2 &&
23820 + (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
23821 ctrlpriv->rng4_sh_init =
23822 rd_reg32(&ctrl->r4tst[0].rdsta);
23824 @@ -734,78 +748,47 @@ static int caam_probe(struct platform_de
23826 /* Report "alive" for developer to see */
23827 dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
23829 - dev_info(dev, "job rings = %d, qi = %d\n",
23830 - ctrlpriv->total_jobrs, ctrlpriv->qi_present);
23832 + dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
23833 + ctrlpriv->total_jobrs, ctrlpriv->qi_present,
23834 + caam_dpaa2 ? "yes" : "no");
23836 #ifdef CONFIG_DEBUG_FS
23838 - * FIXME: needs better naming distinction, as some amalgamation of
23839 - * "caam" and nprop->full_name. The OF name isn't distinctive,
23840 - * but does separate instances
23842 - perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
23844 - ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
23845 - ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
23847 - /* Controller-level - performance monitor counters */
23849 - ctrlpriv->ctl_rq_dequeued =
23850 - debugfs_create_file("rq_dequeued",
23851 - S_IRUSR | S_IRGRP | S_IROTH,
23852 - ctrlpriv->ctl, &perfmon->req_dequeued,
23853 - &caam_fops_u64_ro);
23854 - ctrlpriv->ctl_ob_enc_req =
23855 - debugfs_create_file("ob_rq_encrypted",
23856 - S_IRUSR | S_IRGRP | S_IROTH,
23857 - ctrlpriv->ctl, &perfmon->ob_enc_req,
23858 - &caam_fops_u64_ro);
23859 - ctrlpriv->ctl_ib_dec_req =
23860 - debugfs_create_file("ib_rq_decrypted",
23861 - S_IRUSR | S_IRGRP | S_IROTH,
23862 - ctrlpriv->ctl, &perfmon->ib_dec_req,
23863 - &caam_fops_u64_ro);
23864 - ctrlpriv->ctl_ob_enc_bytes =
23865 - debugfs_create_file("ob_bytes_encrypted",
23866 - S_IRUSR | S_IRGRP | S_IROTH,
23867 - ctrlpriv->ctl, &perfmon->ob_enc_bytes,
23868 - &caam_fops_u64_ro);
23869 - ctrlpriv->ctl_ob_prot_bytes =
23870 - debugfs_create_file("ob_bytes_protected",
23871 - S_IRUSR | S_IRGRP | S_IROTH,
23872 - ctrlpriv->ctl, &perfmon->ob_prot_bytes,
23873 - &caam_fops_u64_ro);
23874 - ctrlpriv->ctl_ib_dec_bytes =
23875 - debugfs_create_file("ib_bytes_decrypted",
23876 - S_IRUSR | S_IRGRP | S_IROTH,
23877 - ctrlpriv->ctl, &perfmon->ib_dec_bytes,
23878 - &caam_fops_u64_ro);
23879 - ctrlpriv->ctl_ib_valid_bytes =
23880 - debugfs_create_file("ib_bytes_validated",
23881 - S_IRUSR | S_IRGRP | S_IROTH,
23882 - ctrlpriv->ctl, &perfmon->ib_valid_bytes,
23883 - &caam_fops_u64_ro);
23884 + debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
23885 + ctrlpriv->ctl, &perfmon->req_dequeued,
23886 + &caam_fops_u64_ro);
23887 + debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
23888 + ctrlpriv->ctl, &perfmon->ob_enc_req,
23889 + &caam_fops_u64_ro);
23890 + debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
23891 + ctrlpriv->ctl, &perfmon->ib_dec_req,
23892 + &caam_fops_u64_ro);
23893 + debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
23894 + ctrlpriv->ctl, &perfmon->ob_enc_bytes,
23895 + &caam_fops_u64_ro);
23896 + debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
23897 + ctrlpriv->ctl, &perfmon->ob_prot_bytes,
23898 + &caam_fops_u64_ro);
23899 + debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
23900 + ctrlpriv->ctl, &perfmon->ib_dec_bytes,
23901 + &caam_fops_u64_ro);
23902 + debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
23903 + ctrlpriv->ctl, &perfmon->ib_valid_bytes,
23904 + &caam_fops_u64_ro);
23906 /* Controller level - global status values */
23907 - ctrlpriv->ctl_faultaddr =
23908 - debugfs_create_file("fault_addr",
23909 - S_IRUSR | S_IRGRP | S_IROTH,
23910 - ctrlpriv->ctl, &perfmon->faultaddr,
23911 - &caam_fops_u32_ro);
23912 - ctrlpriv->ctl_faultdetail =
23913 - debugfs_create_file("fault_detail",
23914 - S_IRUSR | S_IRGRP | S_IROTH,
23915 - ctrlpriv->ctl, &perfmon->faultdetail,
23916 - &caam_fops_u32_ro);
23917 - ctrlpriv->ctl_faultstatus =
23918 - debugfs_create_file("fault_status",
23919 - S_IRUSR | S_IRGRP | S_IROTH,
23920 - ctrlpriv->ctl, &perfmon->status,
23921 - &caam_fops_u32_ro);
23922 + debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
23923 + ctrlpriv->ctl, &perfmon->faultaddr,
23924 + &caam_fops_u32_ro);
23925 + debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
23926 + ctrlpriv->ctl, &perfmon->faultdetail,
23927 + &caam_fops_u32_ro);
23928 + debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
23929 + ctrlpriv->ctl, &perfmon->status,
23930 + &caam_fops_u32_ro);
23932 /* Internal covering keys (useful in non-secure mode only) */
23933 - ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
23934 + ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
23935 ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
23936 ctrlpriv->ctl_kek = debugfs_create_blob("kek",
23938 @@ -813,7 +796,7 @@ static int caam_probe(struct platform_de
23940 &ctrlpriv->ctl_kek_wrap);
23942 - ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
23943 + ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
23944 ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
23945 ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
23947 @@ -821,7 +804,7 @@ static int caam_probe(struct platform_de
23949 &ctrlpriv->ctl_tkek_wrap);
23951 - ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
23952 + ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
23953 ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
23954 ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
23956 @@ -832,13 +815,17 @@ static int caam_probe(struct platform_de
23960 +#ifdef CONFIG_DEBUG_FS
23961 + debugfs_remove_recursive(ctrlpriv->dfs_root);
23968 disable_caam_emi_slow:
23969 - clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23970 + if (ctrlpriv->caam_emi_slow)
23971 + clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23973 clk_disable_unprepare(ctrlpriv->caam_aclk);
23975 @@ -848,17 +835,6 @@ disable_caam_ipg:
23979 -static struct of_device_id caam_match[] = {
23981 - .compatible = "fsl,sec-v4.0",
23984 - .compatible = "fsl,sec4.0",
23988 -MODULE_DEVICE_TABLE(of, caam_match);
23990 static struct platform_driver caam_driver = {
23993 --- a/drivers/crypto/caam/ctrl.h
23994 +++ b/drivers/crypto/caam/ctrl.h
23996 /* Prototypes for backend-level services exposed to APIs */
23997 int caam_get_era(void);
23999 +extern bool caam_dpaa2;
24001 #endif /* CTRL_H */
24002 --- a/drivers/crypto/caam/desc.h
24003 +++ b/drivers/crypto/caam/desc.h
24005 #define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
24006 #define SEC4_SG_OFFSET_MASK 0x00001fff
24008 -struct sec4_sg_entry {
24014 /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
24015 #define MAX_CAAM_DESCSIZE 64
24017 @@ -47,6 +41,7 @@ struct sec4_sg_entry {
24018 #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
24019 #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
24020 #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
24021 +#define CMD_MOVEB (0x07 << CMD_SHIFT)
24022 #define CMD_STORE (0x0a << CMD_SHIFT)
24023 #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
24024 #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
24025 @@ -90,8 +85,8 @@ struct sec4_sg_entry {
24026 #define HDR_ZRO 0x00008000
24028 /* Start Index or SharedDesc Length */
24029 -#define HDR_START_IDX_MASK 0x3f
24030 #define HDR_START_IDX_SHIFT 16
24031 +#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
24033 /* If shared descriptor header, 6-bit length */
24034 #define HDR_DESCLEN_SHR_MASK 0x3f
24035 @@ -121,10 +116,10 @@ struct sec4_sg_entry {
24036 #define HDR_PROP_DNR 0x00000800
24038 /* JobDesc/SharedDesc share property */
24039 -#define HDR_SD_SHARE_MASK 0x03
24040 #define HDR_SD_SHARE_SHIFT 8
24041 -#define HDR_JD_SHARE_MASK 0x07
24042 +#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
24043 #define HDR_JD_SHARE_SHIFT 8
24044 +#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
24046 #define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
24047 #define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
24048 @@ -235,7 +230,7 @@ struct sec4_sg_entry {
24049 #define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
24050 #define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
24051 #define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
24052 -#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
24053 +#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
24054 #define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
24055 #define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
24056 #define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
24057 @@ -360,6 +355,7 @@ struct sec4_sg_entry {
24058 #define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
24059 #define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
24060 #define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
24061 +#define FIFOLD_TYPE_IFIFO (0x0f << FIFOLD_TYPE_SHIFT)
24063 /* Other types. Need to OR in last/flush bits as desired */
24064 #define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
24065 @@ -400,7 +396,7 @@ struct sec4_sg_entry {
24066 #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
24067 #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
24068 #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
24069 -#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
24070 +#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
24071 #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
24072 #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
24073 #define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
24074 @@ -413,6 +409,7 @@ struct sec4_sg_entry {
24075 #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
24076 #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
24077 #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
24078 +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
24079 #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
24082 @@ -449,6 +446,18 @@ struct sec4_sg_entry {
24083 #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
24084 #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
24085 #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
24086 +#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
24087 +#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
24088 +#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
24089 +#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
24090 +#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
24091 +#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
24092 +#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
24093 +#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
24094 +#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
24095 +#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
24096 +#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
24097 +#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
24099 /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
24100 #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
24101 @@ -1098,6 +1107,22 @@ struct sec4_sg_entry {
24102 /* MacSec protinfos */
24103 #define OP_PCL_MACSEC 0x0001
24105 +/* Derived Key Protocol (DKP) Protinfo */
24106 +#define OP_PCL_DKP_SRC_SHIFT 14
24107 +#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
24108 +#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
24109 +#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
24110 +#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
24111 +#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
24112 +#define OP_PCL_DKP_DST_SHIFT 12
24113 +#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
24114 +#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
24115 +#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
24116 +#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
24117 +#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
24118 +#define OP_PCL_DKP_KEY_SHIFT 0
24119 +#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
24121 /* PKI unidirectional protocol protinfo bits */
24122 #define OP_PCL_PKPROT_TEST 0x0008
24123 #define OP_PCL_PKPROT_DECRYPT 0x0004
24124 @@ -1107,8 +1132,8 @@ struct sec4_sg_entry {
24125 /* For non-protocol/alg-only op commands */
24126 #define OP_ALG_TYPE_SHIFT 24
24127 #define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
24128 -#define OP_ALG_TYPE_CLASS1 2
24129 -#define OP_ALG_TYPE_CLASS2 4
24130 +#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
24131 +#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
24133 #define OP_ALG_ALGSEL_SHIFT 16
24134 #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
24135 @@ -1249,7 +1274,7 @@ struct sec4_sg_entry {
24136 #define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
24138 /* PKHA mode copy-memory functions */
24139 -#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
24140 +#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
24141 #define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
24142 #define OP_ALG_PKMODE_DST_REG_SHIFT 10
24143 #define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
24144 @@ -1445,10 +1470,11 @@ struct sec4_sg_entry {
24145 #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
24146 #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
24147 #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
24148 -#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
24149 +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
24150 #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
24151 #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
24152 #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
24153 +#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
24155 /* Destination selectors */
24156 #define MATH_DEST_SHIFT 8
24157 @@ -1457,6 +1483,7 @@ struct sec4_sg_entry {
24158 #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
24159 #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
24160 #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
24161 +#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
24162 #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
24163 #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
24164 #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
24165 @@ -1629,4 +1656,31 @@ struct sec4_sg_entry {
24166 /* Frame Descriptor Command for Replacement Job Descriptor */
24167 #define FD_CMD_REPLACE_JOB_DESC 0x20000000
24169 +/* CHA Control Register bits */
24170 +#define CCTRL_RESET_CHA_ALL 0x1
24171 +#define CCTRL_RESET_CHA_AESA 0x2
24172 +#define CCTRL_RESET_CHA_DESA 0x4
24173 +#define CCTRL_RESET_CHA_AFHA 0x8
24174 +#define CCTRL_RESET_CHA_KFHA 0x10
24175 +#define CCTRL_RESET_CHA_SF8A 0x20
24176 +#define CCTRL_RESET_CHA_PKHA 0x40
24177 +#define CCTRL_RESET_CHA_MDHA 0x80
24178 +#define CCTRL_RESET_CHA_CRCA 0x100
24179 +#define CCTRL_RESET_CHA_RNG 0x200
24180 +#define CCTRL_RESET_CHA_SF9A 0x400
24181 +#define CCTRL_RESET_CHA_ZUCE 0x800
24182 +#define CCTRL_RESET_CHA_ZUCA 0x1000
24183 +#define CCTRL_UNLOAD_PK_A0 0x10000
24184 +#define CCTRL_UNLOAD_PK_A1 0x20000
24185 +#define CCTRL_UNLOAD_PK_A2 0x40000
24186 +#define CCTRL_UNLOAD_PK_A3 0x80000
24187 +#define CCTRL_UNLOAD_PK_B0 0x100000
24188 +#define CCTRL_UNLOAD_PK_B1 0x200000
24189 +#define CCTRL_UNLOAD_PK_B2 0x400000
24190 +#define CCTRL_UNLOAD_PK_B3 0x800000
24191 +#define CCTRL_UNLOAD_PK_N 0x1000000
24192 +#define CCTRL_UNLOAD_PK_A 0x4000000
24193 +#define CCTRL_UNLOAD_PK_B 0x8000000
24194 +#define CCTRL_UNLOAD_SBOX 0x10000000
24196 #endif /* DESC_H */
24197 --- a/drivers/crypto/caam/desc_constr.h
24198 +++ b/drivers/crypto/caam/desc_constr.h
24200 * Copyright 2008-2012 Freescale Semiconductor, Inc.
24203 +#ifndef DESC_CONSTR_H
24204 +#define DESC_CONSTR_H
24209 @@ -33,38 +36,39 @@
24211 extern bool caam_little_end;
24213 -static inline int desc_len(u32 *desc)
24214 +static inline int desc_len(u32 * const desc)
24216 return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
24219 -static inline int desc_bytes(void *desc)
24220 +static inline int desc_bytes(void * const desc)
24222 return desc_len(desc) * CAAM_CMD_SZ;
24225 -static inline u32 *desc_end(u32 *desc)
24226 +static inline u32 *desc_end(u32 * const desc)
24228 return desc + desc_len(desc);
24231 -static inline void *sh_desc_pdb(u32 *desc)
24232 +static inline void *sh_desc_pdb(u32 * const desc)
24237 -static inline void init_desc(u32 *desc, u32 options)
24238 +static inline void init_desc(u32 * const desc, u32 options)
24240 *desc = cpu_to_caam32((options | HDR_ONE) + 1);
24243 -static inline void init_sh_desc(u32 *desc, u32 options)
24244 +static inline void init_sh_desc(u32 * const desc, u32 options)
24247 init_desc(desc, CMD_SHARED_DESC_HDR | options);
24250 -static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
24251 +static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
24252 + size_t pdb_bytes)
24254 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
24256 @@ -72,19 +76,20 @@ static inline void init_sh_desc_pdb(u32
24260 -static inline void init_job_desc(u32 *desc, u32 options)
24261 +static inline void init_job_desc(u32 * const desc, u32 options)
24263 init_desc(desc, CMD_DESC_HDR | options);
24266 -static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
24267 +static inline void init_job_desc_pdb(u32 * const desc, u32 options,
24268 + size_t pdb_bytes)
24270 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
24272 init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
24275 -static inline void append_ptr(u32 *desc, dma_addr_t ptr)
24276 +static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
24278 dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
24280 @@ -94,8 +99,8 @@ static inline void append_ptr(u32 *desc,
24281 CAAM_PTR_SZ / CAAM_CMD_SZ);
24284 -static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
24286 +static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
24287 + int len, u32 options)
24290 init_job_desc(desc, HDR_SHARED | options |
24291 @@ -103,7 +108,7 @@ static inline void init_job_desc_shared(
24292 append_ptr(desc, ptr);
24295 -static inline void append_data(u32 *desc, void *data, int len)
24296 +static inline void append_data(u32 * const desc, const void *data, int len)
24298 u32 *offset = desc_end(desc);
24300 @@ -114,7 +119,7 @@ static inline void append_data(u32 *desc
24301 (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
24304 -static inline void append_cmd(u32 *desc, u32 command)
24305 +static inline void append_cmd(u32 * const desc, u32 command)
24307 u32 *cmd = desc_end(desc);
24309 @@ -125,7 +130,7 @@ static inline void append_cmd(u32 *desc,
24311 #define append_u32 append_cmd
24313 -static inline void append_u64(u32 *desc, u64 data)
24314 +static inline void append_u64(u32 * const desc, u64 data)
24316 u32 *offset = desc_end(desc);
24318 @@ -142,14 +147,14 @@ static inline void append_u64(u32 *desc,
24321 /* Write command without affecting header, and return pointer to next word */
24322 -static inline u32 *write_cmd(u32 *desc, u32 command)
24323 +static inline u32 *write_cmd(u32 * const desc, u32 command)
24325 *desc = cpu_to_caam32(command);
24330 -static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
24331 +static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
24334 append_cmd(desc, command | len);
24335 @@ -157,7 +162,7 @@ static inline void append_cmd_ptr(u32 *d
24338 /* Write length after pointer, rather than inside command */
24339 -static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
24340 +static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
24341 unsigned int len, u32 command)
24343 append_cmd(desc, command);
24344 @@ -166,7 +171,7 @@ static inline void append_cmd_ptr_extlen
24345 append_cmd(desc, len);
24348 -static inline void append_cmd_data(u32 *desc, void *data, int len,
24349 +static inline void append_cmd_data(u32 * const desc, const void *data, int len,
24352 append_cmd(desc, command | IMMEDIATE | len);
24353 @@ -174,7 +179,7 @@ static inline void append_cmd_data(u32 *
24356 #define APPEND_CMD_RET(cmd, op) \
24357 -static inline u32 *append_##cmd(u32 *desc, u32 options) \
24358 +static inline u32 *append_##cmd(u32 * const desc, u32 options) \
24360 u32 *cmd = desc_end(desc); \
24362 @@ -183,14 +188,15 @@ static inline u32 *append_##cmd(u32 *des
24364 APPEND_CMD_RET(jump, JUMP)
24365 APPEND_CMD_RET(move, MOVE)
24366 +APPEND_CMD_RET(moveb, MOVEB)
24368 -static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
24369 +static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
24371 *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
24372 (desc_len(desc) - (jump_cmd - desc)));
24375 -static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
24376 +static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
24378 u32 val = caam32_to_cpu(*move_cmd);
24380 @@ -200,7 +206,7 @@ static inline void set_move_tgt_here(u32
24383 #define APPEND_CMD(cmd, op) \
24384 -static inline void append_##cmd(u32 *desc, u32 options) \
24385 +static inline void append_##cmd(u32 * const desc, u32 options) \
24388 append_cmd(desc, CMD_##op | options); \
24389 @@ -208,7 +214,8 @@ static inline void append_##cmd(u32 *des
24390 APPEND_CMD(operation, OPERATION)
24392 #define APPEND_CMD_LEN(cmd, op) \
24393 -static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
24394 +static inline void append_##cmd(u32 * const desc, unsigned int len, \
24398 append_cmd(desc, CMD_##op | len | options); \
24399 @@ -220,8 +227,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_L
24400 APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
24402 #define APPEND_CMD_PTR(cmd, op) \
24403 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
24405 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
24406 + unsigned int len, u32 options) \
24409 append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
24410 @@ -231,8 +238,8 @@ APPEND_CMD_PTR(load, LOAD)
24411 APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
24412 APPEND_CMD_PTR(fifo_store, FIFO_STORE)
24414 -static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
24416 +static inline void append_store(u32 * const desc, dma_addr_t ptr,
24417 + unsigned int len, u32 options)
24421 @@ -249,7 +256,8 @@ static inline void append_store(u32 *des
24424 #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
24425 -static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
24426 +static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
24427 + dma_addr_t ptr, \
24428 unsigned int len, \
24431 @@ -263,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
24432 APPEND_SEQ_PTR_INTLEN(out, OUT)
24434 #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
24435 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
24436 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
24437 unsigned int len, u32 options) \
24440 @@ -273,7 +281,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
24441 APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
24443 #define APPEND_CMD_PTR_EXTLEN(cmd, op) \
24444 -static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
24445 +static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
24446 unsigned int len, u32 options) \
24449 @@ -287,7 +295,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_O
24450 * the size of its type
24452 #define APPEND_CMD_PTR_LEN(cmd, op, type) \
24453 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
24454 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
24455 type len, u32 options) \
24458 @@ -304,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
24459 * from length of immediate data provided, e.g., split keys
24461 #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
24462 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
24463 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
24464 unsigned int data_len, \
24465 unsigned int len, u32 options) \
24467 @@ -315,7 +323,7 @@ static inline void append_##cmd##_as_imm
24468 APPEND_CMD_PTR_TO_IMM2(key, KEY);
24470 #define APPEND_CMD_RAW_IMM(cmd, op, type) \
24471 -static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
24472 +static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
24476 @@ -426,3 +434,107 @@ do { \
24477 APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
24478 #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
24479 APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
24482 + * struct alginfo - Container for algorithm details
24483 + * @algtype: algorithm selector; for valid values, see documentation of the
24484 + * functions where it is used.
24485 + * @keylen: length of the provided algorithm key, in bytes
24486 + * @keylen_pad: padded length of the provided algorithm key, in bytes
24487 + * @key: address where algorithm key resides; virtual address if key_inline
24488 + * is true, dma (bus) address if key_inline is false.
24489 + * @key_inline: true - key can be inlined in the descriptor; false - key is
24490 + * referenced by the descriptor
24494 + unsigned int keylen;
24495 + unsigned int keylen_pad;
24497 + dma_addr_t key_dma;
24498 + const void *key_virt;
24504 + * desc_inline_query() - Provide indications on which data items can be inlined
24505 + * and which shall be referenced in a shared descriptor.
24506 + * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
24507 + * excluding the data items to be inlined (or corresponding
24508 + * pointer if an item is not inlined). Each cnstr_* function that
24509 + * generates descriptors should have a define mentioning
24510 + * corresponding length.
24511 + * @jd_len: Maximum length of the job descriptor(s) that will be used
24512 + * together with the shared descriptor.
24513 + * @data_len: Array of lengths of the data items trying to be inlined
24514 + * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
24516 + * @count: Number of data items (size of @data_len array); must be <= 32
24518 + * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
24519 + * check @inl_mask for details.
24521 +static inline int desc_inline_query(unsigned int sd_base_len,
24522 + unsigned int jd_len, unsigned int *data_len,
24523 + u32 *inl_mask, unsigned int count)
24525 + int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
24529 + for (i = 0; (i < count) && (rem_bytes > 0); i++) {
24530 + if (rem_bytes - (int)(data_len[i] +
24531 + (count - i - 1) * CAAM_PTR_SZ) >= 0) {
24532 + rem_bytes -= data_len[i];
24533 + *inl_mask |= (1 << i);
24535 + rem_bytes -= CAAM_PTR_SZ;
24539 + return (rem_bytes >= 0) ? 0 : -1;
24543 + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
24544 + * @desc: pointer to buffer used for descriptor construction
24545 + * @adata: pointer to authentication transform definitions.
24546 + * keylen should be the length of initial key, while keylen_pad
24547 + * the length of the derived (split) key.
24548 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
24549 + * SHA256, SHA384, SHA512}.
24551 +static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
24556 + * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
24557 + * to OP_PCLID_DKP_{MD5, SHA*}
24559 + protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
24560 + (0x20 << OP_ALG_ALGSEL_SHIFT);
24562 + if (adata->key_inline) {
24565 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
24566 + OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
24568 + append_data(desc, adata->key_virt, adata->keylen);
24570 + /* Reserve space in descriptor buffer for the derived key */
24571 + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
24572 + ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
24574 + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
24576 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
24577 + OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
24579 + append_ptr(desc, adata->key_dma);
24583 +#endif /* DESC_CONSTR_H */
24585 +++ b/drivers/crypto/caam/dpseci.c
24588 + * Copyright 2013-2016 Freescale Semiconductor Inc.
24589 + * Copyright 2017 NXP
24591 + * Redistribution and use in source and binary forms, with or without
24592 + * modification, are permitted provided that the following conditions are met:
24593 + * * Redistributions of source code must retain the above copyright
24594 + * notice, this list of conditions and the following disclaimer.
24595 + * * Redistributions in binary form must reproduce the above copyright
24596 + * notice, this list of conditions and the following disclaimer in the
24597 + * documentation and/or other materials provided with the distribution.
24598 + * * Neither the names of the above-listed copyright holders nor the
24599 + * names of any contributors may be used to endorse or promote products
24600 + * derived from this software without specific prior written permission.
24603 + * ALTERNATIVELY, this software may be distributed under the terms of the
24604 + * GNU General Public License ("GPL") as published by the Free Software
24605 + * Foundation, either version 2 of that License or (at your option) any
24608 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24609 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24610 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24611 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
24612 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24613 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24614 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24615 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24616 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24617 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
24618 + * POSSIBILITY OF SUCH DAMAGE.
24621 +#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
24622 +#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
24623 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
24624 +#include "dpseci.h"
24625 +#include "dpseci_cmd.h"
24628 + * dpseci_open() - Open a control session for the specified object
24629 + * @mc_io: Pointer to MC portal's I/O object
24630 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24631 + * @dpseci_id: DPSECI unique ID
24632 + * @token: Returned token; use in subsequent API calls
24634 + * This function can be used to open a control session for an already created
24635 + * object; an object may have been declared in the DPL or by calling the
24636 + * dpseci_create() function.
24637 + * This function returns a unique authentication token, associated with the
24638 + * specific object ID and the specific MC portal; this token must be used in all
24639 + * subsequent commands for this specific object.
24641 + * Return: '0' on success, error code otherwise
24643 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
24646 + struct mc_command cmd = { 0 };
24647 + struct dpseci_cmd_open *cmd_params;
24650 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
24653 + cmd_params = (struct dpseci_cmd_open *)cmd.params;
24654 + cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
24655 + err = mc_send_command(mc_io, &cmd);
24659 + *token = mc_cmd_hdr_read_token(&cmd);
24665 + * dpseci_close() - Close the control session of the object
24666 + * @mc_io: Pointer to MC portal's I/O object
24667 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24668 + * @token: Token of DPSECI object
24670 + * After this function is called, no further operations are allowed on the
24671 + * object without opening a new control session.
24673 + * Return: '0' on success, error code otherwise
24675 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
24677 + struct mc_command cmd = { 0 };
24679 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
24682 + return mc_send_command(mc_io, &cmd);
24686 + * dpseci_create() - Create the DPSECI object
24687 + * @mc_io: Pointer to MC portal's I/O object
24688 + * @dprc_token: Parent container token; '0' for default container
24689 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24690 + * @cfg: Configuration structure
24691 + * @obj_id: returned object id
24693 + * Create the DPSECI object, allocate required resources and perform required
24694 + * initialization.
24696 + * The object can be created either by declaring it in the DPL file, or by
24697 + * calling this function.
24699 + * The function accepts an authentication token of a parent container that this
24700 + * object should be assigned to. The token can be '0' so the object will be
24701 + * assigned to the default container.
24702 + * The newly created object can be opened with the returned object id and using
24703 + * the container's associated tokens and MC portals.
24705 + * Return: '0' on success, error code otherwise
24707 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
24708 + const struct dpseci_cfg *cfg, u32 *obj_id)
24710 + struct mc_command cmd = { 0 };
24711 + struct dpseci_cmd_create *cmd_params;
24714 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
24717 + cmd_params = (struct dpseci_cmd_create *)cmd.params;
24718 + for (i = 0; i < 8; i++)
24719 + cmd_params->priorities[i] = cfg->priorities[i];
24720 + cmd_params->num_tx_queues = cfg->num_tx_queues;
24721 + cmd_params->num_rx_queues = cfg->num_rx_queues;
24722 + cmd_params->options = cpu_to_le32(cfg->options);
24723 + err = mc_send_command(mc_io, &cmd);
24727 + *obj_id = mc_cmd_read_object_id(&cmd);
24733 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
24734 + * @mc_io: Pointer to MC portal's I/O object
24735 + * @dprc_token: Parent container token; '0' for default container
24736 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24737 + * @object_id: The object id; it must be a valid id within the container that
24738 + * created this object
24740 + * The function accepts the authentication token of the parent container that
24741 + * created the object (not the one that currently owns the object). The object
24742 + * is searched within parent using the provided 'object_id'.
24743 + * All tokens to the object must be closed before calling destroy.
24745 + * Return: '0' on success, error code otherwise
24747 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
24750 + struct mc_command cmd = { 0 };
24751 + struct dpseci_cmd_destroy *cmd_params;
24753 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
24756 + cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
24757 + cmd_params->object_id = cpu_to_le32(object_id);
24759 + return mc_send_command(mc_io, &cmd);
24763 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
24764 + * @mc_io: Pointer to MC portal's I/O object
24765 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24766 + * @token: Token of DPSECI object
24768 + * Return: '0' on success, error code otherwise
24770 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
24772 + struct mc_command cmd = { 0 };
24774 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
24777 + return mc_send_command(mc_io, &cmd);
24781 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
24782 + * @mc_io: Pointer to MC portal's I/O object
24783 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24784 + * @token: Token of DPSECI object
24786 + * Return: '0' on success, error code otherwise
24788 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
24790 + struct mc_command cmd = { 0 };
24792 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
24796 + return mc_send_command(mc_io, &cmd);
24800 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
24801 + * @mc_io: Pointer to MC portal's I/O object
24802 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24803 + * @token: Token of DPSECI object
24804 + * @en: Returns '1' if object is enabled; '0' otherwise
24806 + * Return: '0' on success, error code otherwise
24808 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24811 + struct mc_command cmd = { 0 };
24812 + struct dpseci_rsp_is_enabled *rsp_params;
24815 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
24818 + err = mc_send_command(mc_io, &cmd);
24822 + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
24823 + *en = le32_to_cpu(rsp_params->is_enabled);
24829 + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
24830 + * @mc_io: Pointer to MC portal's I/O object
24831 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24832 + * @token: Token of DPSECI object
24834 + * Return: '0' on success, error code otherwise
24836 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
24838 + struct mc_command cmd = { 0 };
24840 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
24844 + return mc_send_command(mc_io, &cmd);
24848 + * dpseci_get_irq_enable() - Get overall interrupt state
24849 + * @mc_io: Pointer to MC portal's I/O object
24850 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24851 + * @token: Token of DPSECI object
24852 + * @irq_index: The interrupt index to configure
24853 + * @en: Returned Interrupt state - enable = 1, disable = 0
24855 + * Return: '0' on success, error code otherwise
24857 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24858 + u8 irq_index, u8 *en)
24860 + struct mc_command cmd = { 0 };
24861 + struct dpseci_cmd_irq_enable *cmd_params;
24862 + struct dpseci_rsp_get_irq_enable *rsp_params;
24865 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
24868 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
24869 + cmd_params->irq_index = irq_index;
24870 + err = mc_send_command(mc_io, &cmd);
24874 + rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
24875 + *en = rsp_params->enable_state;
24881 + * dpseci_set_irq_enable() - Set overall interrupt state.
24882 + * @mc_io: Pointer to MC portal's I/O object
24883 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24884 + * @token: Token of DPSECI object
24885 + * @irq_index: The interrupt index to configure
24886 + * @en: Interrupt state - enable = 1, disable = 0
24888 + * Allows GPP software to control when interrupts are generated.
24889 + * Each interrupt can have up to 32 causes. The enable/disable controls the
24890 + * overall interrupt state. If the interrupt is disabled, no causes will cause
24893 + * Return: '0' on success, error code otherwise
24895 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24896 + u8 irq_index, u8 en)
24898 + struct mc_command cmd = { 0 };
24899 + struct dpseci_cmd_irq_enable *cmd_params;
24901 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
24904 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
24905 + cmd_params->irq_index = irq_index;
24906 + cmd_params->enable_state = en;
24908 + return mc_send_command(mc_io, &cmd);
24912 + * dpseci_get_irq_mask() - Get interrupt mask.
24913 + * @mc_io: Pointer to MC portal's I/O object
24914 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24915 + * @token: Token of DPSECI object
24916 + * @irq_index: The interrupt index to configure
24917 + * @mask: Returned event mask to trigger interrupt
24919 + * Every interrupt can have up to 32 causes and the interrupt model supports
24920 + * masking/unmasking each cause independently.
24922 + * Return: '0' on success, error code otherwise
24924 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24925 + u8 irq_index, u32 *mask)
24927 + struct mc_command cmd = { 0 };
24928 + struct dpseci_cmd_irq_mask *cmd_params;
24931 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
24934 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
24935 + cmd_params->irq_index = irq_index;
24936 + err = mc_send_command(mc_io, &cmd);
24940 + *mask = le32_to_cpu(cmd_params->mask);
24946 + * dpseci_set_irq_mask() - Set interrupt mask.
24947 + * @mc_io: Pointer to MC portal's I/O object
24948 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24949 + * @token: Token of DPSECI object
24950 + * @irq_index: The interrupt index to configure
24951 + * @mask: event mask to trigger interrupt;
24953 + * 0 = ignore event
24954 + * 1 = consider event for asserting IRQ
24956 + * Every interrupt can have up to 32 causes and the interrupt model supports
24957 + * masking/unmasking each cause independently
24959 + * Return: '0' on success, error code otherwise
24961 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24962 + u8 irq_index, u32 mask)
24964 + struct mc_command cmd = { 0 };
24965 + struct dpseci_cmd_irq_mask *cmd_params;
24967 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
24970 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
24971 + cmd_params->mask = cpu_to_le32(mask);
24972 + cmd_params->irq_index = irq_index;
24974 + return mc_send_command(mc_io, &cmd);
24978 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
24979 + * @mc_io: Pointer to MC portal's I/O object
24980 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24981 + * @token: Token of DPSECI object
24982 + * @irq_index: The interrupt index to configure
24983 + * @status: Returned interrupts status - one bit per cause:
24984 + * 0 = no interrupt pending
24985 + * 1 = interrupt pending
24987 + * Return: '0' on success, error code otherwise
24989 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24990 + u8 irq_index, u32 *status)
24992 + struct mc_command cmd = { 0 };
24993 + struct dpseci_cmd_irq_status *cmd_params;
24996 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
24999 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
25000 + cmd_params->status = cpu_to_le32(*status);
25001 + cmd_params->irq_index = irq_index;
25002 + err = mc_send_command(mc_io, &cmd);
25006 + *status = le32_to_cpu(cmd_params->status);
25012 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
25013 + * @mc_io: Pointer to MC portal's I/O object
25014 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25015 + * @token: Token of DPSECI object
25016 + * @irq_index: The interrupt index to configure
25017 + * @status: bits to clear (W1C) - one bit per cause:
25018 + * 0 = don't change
25019 + * 1 = clear status bit
25021 + * Return: '0' on success, error code otherwise
25023 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25024 + u8 irq_index, u32 status)
25026 + struct mc_command cmd = { 0 };
25027 + struct dpseci_cmd_irq_status *cmd_params;
25029 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
25032 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
25033 + cmd_params->status = cpu_to_le32(status);
25034 + cmd_params->irq_index = irq_index;
25036 + return mc_send_command(mc_io, &cmd);
25040 + * dpseci_get_attributes() - Retrieve DPSECI attributes
25041 + * @mc_io: Pointer to MC portal's I/O object
25042 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25043 + * @token: Token of DPSECI object
25044 + * @attr: Returned object's attributes
25046 + * Return: '0' on success, error code otherwise
25048 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25049 + struct dpseci_attr *attr)
25051 + struct mc_command cmd = { 0 };
25052 + struct dpseci_rsp_get_attributes *rsp_params;
25055 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
25058 + err = mc_send_command(mc_io, &cmd);
25062 + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
25063 + attr->id = le32_to_cpu(rsp_params->id);
25064 + attr->num_tx_queues = rsp_params->num_tx_queues;
25065 + attr->num_rx_queues = rsp_params->num_rx_queues;
25066 + attr->options = le32_to_cpu(rsp_params->options);
25072 + * dpseci_set_rx_queue() - Set Rx queue configuration
25073 + * @mc_io: Pointer to MC portal's I/O object
25074 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25075 + * @token: Token of DPSECI object
25076 + * @queue: Select the queue relative to number of priorities configured at
25077 + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
25078 + * Rx queues identically.
25079 + * @cfg: Rx queue configuration
25081 + * Return: '0' on success, error code otherwise
25083 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25084 + u8 queue, const struct dpseci_rx_queue_cfg *cfg)
25086 + struct mc_command cmd = { 0 };
25087 + struct dpseci_cmd_queue *cmd_params;
25089 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
25092 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
25093 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
25094 + cmd_params->priority = cfg->dest_cfg.priority;
25095 + cmd_params->queue = queue;
25096 + cmd_params->dest_type = cfg->dest_cfg.dest_type;
25097 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
25098 + cmd_params->options = cpu_to_le32(cfg->options);
25099 + cmd_params->order_preservation_en =
25100 + cpu_to_le32(cfg->order_preservation_en);
25102 + return mc_send_command(mc_io, &cmd);
25106 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
25107 + * @mc_io: Pointer to MC portal's I/O object
25108 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25109 + * @token: Token of DPSECI object
25110 + * @queue: Select the queue relative to number of priorities configured at
25111 + * DPSECI creation
25112 + * @attr: Returned Rx queue attributes
25114 + * Return: '0' on success, error code otherwise
25116 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25117 + u8 queue, struct dpseci_rx_queue_attr *attr)
25119 + struct mc_command cmd = { 0 };
25120 + struct dpseci_cmd_queue *cmd_params;
25123 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
25126 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
25127 + cmd_params->queue = queue;
25128 + err = mc_send_command(mc_io, &cmd);
25132 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
25133 + attr->dest_cfg.priority = cmd_params->priority;
25134 + attr->dest_cfg.dest_type = cmd_params->dest_type;
25135 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
25136 + attr->fqid = le32_to_cpu(cmd_params->fqid);
25137 + attr->order_preservation_en =
25138 + le32_to_cpu(cmd_params->order_preservation_en);
25144 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
25145 + * @mc_io: Pointer to MC portal's I/O object
25146 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25147 + * @token: Token of DPSECI object
25148 + * @queue: Select the queue relative to number of priorities configured at
25149 + * DPSECI creation
25150 + * @attr: Returned Tx queue attributes
25152 + * Return: '0' on success, error code otherwise
25154 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25155 + u8 queue, struct dpseci_tx_queue_attr *attr)
25157 + struct mc_command cmd = { 0 };
25158 + struct dpseci_cmd_queue *cmd_params;
25159 + struct dpseci_rsp_get_tx_queue *rsp_params;
25162 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
25165 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
25166 + cmd_params->queue = queue;
25167 + err = mc_send_command(mc_io, &cmd);
25171 + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
25172 + attr->fqid = le32_to_cpu(rsp_params->fqid);
25173 + attr->priority = rsp_params->priority;
25179 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
25180 + * @mc_io: Pointer to MC portal's I/O object
25181 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25182 + * @token: Token of DPSECI object
25183 + * @attr: Returned SEC attributes
25185 + * Return: '0' on success, error code otherwise
25187 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25188 + struct dpseci_sec_attr *attr)
25190 + struct mc_command cmd = { 0 };
25191 + struct dpseci_rsp_get_sec_attr *rsp_params;
25194 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
25197 + err = mc_send_command(mc_io, &cmd);
25201 + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
25202 + attr->ip_id = le16_to_cpu(rsp_params->ip_id);
25203 + attr->major_rev = rsp_params->major_rev;
25204 + attr->minor_rev = rsp_params->minor_rev;
25205 + attr->era = rsp_params->era;
25206 + attr->deco_num = rsp_params->deco_num;
25207 + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
25208 + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
25209 + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
25210 + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
25211 + attr->crc_acc_num = rsp_params->crc_acc_num;
25212 + attr->pk_acc_num = rsp_params->pk_acc_num;
25213 + attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
25214 + attr->rng_acc_num = rsp_params->rng_acc_num;
25215 + attr->md_acc_num = rsp_params->md_acc_num;
25216 + attr->arc4_acc_num = rsp_params->arc4_acc_num;
25217 + attr->des_acc_num = rsp_params->des_acc_num;
25218 + attr->aes_acc_num = rsp_params->aes_acc_num;
25224 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
25225 + * @mc_io: Pointer to MC portal's I/O object
25226 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25227 + * @token: Token of DPSECI object
25228 + * @counters: Returned SEC counters
25230 + * Return: '0' on success, error code otherwise
25232 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25233 + struct dpseci_sec_counters *counters)
25235 + struct mc_command cmd = { 0 };
25236 + struct dpseci_rsp_get_sec_counters *rsp_params;
25239 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
25242 + err = mc_send_command(mc_io, &cmd);
25246 + rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
25247 + counters->dequeued_requests =
25248 + le64_to_cpu(rsp_params->dequeued_requests);
25249 + counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
25250 + counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
25251 + counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
25252 + counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
25253 + counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
25254 + counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
25260 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
25261 + * @mc_io: Pointer to MC portal's I/O object
25262 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25263 + * @major_ver: Major version of data path sec API
25264 + * @minor_ver: Minor version of data path sec API
25266 + * Return: '0' on success, error code otherwise
25268 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
25269 + u16 *major_ver, u16 *minor_ver)
25271 + struct mc_command cmd = { 0 };
25272 + struct dpseci_rsp_get_api_version *rsp_params;
25275 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
25277 + err = mc_send_command(mc_io, &cmd);
25281 + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
25282 + *major_ver = le16_to_cpu(rsp_params->major);
25283 + *minor_ver = le16_to_cpu(rsp_params->minor);
25289 + * dpseci_set_opr() - Set Order Restoration configuration
25290 + * @mc_io: Pointer to MC portal's I/O object
25291 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25292 + * @token: Token of DPSECI object
25293 + * @index: The queue index
25294 + * @options: Configuration mode options; can be OPR_OPT_CREATE or
25296 + * @cfg: Configuration options for the OPR
25298 + * Return: '0' on success, error code otherwise
25300 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25301 + u8 options, struct opr_cfg *cfg)
25303 + struct mc_command cmd = { 0 };
25304 + struct dpseci_cmd_opr *cmd_params;
25306 + cmd.header = mc_encode_cmd_header(
25307 + DPSECI_CMDID_SET_OPR,
25310 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
25311 + cmd_params->index = index;
25312 + cmd_params->options = options;
25313 + cmd_params->oloe = cfg->oloe;
25314 + cmd_params->oeane = cfg->oeane;
25315 + cmd_params->olws = cfg->olws;
25316 + cmd_params->oa = cfg->oa;
25317 + cmd_params->oprrws = cfg->oprrws;
25319 + return mc_send_command(mc_io, &cmd);
25323 + * dpseci_get_opr() - Retrieve Order Restoration config and query
25324 + * @mc_io: Pointer to MC portal's I/O object
25325 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25326 + * @token: Token of DPSECI object
25327 + * @index: The queue index
25328 + * @cfg: Returned OPR configuration
25329 + * @qry: Returned OPR query
25331 + * Return: '0' on success, error code otherwise
25333 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25334 + struct opr_cfg *cfg, struct opr_qry *qry)
25336 + struct mc_command cmd = { 0 };
25337 + struct dpseci_cmd_opr *cmd_params;
25338 + struct dpseci_rsp_get_opr *rsp_params;
25341 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
25344 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
25345 + cmd_params->index = index;
25346 + err = mc_send_command(mc_io, &cmd);
25350 + rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
25351 + qry->rip = dpseci_get_field(rsp_params->rip_enable, OPR_RIP);
25352 + qry->enable = dpseci_get_field(rsp_params->rip_enable, OPR_ENABLE);
25353 + cfg->oloe = rsp_params->oloe;
25354 + cfg->oeane = rsp_params->oeane;
25355 + cfg->olws = rsp_params->olws;
25356 + cfg->oa = rsp_params->oa;
25357 + cfg->oprrws = rsp_params->oprrws;
25358 + qry->nesn = le16_to_cpu(rsp_params->nesn);
25359 + qry->ndsn = le16_to_cpu(rsp_params->ndsn);
25360 + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
25361 + qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
25362 + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
25363 + qry->hseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_HSEQ_NLIS);
25364 + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
25365 + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
25366 + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
25367 + qry->opr_id = le16_to_cpu(rsp_params->opr_id);
25373 + * dpseci_set_congestion_notification() - Set congestion group
25374 + * notification configuration
25375 + * @mc_io: Pointer to MC portal's I/O object
25376 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25377 + * @token: Token of DPSECI object
25378 + * @cfg: congestion notification configuration
25380 + * Return: '0' on success, error code otherwise
25382 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25383 + u16 token, const struct dpseci_congestion_notification_cfg *cfg)
25385 + struct mc_command cmd = { 0 };
25386 + struct dpseci_cmd_congestion_notification *cmd_params;
25388 + cmd.header = mc_encode_cmd_header(
25389 + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
25392 + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
25393 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
25394 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
25395 + cmd_params->priority = cfg->dest_cfg.priority;
25396 + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
25397 + cfg->dest_cfg.dest_type);
25398 + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
25399 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
25400 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
25401 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
25402 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
25404 + return mc_send_command(mc_io, &cmd);
25408 + * dpseci_get_congestion_notification() - Get congestion group notification
25410 + * @mc_io: Pointer to MC portal's I/O object
25411 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25412 + * @token: Token of DPSECI object
25413 + * @cfg: congestion notification configuration
25415 + * Return: '0' on success, error code otherwise
25417 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25418 + u16 token, struct dpseci_congestion_notification_cfg *cfg)
25420 + struct mc_command cmd = { 0 };
25421 + struct dpseci_cmd_congestion_notification *rsp_params;
25424 + cmd.header = mc_encode_cmd_header(
25425 + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
25428 + err = mc_send_command(mc_io, &cmd);
25432 + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
25433 + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
25434 + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
25435 + cfg->dest_cfg.priority = rsp_params->priority;
25436 + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
25438 + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
25439 + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
25440 + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
25441 + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
25442 + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
25447 +++ b/drivers/crypto/caam/dpseci.h
25450 + * Copyright 2013-2016 Freescale Semiconductor Inc.
25451 + * Copyright 2017 NXP
25453 + * Redistribution and use in source and binary forms, with or without
25454 + * modification, are permitted provided that the following conditions are met:
25455 + * * Redistributions of source code must retain the above copyright
25456 + * notice, this list of conditions and the following disclaimer.
25457 + * * Redistributions in binary form must reproduce the above copyright
25458 + * notice, this list of conditions and the following disclaimer in the
25459 + * documentation and/or other materials provided with the distribution.
25460 + * * Neither the names of the above-listed copyright holders nor the
25461 + * names of any contributors may be used to endorse or promote products
25462 + * derived from this software without specific prior written permission.
25465 + * ALTERNATIVELY, this software may be distributed under the terms of the
25466 + * GNU General Public License ("GPL") as published by the Free Software
25467 + * Foundation, either version 2 of that License or (at your option) any
25470 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25471 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25472 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25473 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
25474 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25475 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25476 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25477 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25478 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25479 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25480 + * POSSIBILITY OF SUCH DAMAGE.
25482 +#ifndef _DPSECI_H_
25483 +#define _DPSECI_H_
25486 + * Data Path SEC Interface API
25487 + * Contains initialization APIs and runtime control APIs for DPSECI
25495 + * General DPSECI macros
25499 + * Maximum number of Tx/Rx priorities per DPSECI object
25501 +#define DPSECI_PRIO_NUM 8
25504 + * All queues considered; see dpseci_set_rx_queue()
25506 +#define DPSECI_ALL_QUEUES (u8)(-1)
25508 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
25511 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25514 + * Enable the Congestion Group support
25516 +#define DPSECI_OPT_HAS_CG 0x000020
25519 + * Enable the Order Restoration support
25521 +#define DPSECI_OPT_HAS_OPR 0x000040
25524 + * Order Point Records are shared for the entire DPSECI
25526 +#define DPSECI_OPT_OPR_SHARED 0x000080
25529 + * struct dpseci_cfg - Structure representing DPSECI configuration
25530 + * @options: Any combination of the following options:
25531 + * DPSECI_OPT_HAS_CG
25532 + * DPSECI_OPT_HAS_OPR
25533 + * DPSECI_OPT_OPR_SHARED
25534 + * @num_tx_queues: num of queues towards the SEC
25535 + * @num_rx_queues: num of queues back from the SEC
25536 + * @priorities: Priorities for the SEC hardware processing;
25537 + * each place in the array is the priority of the tx queue
25538 + * towards the SEC;
25539 + * valid priorities are configured with values 1-8;
25541 +struct dpseci_cfg {
25543 + u8 num_tx_queues;
25544 + u8 num_rx_queues;
25545 + u8 priorities[DPSECI_PRIO_NUM];
25548 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
25549 + const struct dpseci_cfg *cfg, u32 *obj_id);
25551 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
25554 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25556 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25558 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25561 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25563 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25564 + u8 irq_index, u8 *en);
25566 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25567 + u8 irq_index, u8 en);
25569 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25570 + u8 irq_index, u32 *mask);
25572 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25573 + u8 irq_index, u32 mask);
25575 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25576 + u8 irq_index, u32 *status);
25578 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25579 + u8 irq_index, u32 status);
25582 + * struct dpseci_attr - Structure representing DPSECI attributes
25583 + * @id: DPSECI object ID
25584 + * @num_tx_queues: number of queues towards the SEC
25585 + * @num_rx_queues: number of queues back from the SEC
25586 + * @options: any combination of the following options:
25587 + * DPSECI_OPT_HAS_CG
25588 + * DPSECI_OPT_HAS_OPR
25589 + * DPSECI_OPT_OPR_SHARED
25591 +struct dpseci_attr {
25593 + u8 num_tx_queues;
25594 + u8 num_rx_queues;
25598 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25599 + struct dpseci_attr *attr);
25602 + * enum dpseci_dest - DPSECI destination types
25603 + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
25604 + * and does not generate FQDAN notifications; user is expected to dequeue
25605 + * from the queue based on polling or other user-defined method
25606 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
25607 + * notifications to the specified DPIO; user is expected to dequeue from
25608 + * the queue only after notification is received
25609 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
25610 + * FQDAN notifications, but is connected to the specified DPCON object;
25611 + * user is expected to dequeue from the DPCON channel
25613 +enum dpseci_dest {
25614 + DPSECI_DEST_NONE = 0,
25615 + DPSECI_DEST_DPIO,
25616 + DPSECI_DEST_DPCON
25620 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
25621 + * @dest_type: Destination type
25622 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
25623 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
25624 + * are 0-1 or 0-7, depending on the number of priorities in that channel;
25625 + * not relevant for 'DPSECI_DEST_NONE' option
25627 +struct dpseci_dest_cfg {
25628 + enum dpseci_dest dest_type;
25634 + * DPSECI queue modification options
25638 + * Select to modify the user's context associated with the queue
25640 +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
25643 + * Select to modify the queue's destination
25645 +#define DPSECI_QUEUE_OPT_DEST 0x00000002
25648 + * Select to modify the queue's order preservation
25650 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
25653 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
25654 + * @options: Flags representing the suggested modifications to the queue;
25655 + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
25656 + * @order_preservation_en: order preservation configuration for the rx queue
25657 + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
25658 + * @user_ctx: User context value provided in the frame descriptor of each
25659 + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
25661 + * @dest_cfg: Queue destination parameters; valid only if
25662 + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
25664 +struct dpseci_rx_queue_cfg {
25666 + int order_preservation_en;
25668 + struct dpseci_dest_cfg dest_cfg;
25671 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25672 + u8 queue, const struct dpseci_rx_queue_cfg *cfg);
25675 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
25676 + * @user_ctx: User context value provided in the frame descriptor of each
25678 + * @order_preservation_en: Status of the order preservation configuration on the
25680 + * @dest_cfg: Queue destination configuration
25681 + * @fqid: Virtual FQID value to be used for dequeue operations
25683 +struct dpseci_rx_queue_attr {
25685 + int order_preservation_en;
25686 + struct dpseci_dest_cfg dest_cfg;
25690 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25691 + u8 queue, struct dpseci_rx_queue_attr *attr);
25694 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
25695 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
25696 + * @priority: SEC hardware processing priority for the queue
25698 +struct dpseci_tx_queue_attr {
25703 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25704 + u8 queue, struct dpseci_tx_queue_attr *attr);
25707 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
25708 + * hardware accelerator
25709 + * @ip_id: ID for SEC
25710 + * @major_rev: Major revision number for SEC
25711 + * @minor_rev: Minor revision number for SEC
25713 + * @deco_num: The number of copies of the DECO that are implemented in this
25715 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
25717 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
25719 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
25720 + * implemented in this version of SEC
25721 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
25722 + * implemented in this version of SEC
25723 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
25724 + * this version of SEC
25725 + * @pk_acc_num: The number of copies of the Public Key module that are
25726 + * implemented in this version of SEC
25727 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
25728 + * implemented in this version of SEC
25729 + * @rng_acc_num: The number of copies of the Random Number Generator that are
25730 + * implemented in this version of SEC
25731 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
25732 + * implemented in this version of SEC
25733 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
25734 + * in this version of SEC
25735 + * @des_acc_num: The number of copies of the DES module that are implemented in
25736 + * this version of SEC
25737 + * @aes_acc_num: The number of copies of the AES module that are implemented in
25738 + * this version of SEC
25740 +struct dpseci_sec_attr {
25746 + u8 zuc_auth_acc_num;
25747 + u8 zuc_enc_acc_num;
25748 + u8 snow_f8_acc_num;
25749 + u8 snow_f9_acc_num;
25752 + u8 kasumi_acc_num;
25760 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25761 + struct dpseci_sec_attr *attr);
25764 + * struct dpseci_sec_counters - Structure representing global SEC counters and
25765 + * not per dpseci counters
25766 + * @dequeued_requests: Number of Requests Dequeued
25767 + * @ob_enc_requests: Number of Outbound Encrypt Requests
25768 + * @ib_dec_requests: Number of Inbound Decrypt Requests
25769 + * @ob_enc_bytes: Number of Outbound Bytes Encrypted
25770 + * @ob_prot_bytes: Number of Outbound Bytes Protected
25771 + * @ib_dec_bytes: Number of Inbound Bytes Decrypted
25772 + * @ib_valid_bytes: Number of Inbound Bytes Validated
25774 +struct dpseci_sec_counters {
25775 + u64 dequeued_requests;
25776 + u64 ob_enc_requests;
25777 + u64 ib_dec_requests;
25778 + u64 ob_enc_bytes;
25779 + u64 ob_prot_bytes;
25780 + u64 ib_dec_bytes;
25781 + u64 ib_valid_bytes;
25784 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25785 + struct dpseci_sec_counters *counters);
25787 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
25788 + u16 *major_ver, u16 *minor_ver);
25790 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25791 + u8 options, struct opr_cfg *cfg);
25793 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25794 + struct opr_cfg *cfg, struct opr_qry *qry);
25797 + * enum dpseci_congestion_unit - DPSECI congestion units
25798 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
25799 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
25801 +enum dpseci_congestion_unit {
25802 + DPSECI_CONGESTION_UNIT_BYTES = 0,
25803 + DPSECI_CONGESTION_UNIT_FRAMES
25806 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
25807 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
25808 +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
25809 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
25810 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
25811 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
25814 + * struct dpseci_congestion_notification_cfg - congestion notification
25816 + * @units: units type
25817 + * @threshold_entry: above this threshold we enter a congestion state.
25818 + * set it to '0' to disable it
25819 + * @threshold_exit: below this threshold we exit the congestion state.
25820 + * @message_ctx: The context that will be part of the CSCN message
25821 + * @message_iova: I/O virtual address (must be in DMA-able memory),
25822 + * must be 16B aligned;
25823 + * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel
25824 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
25827 +struct dpseci_congestion_notification_cfg {
25828 + enum dpseci_congestion_unit units;
25829 + u32 threshold_entry;
25830 + u32 threshold_exit;
25832 + u64 message_iova;
25833 + struct dpseci_dest_cfg dest_cfg;
25834 + u16 notification_mode;
25837 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25838 + u16 token, const struct dpseci_congestion_notification_cfg *cfg);
25840 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25841 + u16 token, struct dpseci_congestion_notification_cfg *cfg);
25843 +#endif /* _DPSECI_H_ */
25845 +++ b/drivers/crypto/caam/dpseci_cmd.h
25848 + * Copyright 2013-2016 Freescale Semiconductor Inc.
25849 + * Copyright 2017 NXP
25851 + * Redistribution and use in source and binary forms, with or without
25852 + * modification, are permitted provided that the following conditions are met:
25853 + * * Redistributions of source code must retain the above copyright
25854 + * notice, this list of conditions and the following disclaimer.
25855 + * * Redistributions in binary form must reproduce the above copyright
25856 + * notice, this list of conditions and the following disclaimer in the
25857 + * documentation and/or other materials provided with the distribution.
25858 + * * Neither the names of the above-listed copyright holders nor the
25859 + * names of any contributors may be used to endorse or promote products
25860 + * derived from this software without specific prior written permission.
25863 + * ALTERNATIVELY, this software may be distributed under the terms of the
25864 + * GNU General Public License ("GPL") as published by the Free Software
25865 + * Foundation, either version 2 of that License or (at your option) any
25868 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25869 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25870 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25871 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
25872 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25873 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25874 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25875 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25876 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25877 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25878 + * POSSIBILITY OF SUCH DAMAGE.
25881 +#ifndef _DPSECI_CMD_H_
25882 +#define _DPSECI_CMD_H_
25884 +/* DPSECI Version */
25885 +#define DPSECI_VER_MAJOR 5
25886 +#define DPSECI_VER_MINOR 1
25888 +#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
25889 +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
25893 +#define DPSECI_CMDID_CLOSE 0x8001
25894 +#define DPSECI_CMDID_OPEN 0x8091
25895 +#define DPSECI_CMDID_CREATE 0x9092
25896 +#define DPSECI_CMDID_DESTROY 0x9891
25897 +#define DPSECI_CMDID_GET_API_VERSION 0xa091
25899 +#define DPSECI_CMDID_ENABLE 0x0021
25900 +#define DPSECI_CMDID_DISABLE 0x0031
25901 +#define DPSECI_CMDID_GET_ATTR 0x0041
25902 +#define DPSECI_CMDID_RESET 0x0051
25903 +#define DPSECI_CMDID_IS_ENABLED 0x0061
25905 +#define DPSECI_CMDID_SET_IRQ_ENABLE 0x0121
25906 +#define DPSECI_CMDID_GET_IRQ_ENABLE 0x0131
25907 +#define DPSECI_CMDID_SET_IRQ_MASK 0x0141
25908 +#define DPSECI_CMDID_GET_IRQ_MASK 0x0151
25909 +#define DPSECI_CMDID_GET_IRQ_STATUS 0x0161
25910 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS 0x0171
25912 +#define DPSECI_CMDID_SET_RX_QUEUE 0x1941
25913 +#define DPSECI_CMDID_GET_RX_QUEUE 0x1961
25914 +#define DPSECI_CMDID_GET_TX_QUEUE 0x1971
25915 +#define DPSECI_CMDID_GET_SEC_ATTR 0x1981
25916 +#define DPSECI_CMDID_GET_SEC_COUNTERS 0x1991
25917 +#define DPSECI_CMDID_SET_OPR 0x19A1
25918 +#define DPSECI_CMDID_GET_OPR 0x19B1
25920 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION 0x1701
25921 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION 0x1711
25923 +/* Macros for accessing command fields smaller than 1 byte */
25924 +#define DPSECI_MASK(field) \
25925 + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
25926 + DPSECI_##field##_SHIFT)
25928 +#define dpseci_set_field(var, field, val) \
25929 + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
25931 +#define dpseci_get_field(var, field) \
25932 + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
25934 +struct dpseci_cmd_open {
25935 + __le32 dpseci_id;
25938 +struct dpseci_cmd_create {
25939 + u8 priorities[8];
25940 + u8 num_tx_queues;
25941 + u8 num_rx_queues;
25946 +struct dpseci_cmd_destroy {
25947 + __le32 object_id;
25950 +struct dpseci_rsp_is_enabled {
25951 + __le32 is_enabled;
25954 +struct dpseci_cmd_irq_enable {
25960 +struct dpseci_rsp_get_irq_enable {
25964 +struct dpseci_cmd_irq_mask {
25969 +struct dpseci_cmd_irq_status {
25974 +struct dpseci_rsp_get_attributes {
25977 + u8 num_tx_queues;
25978 + u8 num_rx_queues;
25983 +struct dpseci_cmd_queue {
25994 + __le32 order_preservation_en;
25997 +struct dpseci_rsp_get_tx_queue {
26003 +struct dpseci_rsp_get_sec_attr {
26010 + u8 zuc_auth_acc_num;
26011 + u8 zuc_enc_acc_num;
26013 + u8 snow_f8_acc_num;
26014 + u8 snow_f9_acc_num;
26018 + u8 kasumi_acc_num;
26027 +struct dpseci_rsp_get_sec_counters {
26028 + __le64 dequeued_requests;
26029 + __le64 ob_enc_requests;
26030 + __le64 ib_dec_requests;
26031 + __le64 ob_enc_bytes;
26032 + __le64 ob_prot_bytes;
26033 + __le64 ib_dec_bytes;
26034 + __le64 ib_valid_bytes;
26037 +struct dpseci_rsp_get_api_version {
26042 +struct dpseci_cmd_opr {
26054 +#define DPSECI_OPR_RIP_SHIFT 0
26055 +#define DPSECI_OPR_RIP_SIZE 1
26056 +#define DPSECI_OPR_ENABLE_SHIFT 1
26057 +#define DPSECI_OPR_ENABLE_SIZE 1
26058 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT 1
26059 +#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
26060 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT 1
26061 +#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
26063 +struct dpseci_rsp_get_opr {
26091 +#define DPSECI_CGN_DEST_TYPE_SHIFT 0
26092 +#define DPSECI_CGN_DEST_TYPE_SIZE 4
26093 +#define DPSECI_CGN_UNITS_SHIFT 4
26094 +#define DPSECI_CGN_UNITS_SIZE 2
26096 +struct dpseci_cmd_congestion_notification {
26098 + __le16 notification_mode;
26101 + __le64 message_iova;
26102 + __le64 message_ctx;
26103 + __le32 threshold_entry;
26104 + __le32 threshold_exit;
26107 +#endif /* _DPSECI_CMD_H_ */
26108 --- a/drivers/crypto/caam/error.c
26109 +++ b/drivers/crypto/caam/error.c
26112 #include "compat.h"
26114 -#include "intern.h"
26121 +#include <linux/highmem.h>
26123 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
26124 + int rowsize, int groupsize, struct scatterlist *sg,
26125 + size_t tlen, bool ascii)
26127 + struct scatterlist *it;
26132 +	for (it = sg; it && tlen > 0 ; it = sg_next(it)) {
26134 + * make sure the scatterlist's page
26135 + * has a valid virtual memory mapping
26137 + it_page = kmap_atomic(sg_page(it));
26138 + if (unlikely(!it_page)) {
26139 + pr_err("caam_dump_sg: kmap failed\n");
26143 + buf = it_page + it->offset;
26144 + len = min_t(size_t, tlen, it->length);
26145 + print_hex_dump(level, prefix_str, prefix_type, rowsize,
26146 + groupsize, buf, len, ascii);
26149 + kunmap_atomic(it_page);
26155 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
26156 + int rowsize, int groupsize, struct scatterlist *sg,
26157 + size_t tlen, bool ascii)
26162 +EXPORT_SYMBOL(caam_dump_sg);
26164 static const struct {
26166 const char *error_text;
26167 @@ -69,6 +112,54 @@ static const struct {
26168 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
26171 +static const struct {
26173 + const char *error_text;
26174 +} qi_error_list[] = {
26175 + { 0x1F, "Job terminated by FQ or ICID flush" },
26176 + { 0x20, "FD format error"},
26177 + { 0x21, "FD command format error"},
26178 + { 0x23, "FL format error"},
26179 + { 0x25, "CRJD specified in FD, but not enabled in FLC"},
26180 + { 0x30, "Max. buffer size too small"},
26181 + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
26182 +	{ 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
26183 + { 0x33, "Size over/underflow (allocate mode)"},
26184 + { 0x34, "Size over/underflow (reuse mode)"},
26185 +	{ 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
26186 +	{ 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
26187 + { 0x41, "SBC frame format not supported (allocate mode)"},
26188 + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
26189 + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
26190 + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
26191 + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
26192 + { 0x46, "Annotation length exceeds offset (reuse mode)"},
26193 + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
26194 + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
26195 +	{ 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
26196 + { 0x51, "Unsupported IF reuse mode"},
26197 + { 0x52, "Unsupported FL use mode"},
26198 + { 0x53, "Unsupported RJD use mode"},
26199 + { 0x54, "Unsupported inline descriptor use mode"},
26200 + { 0xC0, "Table buffer pool 0 depletion"},
26201 + { 0xC1, "Table buffer pool 1 depletion"},
26202 + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
26203 + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
26204 + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
26205 + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
26206 + { 0xD0, "FLC read error"},
26207 + { 0xD1, "FL read error"},
26208 + { 0xD2, "FL write error"},
26209 + { 0xD3, "OF SGT write error"},
26210 + { 0xD4, "PTA read error"},
26211 + { 0xD5, "PTA write error"},
26212 + { 0xD6, "OF SGT F-bit write error"},
26213 + { 0xD7, "ASA write error"},
26214 + { 0xE1, "FLC[ICR]=0 ICID error"},
26215 + { 0xE2, "FLC[ICR]=1 ICID error"},
26216 + { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
26219 static const char * const cha_id_list[] = {
26222 @@ -146,10 +237,9 @@ static void report_ccb_status(struct dev
26223 strlen(rng_err_id_list[err_id])) {
26224 /* RNG-only error */
26225 err_str = rng_err_id_list[err_id];
26226 - } else if (err_id < ARRAY_SIZE(err_id_list))
26228 err_str = err_id_list[err_id];
26230 - snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
26234 * CCB ICV check failures are part of normal operation life;
26235 @@ -198,6 +288,27 @@ static void report_deco_status(struct de
26236 status, error, idx_str, idx, err_str, err_err_code);
26239 +static void report_qi_status(struct device *qidev, const u32 status,
26240 + const char *error)
26242 + u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
26243 + const char *err_str = "unidentified error value 0x";
26244 + char err_err_code[3] = { 0 };
26247 + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
26248 + if (qi_error_list[i].value == err_id)
26251 + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
26252 + err_str = qi_error_list[i].error_text;
26254 + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
26256 + dev_err(qidev, "%08x: %s: %s%s\n",
26257 + status, error, err_str, err_err_code);
26260 static void report_jr_status(struct device *jrdev, const u32 status,
26263 @@ -212,7 +323,7 @@ static void report_cond_code_status(stru
26264 status, error, __func__);
26267 -void caam_jr_strstatus(struct device *jrdev, u32 status)
26268 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
26270 static const struct stat_src {
26271 void (*report_ssed)(struct device *jrdev, const u32 status,
26272 @@ -224,7 +335,7 @@ void caam_jr_strstatus(struct device *jr
26273 { report_ccb_status, "CCB" },
26274 { report_jump_status, "Jump" },
26275 { report_deco_status, "DECO" },
26276 - { NULL, "Queue Manager Interface" },
26277 + { report_qi_status, "Queue Manager Interface" },
26278 { report_jr_status, "Job Ring" },
26279 { report_cond_code_status, "Condition Code" },
26281 @@ -250,4 +361,4 @@ void caam_jr_strstatus(struct device *jr
26283 dev_err(jrdev, "%d: unknown error source\n", ssrc);
26285 -EXPORT_SYMBOL(caam_jr_strstatus);
26286 +EXPORT_SYMBOL(caam_strstatus);
26287 --- a/drivers/crypto/caam/error.h
26288 +++ b/drivers/crypto/caam/error.h
26290 #ifndef CAAM_ERROR_H
26291 #define CAAM_ERROR_H
26292 #define CAAM_ERROR_STR_MAX 302
26293 -void caam_jr_strstatus(struct device *jrdev, u32 status);
26295 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
26297 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
26298 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
26300 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
26301 + int rowsize, int groupsize, struct scatterlist *sg,
26302 + size_t tlen, bool ascii);
26303 #endif /* CAAM_ERROR_H */
26304 --- a/drivers/crypto/caam/intern.h
26305 +++ b/drivers/crypto/caam/intern.h
26306 @@ -64,10 +64,9 @@ struct caam_drv_private_jr {
26307 * Driver-private storage for a single CAAM block instance
26309 struct caam_drv_private {
26311 - struct device *dev;
26312 - struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
26313 - struct platform_device *pdev;
26314 +#ifdef CONFIG_CAAM_QI
26315 + struct device *qidev;
26318 /* Physical-presence section */
26319 struct caam_ctrl __iomem *ctrl; /* controller region */
26320 @@ -84,6 +83,7 @@ struct caam_drv_private {
26321 u8 qi_present; /* Nonzero if QI present in device */
26322 int secvio_irq; /* Security violation interrupt number */
26323 int virt_en; /* Virtualization enabled in CAAM */
26324 + int era; /* CAAM Era (internal HW revision) */
26326 #define RNG4_MAX_HANDLES 2
26328 @@ -103,11 +103,6 @@ struct caam_drv_private {
26329 #ifdef CONFIG_DEBUG_FS
26330 struct dentry *dfs_root;
26331 struct dentry *ctl; /* controller dir */
26332 - struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
26333 - struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
26334 - struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
26335 - struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
26337 struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
26338 struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
26340 @@ -115,4 +110,22 @@ struct caam_drv_private {
26342 void caam_jr_algapi_init(struct device *dev);
26343 void caam_jr_algapi_remove(struct device *dev);
26345 +#ifdef CONFIG_DEBUG_FS
26346 +static int caam_debugfs_u64_get(void *data, u64 *val)
26348 + *val = caam64_to_cpu(*(u64 *)data);
26352 +static int caam_debugfs_u32_get(void *data, u64 *val)
26354 + *val = caam32_to_cpu(*(u32 *)data);
26358 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
26359 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
26362 #endif /* INTERN_H */
26363 --- a/drivers/crypto/caam/jr.c
26364 +++ b/drivers/crypto/caam/jr.c
26366 #include <linux/of_address.h>
26368 #include "compat.h"
26373 @@ -22,6 +23,14 @@ struct jr_driver_data {
26375 static struct jr_driver_data driver_data;
26377 +static int jr_driver_probed;
26379 +int caam_jr_driver_probed(void)
26381 + return jr_driver_probed;
26383 +EXPORT_SYMBOL(caam_jr_driver_probed);
26385 static int caam_reset_hw_jr(struct device *dev)
26387 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
26388 @@ -118,6 +127,8 @@ static int caam_jr_remove(struct platfor
26389 dev_err(jrdev, "Failed to shut down job ring\n");
26390 irq_dispose_mapping(jrpriv->irq);
26392 + jr_driver_probed--;
26397 @@ -281,6 +292,36 @@ struct device *caam_jr_alloc(void)
26398 EXPORT_SYMBOL(caam_jr_alloc);
26401 + * caam_jridx_alloc() - Alloc a specific job ring based on its index.
26403 + * returns : pointer to the newly allocated physical
26404 + * JobR dev can be written to if successful.
26406 +struct device *caam_jridx_alloc(int idx)
26408 + struct caam_drv_private_jr *jrpriv;
26409 + struct device *dev = ERR_PTR(-ENODEV);
26411 + spin_lock(&driver_data.jr_alloc_lock);
26413 + if (list_empty(&driver_data.jr_list))
26416 + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
26417 + if (jrpriv->ridx == idx) {
26418 + atomic_inc(&jrpriv->tfm_count);
26419 + dev = jrpriv->dev;
26425 + spin_unlock(&driver_data.jr_alloc_lock);
26428 +EXPORT_SYMBOL(caam_jridx_alloc);
26431 * caam_jr_free() - Free the Job Ring
26432 * @rdev - points to the dev that identifies the Job ring to
26434 @@ -497,15 +538,28 @@ static int caam_jr_probe(struct platform
26438 - jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
26439 + jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
26441 - if (sizeof(dma_addr_t) == sizeof(u64))
26442 - if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
26443 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
26444 + if (sizeof(dma_addr_t) == sizeof(u64)) {
26446 + error = dma_set_mask_and_coherent(jrdev,
26447 + DMA_BIT_MASK(49));
26448 + else if (of_device_is_compatible(nprop,
26449 + "fsl,sec-v5.0-job-ring"))
26450 + error = dma_set_mask_and_coherent(jrdev,
26451 + DMA_BIT_MASK(40));
26453 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
26455 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
26456 + error = dma_set_mask_and_coherent(jrdev,
26457 + DMA_BIT_MASK(36));
26459 + error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
26462 + dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
26468 /* Identify the interrupt */
26469 jrpriv->irq = irq_of_parse_and_map(nprop, 0);
26470 @@ -525,10 +579,12 @@ static int caam_jr_probe(struct platform
26472 atomic_set(&jrpriv->tfm_count, 0);
26474 + jr_driver_probed++;
26479 -static struct of_device_id caam_jr_match[] = {
26480 +static const struct of_device_id caam_jr_match[] = {
26482 .compatible = "fsl,sec-v4.0-job-ring",
26484 --- a/drivers/crypto/caam/jr.h
26485 +++ b/drivers/crypto/caam/jr.h
26489 /* Prototypes for backend-level services exposed to APIs */
26490 +int caam_jr_driver_probed(void);
26491 struct device *caam_jr_alloc(void);
26492 +struct device *caam_jridx_alloc(int idx);
26493 void caam_jr_free(struct device *rdev);
26494 int caam_jr_enqueue(struct device *dev, u32 *desc,
26495 void (*cbk)(struct device *dev, u32 *desc, u32 status,
26496 --- a/drivers/crypto/caam/key_gen.c
26497 +++ b/drivers/crypto/caam/key_gen.c
26498 @@ -41,15 +41,29 @@ Split key generation--------------------
26499 [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
26502 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
26503 - int split_key_pad_len, const u8 *key_in, u32 keylen,
26505 +int gen_split_key(struct device *jrdev, u8 *key_out,
26506 + struct alginfo * const adata, const u8 *key_in, u32 keylen,
26510 struct split_key_result result;
26511 dma_addr_t dma_addr_in, dma_addr_out;
26514 + adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
26515 + adata->keylen_pad = split_key_pad_len(adata->algtype &
26516 + OP_ALG_ALGSEL_MASK);
26519 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
26520 + adata->keylen, adata->keylen_pad);
26521 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
26522 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
26525 + if (adata->keylen_pad > max_keylen)
26528 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
26530 dev_err(jrdev, "unable to allocate key input memory\n");
26531 @@ -63,7 +77,7 @@ int gen_split_key(struct device *jrdev,
26535 - dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
26536 + dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
26538 if (dma_mapping_error(jrdev, dma_addr_out)) {
26539 dev_err(jrdev, "unable to map key output memory\n");
26540 @@ -74,7 +88,9 @@ int gen_split_key(struct device *jrdev,
26541 append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
26543 /* Sets MDHA up into an HMAC-INIT */
26544 - append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
26545 + append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
26546 + OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
26550 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
26551 @@ -87,7 +103,7 @@ int gen_split_key(struct device *jrdev,
26552 * FIFO_STORE with the explicit split-key content store
26553 * (0x26 output type)
26555 - append_fifo_store(desc, dma_addr_out, split_key_len,
26556 + append_fifo_store(desc, dma_addr_out, adata->keylen,
26557 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
26560 @@ -108,11 +124,11 @@ int gen_split_key(struct device *jrdev,
26562 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
26563 DUMP_PREFIX_ADDRESS, 16, 4, key_out,
26564 - split_key_pad_len, 1);
26565 + adata->keylen_pad, 1);
26569 - dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
26570 + dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
26573 dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
26574 --- a/drivers/crypto/caam/key_gen.h
26575 +++ b/drivers/crypto/caam/key_gen.h
26581 + * split_key_len - Compute MDHA split key length for a given algorithm
26582 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
26583 + * SHA224, SHA384, SHA512.
26585 + * Return: MDHA split key length
26587 +static inline u32 split_key_len(u32 hash)
26589 + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
26590 + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
26593 + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
26595 + return (u32)(mdpadlen[idx] * 2);
26599 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
26600 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
26601 + * SHA224, SHA384, SHA512.
26603 + * Return: MDHA split key pad length
26605 +static inline u32 split_key_pad_len(u32 hash)
26607 + return ALIGN(split_key_len(hash), 16);
26610 struct split_key_result {
26611 struct completion completion;
26613 @@ -12,6 +42,6 @@ struct split_key_result {
26615 void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
26617 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
26618 - int split_key_pad_len, const u8 *key_in, u32 keylen,
26620 +int gen_split_key(struct device *jrdev, u8 *key_out,
26621 + struct alginfo * const adata, const u8 *key_in, u32 keylen,
26623 --- a/drivers/crypto/caam/pdb.h
26624 +++ b/drivers/crypto/caam/pdb.h
26625 @@ -483,6 +483,8 @@ struct dsa_verify_pdb {
26626 #define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT)
26627 #define RSA_PDB_D_SHIFT 12
26628 #define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT)
26629 +#define RSA_PDB_Q_SHIFT 12
26630 +#define RSA_PDB_Q_MASK (0xFFF << RSA_PDB_Q_SHIFT)
26632 #define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT)
26633 #define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT)
26634 @@ -490,6 +492,8 @@ struct dsa_verify_pdb {
26635 #define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT)
26637 #define RSA_PRIV_KEY_FRM_1 0
26638 +#define RSA_PRIV_KEY_FRM_2 1
26639 +#define RSA_PRIV_KEY_FRM_3 2
26642 * RSA Encrypt Protocol Data Block
26643 @@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
26648 + * RSA Decrypt PDB - Private Key Form #2
26649 + * @sgf : scatter-gather field
26650 + * @g_dma : dma address of encrypted input data
26651 + * @f_dma : dma address of output data
26652 + * @d_dma : dma address of RSA private exponent
26653 + * @p_dma : dma address of RSA prime factor p of RSA modulus n
26654 + * @q_dma : dma address of RSA prime factor q of RSA modulus n
26655 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
26656 + * as internal state buffer. It is assumed to be as long as p.
26657 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
26658 + * as internal state buffer. It is assumed to be as long as q.
26659 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
26661 +struct rsa_priv_f2_pdb {
26663 + dma_addr_t g_dma;
26664 + dma_addr_t f_dma;
26665 + dma_addr_t d_dma;
26666 + dma_addr_t p_dma;
26667 + dma_addr_t q_dma;
26668 + dma_addr_t tmp1_dma;
26669 + dma_addr_t tmp2_dma;
26674 + * RSA Decrypt PDB - Private Key Form #3
26675 + * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
26676 + * the RSA modulus.
26677 + * @sgf : scatter-gather field
26678 + * @g_dma : dma address of encrypted input data
26679 + * @f_dma : dma address of output data
26680 + * @c_dma : dma address of RSA CRT coefficient
26681 + * @p_dma : dma address of RSA prime factor p of RSA modulus n
26682 + * @q_dma : dma address of RSA prime factor q of RSA modulus n
26683 + * @dp_dma : dma address of RSA CRT exponent of RSA prime factor p
26684 + * @dq_dma  : dma address of RSA CRT exponent of RSA prime factor q
26685 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
26686 + * as internal state buffer. It is assumed to be as long as p.
26687 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
26688 + * as internal state buffer. It is assumed to be as long as q.
26689 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
26691 +struct rsa_priv_f3_pdb {
26693 + dma_addr_t g_dma;
26694 + dma_addr_t f_dma;
26695 + dma_addr_t c_dma;
26696 + dma_addr_t p_dma;
26697 + dma_addr_t q_dma;
26698 + dma_addr_t dp_dma;
26699 + dma_addr_t dq_dma;
26700 + dma_addr_t tmp1_dma;
26701 + dma_addr_t tmp2_dma;
26706 --- a/drivers/crypto/caam/pkc_desc.c
26707 +++ b/drivers/crypto/caam/pkc_desc.c
26708 @@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, st
26709 append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
26710 RSA_PRIV_KEY_FRM_1);
26713 +/* Descriptor for RSA Private operation - Private Key Form #2 */
26714 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
26716 + init_job_desc_pdb(desc, 0, sizeof(*pdb));
26717 + append_cmd(desc, pdb->sgf);
26718 + append_ptr(desc, pdb->g_dma);
26719 + append_ptr(desc, pdb->f_dma);
26720 + append_ptr(desc, pdb->d_dma);
26721 + append_ptr(desc, pdb->p_dma);
26722 + append_ptr(desc, pdb->q_dma);
26723 + append_ptr(desc, pdb->tmp1_dma);
26724 + append_ptr(desc, pdb->tmp2_dma);
26725 + append_cmd(desc, pdb->p_q_len);
26726 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
26727 + RSA_PRIV_KEY_FRM_2);
26730 +/* Descriptor for RSA Private operation - Private Key Form #3 */
26731 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
26733 + init_job_desc_pdb(desc, 0, sizeof(*pdb));
26734 + append_cmd(desc, pdb->sgf);
26735 + append_ptr(desc, pdb->g_dma);
26736 + append_ptr(desc, pdb->f_dma);
26737 + append_ptr(desc, pdb->c_dma);
26738 + append_ptr(desc, pdb->p_dma);
26739 + append_ptr(desc, pdb->q_dma);
26740 + append_ptr(desc, pdb->dp_dma);
26741 + append_ptr(desc, pdb->dq_dma);
26742 + append_ptr(desc, pdb->tmp1_dma);
26743 + append_ptr(desc, pdb->tmp2_dma);
26744 + append_cmd(desc, pdb->p_q_len);
26745 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
26746 + RSA_PRIV_KEY_FRM_3);
26749 +++ b/drivers/crypto/caam/qi.c
26752 + * CAAM/SEC 4.x QI transport/backend driver
26753 + * Queue Interface backend functionality
26755 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
26756 + * Copyright 2016-2017 NXP
26759 +#include <linux/cpumask.h>
26760 +#include <linux/kthread.h>
26761 +#include <linux/fsl_qman.h>
26766 +#include "intern.h"
26767 +#include "desc_constr.h"
26769 +#define PREHDR_RSLS_SHIFT 31
26772 + * Use a reasonable backlog of frames (per CPU) as congestion threshold,
26773 + * so that resources used by the in-flight buffers do not become a memory hog.
26775 +#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
26777 +#define CAAM_QI_ENQUEUE_RETRIES 10000
26779 +#define CAAM_NAPI_WEIGHT 63
26782 + * caam_napi - struct holding CAAM NAPI-related params
26783 + * @irqtask: IRQ task for QI backend
26784 + * @p: QMan portal
26786 +struct caam_napi {
26787 + struct napi_struct irqtask;
26788 + struct qman_portal *p;
26792 + * caam_qi_pcpu_priv - percpu private data structure to main list of pending
26793 + * responses expected on each cpu.
26794 + * @caam_napi: CAAM NAPI params
26795 + * @net_dev: netdev used by NAPI
26796 + * @rsp_fq: response FQ from CAAM
26798 +struct caam_qi_pcpu_priv {
26799 + struct caam_napi caam_napi;
26800 + struct net_device net_dev;
26801 + struct qman_fq *rsp_fq;
26802 +} ____cacheline_aligned;
26804 +static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
26805 +static DEFINE_PER_CPU(int, last_cpu);
26808 + * caam_qi_priv - CAAM QI backend private params
26809 + * @cgr: QMan congestion group
26810 + * @qi_pdev: platform device for QI backend
26812 +struct caam_qi_priv {
26813 + struct qman_cgr cgr;
26814 + struct platform_device *qi_pdev;
26817 +static struct caam_qi_priv qipriv ____cacheline_aligned;
26820 + * This is written by only one core - the one that initialized the CGR - and
26821 + * read by multiple cores (all the others).
26823 +bool caam_congested __read_mostly;
26824 +EXPORT_SYMBOL(caam_congested);
26826 +#ifdef CONFIG_DEBUG_FS
26828 + * This is a counter for the number of times the congestion group (where all
26829 + * the request and response queues are) reached congestion. Incremented
26830 + * each time the congestion callback is called with congested == true.
26832 +static u64 times_congested;
26836 + * CPU from where the module initialised. This is required because QMan driver
26837 + * requires CGRs to be removed from same CPU from where they were originally
26840 +static int mod_init_cpu;
26843 + * This is a cache of buffers, from which the users of CAAM QI driver
26844 + * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
26845 + * doing malloc on the hotpath.
26846 + * NOTE: A more elegant solution would be to have some headroom in the frames
26847 + * being processed. This could be added by the dpaa-ethernet driver.
26848 + * This would pose a problem for userspace application processing which
26849 + * cannot know of this limitation. So for now, this will work.
26850 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
26852 +static struct kmem_cache *qi_cache;
26854 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
26858 + int num_retries = 0;
26861 + fd.format = qm_fd_compound;
26862 + fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
26863 + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
26864 + DMA_BIDIRECTIONAL);
26865 + if (dma_mapping_error(qidev, fd.addr)) {
26866 + dev_err(qidev, "DMA mapping error for QI enqueue request\n");
26871 + ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
26872 + if (likely(!ret))
26875 + if (ret != -EBUSY)
26878 + } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
26880 + dev_err(qidev, "qman_enqueue failed: %d\n", ret);
26884 +EXPORT_SYMBOL(caam_qi_enqueue);
26886 +static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
26887 + const struct qm_mr_entry *msg)
26889 + const struct qm_fd *fd;
26890 + struct caam_drv_req *drv_req;
26891 + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
26893 + fd = &msg->ern.fd;
26895 + if (fd->format != qm_fd_compound) {
26896 + dev_err(qidev, "Non-compound FD from CAAM\n");
26900 + drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
26903 + "Can't find original request for CAAM response\n");
26907 + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
26908 + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
26910 + drv_req->cbk(drv_req, -EIO);
26913 +static struct qman_fq *create_caam_req_fq(struct device *qidev,
26914 + struct qman_fq *rsp_fq,
26915 + dma_addr_t hwdesc,
26916 + int fq_sched_flag)
26919 + struct qman_fq *req_fq;
26920 + struct qm_mcc_initfq opts;
26922 + req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
26924 + return ERR_PTR(-ENOMEM);
26926 + req_fq->cb.ern = caam_fq_ern_cb;
26927 + req_fq->cb.fqs = NULL;
26929 + ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
26930 + QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
26933 + dev_err(qidev, "Failed to create session req FQ\n");
26934 + goto create_req_fq_fail;
26937 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
26938 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
26939 + QM_INITFQ_WE_CGID;
26940 + opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
26941 + opts.fqd.dest.channel = qm_channel_caam;
26942 + opts.fqd.dest.wq = 2;
26943 + opts.fqd.context_b = qman_fq_fqid(rsp_fq);
26944 + opts.fqd.context_a.hi = upper_32_bits(hwdesc);
26945 + opts.fqd.context_a.lo = lower_32_bits(hwdesc);
26946 + opts.fqd.cgid = qipriv.cgr.cgrid;
26948 + ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
26950 + dev_err(qidev, "Failed to init session req FQ\n");
26951 + goto init_req_fq_fail;
26954 + dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
26955 + smp_processor_id());
26959 + qman_destroy_fq(req_fq, 0);
26960 +create_req_fq_fail:
26962 + return ERR_PTR(ret);
26965 +static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
26969 + ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
26970 + QMAN_VOLATILE_FLAG_FINISH,
26971 + QM_VDQCR_PRECEDENCE_VDQCR |
26972 + QM_VDQCR_NUMFRAMES_TILLEMPTY);
26974 + dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
26979 + struct qman_portal *p;
26981 + p = qman_get_affine_portal(smp_processor_id());
26982 + qman_p_poll_dqrr(p, 16);
26983 + } while (fq->flags & QMAN_FQ_STATE_NE);
26988 +static int kill_fq(struct device *qidev, struct qman_fq *fq)
26993 + ret = qman_retire_fq(fq, &flags);
26995 + dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
27002 + /* Async FQ retirement condition */
27004 + /* Retry till FQ gets in retired state */
27007 + } while (fq->state != qman_fq_state_retired);
27009 + WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
27010 + WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
27014 + if (fq->flags & QMAN_FQ_STATE_NE) {
27015 + ret = empty_retired_fq(qidev, fq);
27017 + dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
27023 + ret = qman_oos_fq(fq);
27025 + dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
27027 + qman_destroy_fq(fq, 0);
27033 +static int empty_caam_fq(struct qman_fq *fq)
27036 + struct qm_mcr_queryfq_np np;
27038 + /* Wait till the older CAAM FQ get empty */
27040 + ret = qman_query_fq_np(fq, &np);
27051 + * Give extra time for pending jobs from this FQ in holding tanks
27052 + * to get processed
27058 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
27062 + struct qman_fq *new_fq, *old_fq;
27063 + struct device *qidev = drv_ctx->qidev;
27065 + num_words = desc_len(sh_desc);
27066 + if (num_words > MAX_SDLEN) {
27067 + dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
27071 + /* Note down older req FQ */
27072 + old_fq = drv_ctx->req_fq;
27074 + /* Create a new req FQ in parked state */
27075 + new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
27076 + drv_ctx->context_a, 0);
27077 + if (unlikely(IS_ERR_OR_NULL(new_fq))) {
27078 + dev_err(qidev, "FQ allocation for shdesc update failed\n");
27079 + return PTR_ERR(new_fq);
27082 + /* Hook up new FQ to context so that new requests keep queuing */
27083 + drv_ctx->req_fq = new_fq;
27085 + /* Empty and remove the older FQ */
27086 + ret = empty_caam_fq(old_fq);
27088 + dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
27090 + /* We can revert to older FQ */
27091 + drv_ctx->req_fq = old_fq;
27093 + if (kill_fq(qidev, new_fq))
27094 + dev_warn(qidev, "New CAAM FQ kill failed\n");
27100 + * Re-initialise pre-header. Set RSLS and SDLEN.
27101 + * Update the shared descriptor for driver context.
27103 + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
27105 + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
27106 + dma_sync_single_for_device(qidev, drv_ctx->context_a,
27107 + sizeof(drv_ctx->sh_desc) +
27108 + sizeof(drv_ctx->prehdr),
27109 + DMA_BIDIRECTIONAL);
27111 + /* Put the new FQ in scheduled state */
27112 + ret = qman_schedule_fq(new_fq);
27114 + dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
27117 + * We can kill new FQ and revert to old FQ.
27118 + * Since the desc is already modified, it is success case
27121 + drv_ctx->req_fq = old_fq;
27123 + if (kill_fq(qidev, new_fq))
27124 + dev_warn(qidev, "New CAAM FQ kill failed\n");
27125 + } else if (kill_fq(qidev, old_fq)) {
27126 + dev_warn(qidev, "Old CAAM FQ kill failed\n");
27131 +EXPORT_SYMBOL(caam_drv_ctx_update);
27133 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
27139 + dma_addr_t hwdesc;
27140 + struct caam_drv_ctx *drv_ctx;
27141 + const cpumask_t *cpus = qman_affine_cpus();
27143 + num_words = desc_len(sh_desc);
27144 + if (num_words > MAX_SDLEN) {
27145 + dev_err(qidev, "Invalid descriptor len: %d words\n",
27147 + return ERR_PTR(-EINVAL);
27150 + drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
27152 + return ERR_PTR(-ENOMEM);
27155 + * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
27156 + * and dma-map them.
27158 + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
27160 + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
27161 + size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
27162 + hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
27163 + DMA_BIDIRECTIONAL);
27164 + if (dma_mapping_error(qidev, hwdesc)) {
27165 + dev_err(qidev, "DMA map error for preheader + shdesc\n");
27167 + return ERR_PTR(-ENOMEM);
27169 + drv_ctx->context_a = hwdesc;
27171 + /* If given CPU does not own the portal, choose another one that does */
27172 + if (!cpumask_test_cpu(*cpu, cpus)) {
27173 + int *pcpu = &get_cpu_var(last_cpu);
27175 + *pcpu = cpumask_next(*pcpu, cpus);
27176 + if (*pcpu >= nr_cpu_ids)
27177 + *pcpu = cpumask_first(cpus);
27180 + put_cpu_var(last_cpu);
27182 + drv_ctx->cpu = *cpu;
27184 + /* Find response FQ hooked with this CPU */
27185 + drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
27187 + /* Attach request FQ */
27188 + drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
27189 + QMAN_INITFQ_FLAG_SCHED);
27190 + if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
27191 + dev_err(qidev, "create_caam_req_fq failed\n");
27192 + dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
27194 + return ERR_PTR(-ENOMEM);
27197 + drv_ctx->qidev = qidev;
27200 +EXPORT_SYMBOL(caam_drv_ctx_init);
27202 +void *qi_cache_alloc(gfp_t flags)
27204 + return kmem_cache_alloc(qi_cache, flags);
27206 +EXPORT_SYMBOL(qi_cache_alloc);
27208 +void qi_cache_free(void *obj)
27210 + kmem_cache_free(qi_cache, obj);
27212 +EXPORT_SYMBOL(qi_cache_free);
27214 +static int caam_qi_poll(struct napi_struct *napi, int budget)
27216 + struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
27218 + int cleaned = qman_p_poll_dqrr(np->p, budget);
27220 + if (cleaned < budget) {
27221 + napi_complete(napi);
27222 + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
27228 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
27230 + if (IS_ERR_OR_NULL(drv_ctx))
27233 + /* Remove request FQ */
27234 + if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
27235 + dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
27237 + dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
27238 + sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
27239 + DMA_BIDIRECTIONAL);
27242 +EXPORT_SYMBOL(caam_drv_ctx_rel);
27244 +int caam_qi_shutdown(struct device *qidev)
27247 + struct caam_qi_priv *priv = dev_get_drvdata(qidev);
27248 + const cpumask_t *cpus = qman_affine_cpus();
27249 + struct cpumask old_cpumask = current->cpus_allowed;
27251 + for_each_cpu(i, cpus) {
27252 + struct napi_struct *irqtask;
27254 + irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
27255 + napi_disable(irqtask);
27256 + netif_napi_del(irqtask);
27258 + if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
27259 + dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
27263 + * QMan driver requires CGRs to be deleted from same CPU from where they
27264 + * were instantiated. Hence we get the module removal execute from the
27265 + * same CPU from where it was originally inserted.
27267 + set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
27269 + ret = qman_delete_cgr(&priv->cgr);
27271 + dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
27273 + qman_release_cgrid(priv->cgr.cgrid);
27275 + kmem_cache_destroy(qi_cache);
27277 + /* Now that we're done with the CGRs, restore the cpus allowed mask */
27278 + set_cpus_allowed_ptr(current, &old_cpumask);
27280 + platform_device_unregister(priv->qi_pdev);
27284 +static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
27286 + caam_congested = congested;
27289 +#ifdef CONFIG_DEBUG_FS
27290 + times_congested++;
27292 + pr_debug_ratelimited("CAAM entered congestion\n");
27295 + pr_debug_ratelimited("CAAM exited congestion\n");
27299 +static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
27302 + * In case of threaded ISR, for RT kernels in_irq() does not return
27303 + * appropriate value, so use in_serving_softirq to distinguish between
27304 + * softirq and irq contexts.
27306 + if (unlikely(in_irq() || !in_serving_softirq())) {
27307 + /* Disable QMan IRQ source and invoke NAPI */
27308 + qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
27310 + napi_schedule(&np->irqtask);
27316 +static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
27317 + struct qman_fq *rsp_fq,
27318 + const struct qm_dqrr_entry *dqrr)
27320 + struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
27321 + struct caam_drv_req *drv_req;
27322 + const struct qm_fd *fd;
27323 + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
27325 + if (caam_qi_napi_schedule(p, caam_napi))
27326 + return qman_cb_dqrr_stop;
27329 + if (unlikely(fd->status))
27330 + dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
27332 +	if (unlikely(fd->format != qm_fd_compound)) {
27333 + dev_err(qidev, "Non-compound FD from CAAM\n");
27334 + return qman_cb_dqrr_consume;
27337 + drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
27338 + if (unlikely(!drv_req)) {
27340 + "Can't find original request for caam response\n");
27341 + return qman_cb_dqrr_consume;
27344 + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
27345 + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
27347 + drv_req->cbk(drv_req, fd->status);
27348 + return qman_cb_dqrr_consume;
27351 +static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
27353 + struct qm_mcc_initfq opts;
27354 + struct qman_fq *fq;
27357 + fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
27361 + fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
27363 + ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
27364 + QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
27366 + dev_err(qidev, "Rsp FQ create failed\n");
27371 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
27372 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
27373 + QM_INITFQ_WE_CGID;
27374 + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
27376 + opts.fqd.dest.channel = qman_affine_channel(cpu);
27377 + opts.fqd.dest.wq = 3;
27378 + opts.fqd.cgid = qipriv.cgr.cgrid;
27379 + opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
27380 + QM_STASHING_EXCL_DATA;
27381 + opts.fqd.context_a.stashing.data_cl = 1;
27382 + opts.fqd.context_a.stashing.context_cl = 1;
27384 + ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
27386 + dev_err(qidev, "Rsp FQ init failed\n");
27391 + per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
27393 + dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
27397 +static int init_cgr(struct device *qidev)
27400 + struct qm_mcc_initcgr opts;
27401 + const u64 cpus = *(u64 *)qman_affine_cpus();
27402 + const int num_cpus = hweight64(cpus);
27403 + const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
27405 + ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
27407 + dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
27411 + qipriv.cgr.cb = cgr_cb;
27412 + memset(&opts, 0, sizeof(opts));
27413 + opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
27414 + opts.cgr.cscn_en = QM_CGR_EN;
27415 + opts.cgr.mode = QMAN_CGR_MODE_FRAME;
27416 + qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
27418 + ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
27420 + dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
27421 + qipriv.cgr.cgrid);
27425 + dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
27429 +static int alloc_rsp_fqs(struct device *qidev)
27432 + const cpumask_t *cpus = qman_affine_cpus();
27434 + /*Now create response FQs*/
27435 + for_each_cpu(i, cpus) {
27436 + ret = alloc_rsp_fq_cpu(qidev, i);
27438 + dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
27446 +static void free_rsp_fqs(void)
27449 + const cpumask_t *cpus = qman_affine_cpus();
27451 + for_each_cpu(i, cpus)
27452 + kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
27455 +int caam_qi_init(struct platform_device *caam_pdev)
27458 + struct platform_device *qi_pdev;
27459 + struct device *ctrldev = &caam_pdev->dev, *qidev;
27460 + struct caam_drv_private *ctrlpriv;
27461 + const cpumask_t *cpus = qman_affine_cpus();
27462 + struct cpumask old_cpumask = current->cpus_allowed;
27463 + static struct platform_device_info qi_pdev_info = {
27464 + .name = "caam_qi",
27465 + .id = PLATFORM_DEVID_NONE
27469 + * QMAN requires CGRs to be removed from same CPU+portal from where it
27470 + * was originally allocated. Hence we need to note down the
27471 + * initialisation CPU and use the same CPU for module exit.
27472 + * We select the first CPU from the list of portal owning CPUs.
27473 + * Then we pin module init to this CPU.
27475 + mod_init_cpu = cpumask_first(cpus);
27476 + set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
27478 + qi_pdev_info.parent = ctrldev;
27479 + qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
27480 + qi_pdev = platform_device_register_full(&qi_pdev_info);
27481 + if (IS_ERR(qi_pdev))
27482 + return PTR_ERR(qi_pdev);
27483 + arch_setup_dma_ops(&qi_pdev->dev, 0, 0, NULL, true);
27485 + ctrlpriv = dev_get_drvdata(ctrldev);
27486 + qidev = &qi_pdev->dev;
27488 + qipriv.qi_pdev = qi_pdev;
27489 + dev_set_drvdata(qidev, &qipriv);
27491 + /* Initialize the congestion detection */
27492 + err = init_cgr(qidev);
27494 + dev_err(qidev, "CGR initialization failed: %d\n", err);
27495 + platform_device_unregister(qi_pdev);
27499 + /* Initialise response FQs */
27500 + err = alloc_rsp_fqs(qidev);
27502 + dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
27504 + platform_device_unregister(qi_pdev);
27509 + * Enable the NAPI contexts on each of the core which has an affine
27512 + for_each_cpu(i, cpus) {
27513 + struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
27514 + struct caam_napi *caam_napi = &priv->caam_napi;
27515 + struct napi_struct *irqtask = &caam_napi->irqtask;
27516 + struct net_device *net_dev = &priv->net_dev;
27518 + net_dev->dev = *qidev;
27519 + INIT_LIST_HEAD(&net_dev->napi_list);
27521 + netif_napi_add(net_dev, irqtask, caam_qi_poll,
27522 + CAAM_NAPI_WEIGHT);
27524 + napi_enable(irqtask);
27527 + /* Hook up QI device to parent controlling caam device */
27528 + ctrlpriv->qidev = qidev;
27530 + qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
27531 + SLAB_CACHE_DMA, NULL);
27533 + dev_err(qidev, "Can't allocate CAAM cache\n");
27535 + platform_device_unregister(qi_pdev);
27539 + /* Done with the CGRs; restore the cpus allowed mask */
27540 + set_cpus_allowed_ptr(current, &old_cpumask);
27541 +#ifdef CONFIG_DEBUG_FS
27542 + debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
27543 +			    &times_congested, &caam_fops_u64_ro);
27545 + dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
27549 +++ b/drivers/crypto/caam/qi.h
27552 + * Public definitions for the CAAM/QI (Queue Interface) backend.
27554 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
27555 + * Copyright 2016-2017 NXP
27561 +#include <linux/fsl_qman.h>
27562 +#include "compat.h"
27564 +#include "desc_constr.h"
27567 + * CAAM hardware constructs a job descriptor which points to a shared descriptor
27568 + * (as pointed by context_a of to-CAAM FQ).
27569 + * When the job descriptor is executed by DECO, the whole job descriptor
27570 + * together with shared descriptor gets loaded in DECO buffer, which is
27571 + * 64 words (each 32-bit) long.
27573 + * The job descriptor constructed by CAAM hardware has the following layout:
27575 + * HEADER (1 word)
27576 + * Shdesc ptr (1 or 2 words)
27577 + * SEQ_OUT_PTR (1 word)
27578 + * Out ptr (1 or 2 words)
27579 + * Out length (1 word)
27580 + * SEQ_IN_PTR (1 word)
27581 + * In ptr (1 or 2 words)
27582 + * In length (1 word)
27584 + * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
27586 + * Apart from shdesc contents, the total number of words that get loaded in DECO
27587 + * buffer are '8' or '11'. The remaining words in DECO buffer can be used for
27588 + * storing shared descriptor.
27590 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
27592 +/* Length of a single buffer in the QI driver memory cache */
27593 +#define CAAM_QI_MEMCACHE_SIZE 768
27595 +extern bool caam_congested __read_mostly;
27598 + * This is the request structure the driver application should fill while
27599 + * submitting a job to driver.
27601 +struct caam_drv_req;
27604 + * caam_qi_cbk - application's callback function invoked by the driver when the
27605 + * request has been successfully processed.
27606 + * @drv_req: original request that was submitted
27607 + * @status: completion status of request (0 - success, non-zero - error code)
27609 +typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
27619 + * caam_drv_ctx - CAAM/QI backend driver context
27621 + * The jobs are processed by the driver against a driver context.
27622 + * With every cryptographic context, a driver context is attached.
27623 + * The driver context contains data for private use by driver.
27624 + * For the applications, this is an opaque structure.
27626 + * @prehdr: preheader placed before shrd desc
27627 + * @sh_desc: shared descriptor
27628 + * @context_a: shared descriptor dma address
27629 + * @req_fq: to-CAAM request frame queue
27630 + * @rsp_fq: from-CAAM response frame queue
27631 + * @cpu: cpu on which to receive CAAM response
27632 + * @op_type: operation type
27633 + * @qidev: device pointer for CAAM/QI backend
27635 +struct caam_drv_ctx {
27637 + u32 sh_desc[MAX_SDLEN];
27638 + dma_addr_t context_a;
27639 + struct qman_fq *req_fq;
27640 + struct qman_fq *rsp_fq;
27642 + enum optype op_type;
27643 + struct device *qidev;
27644 +} ____cacheline_aligned;
27647 + * caam_drv_req - The request structure the driver application should fill while
27648 + * submitting a job to driver.
27649 + * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
27651 + * @cbk: callback function to invoke when job is completed
27652 + * @app_ctx: arbitrary context attached with request by the application
27654 + * The fields mentioned below should not be used by application.
27655 + * These are for private use by driver.
27657 + * @hdr__: linked list header to maintain list of outstanding requests to CAAM
27658 + * @hwaddr: DMA address for the S/G table.
27660 +struct caam_drv_req {
27661 + struct qm_sg_entry fd_sgt[2];
27662 + struct caam_drv_ctx *drv_ctx;
27665 +} ____cacheline_aligned;
27668 + * caam_drv_ctx_init - Initialise a CAAM/QI driver context
27670 + * A CAAM/QI driver context must be attached with each cryptographic context.
27671 + * This function allocates memory for CAAM/QI context and returns a handle to
27672 + * the application. This handle must be submitted along with each enqueue
27673 + * request to the driver by the application.
27675 + * @cpu: CPU where the application prefers to the driver to receive CAAM
27676 + * responses. The request completion callback would be issued from this
27678 + * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
27681 + * Returns a driver context on success or negative error code on failure.
27683 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
27687 + * caam_qi_enqueue - Submit a request to QI backend driver.
27689 + * The request structure must be properly filled as described above.
27691 + * @qidev: device pointer for QI backend
27692 + * @req: CAAM QI request structure
27694 + * Returns 0 on success or negative error code on failure.
27696 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
27699 + * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
27700 + * or too many CAAM responses are pending to be processed.
27701 + * @drv_ctx: driver context for which job is to be submitted
27703 + * Returns caam congestion status 'true/false'
27705 +bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
27708 + * caam_drv_ctx_update - Update QI driver context
27710 + * Invoked when shared descriptor is required to be change in driver context.
27712 + * @drv_ctx: driver context to be updated
27713 + * @sh_desc: new shared descriptor pointer to be updated in QI driver context
27715 + * Returns 0 on success or negative error code on failure.
27717 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
27720 + * caam_drv_ctx_rel - Release a QI driver context
27721 + * @drv_ctx: context to be released
27723 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
27725 +int caam_qi_init(struct platform_device *pdev);
27726 +int caam_qi_shutdown(struct device *dev);
27729 + * qi_cache_alloc - Allocate buffers from CAAM-QI cache
27731 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has
27732 + * to be allocated on the hotpath. Instead of using malloc, one can use the
27733 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
27734 + * will have a size of CAAM_QI_MEMCACHE_SIZE, sufficient for hosting 16 SG entries.
27736 + * @flags: flags that would be used for the equivalent malloc(..) call
27738 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
27740 +void *qi_cache_alloc(gfp_t flags);
27743 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
27745 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs
27746 + * the buffer previously allocated by a qi_cache_alloc call.
27747 + * No checking is being done, the call is a passthrough call to
27748 + * kmem_cache_free(...)
27750 + * @obj: object previously allocated using qi_cache_alloc()
27752 +void qi_cache_free(void *obj);
27754 +#endif /* __QI_H__ */
27755 --- a/drivers/crypto/caam/regs.h
27756 +++ b/drivers/crypto/caam/regs.h
27758 * CAAM hardware register-level view
27760 * Copyright 2008-2011 Freescale Semiconductor, Inc.
27761 + * Copyright 2017 NXP
27768 extern bool caam_little_end;
27769 +extern bool caam_imx;
27771 #define caam_to_cpu(len) \
27772 static inline u##len caam##len ## _to_cpu(u##len val) \
27773 @@ -154,13 +156,10 @@ static inline u64 rd_reg64(void __iomem
27774 #else /* CONFIG_64BIT */
27775 static inline void wr_reg64(void __iomem *reg, u64 data)
27777 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
27778 - if (caam_little_end) {
27779 + if (!caam_imx && caam_little_end) {
27780 wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
27781 wr_reg32((u32 __iomem *)(reg), data);
27786 wr_reg32((u32 __iomem *)(reg), data >> 32);
27787 wr_reg32((u32 __iomem *)(reg) + 1, data);
27789 @@ -168,41 +167,40 @@ static inline void wr_reg64(void __iomem
27791 static inline u64 rd_reg64(void __iomem *reg)
27793 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
27794 - if (caam_little_end)
27795 + if (!caam_imx && caam_little_end)
27796 return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
27797 (u64)rd_reg32((u32 __iomem *)(reg)));
27800 - return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
27801 - (u64)rd_reg32((u32 __iomem *)(reg) + 1));
27803 + return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
27804 + (u64)rd_reg32((u32 __iomem *)(reg) + 1));
27806 #endif /* CONFIG_64BIT */
27808 +static inline u64 cpu_to_caam_dma64(dma_addr_t value)
27811 + return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
27812 + (u64)cpu_to_caam32(upper_32_bits(value)));
27814 + return cpu_to_caam64(value);
27817 +static inline u64 caam_dma64_to_cpu(u64 value)
27820 + return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
27821 + (u64)caam32_to_cpu(upper_32_bits(value)));
27823 + return caam64_to_cpu(value);
27826 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
27827 -#ifdef CONFIG_SOC_IMX7D
27828 -#define cpu_to_caam_dma(value) \
27829 - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
27830 - (u64)cpu_to_caam32(upper_32_bits(value)))
27831 -#define caam_dma_to_cpu(value) \
27832 - (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
27833 - (u64)caam32_to_cpu(upper_32_bits(value)))
27835 -#define cpu_to_caam_dma(value) cpu_to_caam64(value)
27836 -#define caam_dma_to_cpu(value) caam64_to_cpu(value)
27837 -#endif /* CONFIG_SOC_IMX7D */
27838 +#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
27839 +#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
27841 #define cpu_to_caam_dma(value) cpu_to_caam32(value)
27842 #define caam_dma_to_cpu(value) caam32_to_cpu(value)
27843 -#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
27845 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
27846 -#define cpu_to_caam_dma64(value) \
27847 - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
27848 - (u64)cpu_to_caam32(upper_32_bits(value)))
27850 -#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
27852 +#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
27856 @@ -293,6 +291,7 @@ struct caam_perfmon {
27857 u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
27858 #define CTPR_MS_QI_SHIFT 25
27859 #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
27860 +#define CTPR_MS_DPAA2 BIT(13)
27861 #define CTPR_MS_VIRT_EN_INCL 0x00000001
27862 #define CTPR_MS_VIRT_EN_POR 0x00000002
27863 #define CTPR_MS_PG_SZ_MASK 0x10
27864 @@ -628,6 +627,8 @@ struct caam_job_ring {
27865 #define JRSTA_DECOERR_INVSIGN 0x86
27866 #define JRSTA_DECOERR_DSASIGN 0x87
27868 +#define JRSTA_QIERR_ERROR_MASK 0x00ff
27870 #define JRSTA_CCBERR_JUMP 0x08000000
27871 #define JRSTA_CCBERR_INDEX_MASK 0xff00
27872 #define JRSTA_CCBERR_INDEX_SHIFT 8
27874 +++ b/drivers/crypto/caam/sg_sw_qm.h
27877 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
27878 + * Copyright 2016-2017 NXP
27880 + * Redistribution and use in source and binary forms, with or without
27881 + * modification, are permitted provided that the following conditions are met:
27882 + * * Redistributions of source code must retain the above copyright
27883 + * notice, this list of conditions and the following disclaimer.
27884 + * * Redistributions in binary form must reproduce the above copyright
27885 + * notice, this list of conditions and the following disclaimer in the
27886 + * documentation and/or other materials provided with the distribution.
27887 + * * Neither the name of Freescale Semiconductor nor the
27888 + * names of its contributors may be used to endorse or promote products
27889 + * derived from this software without specific prior written permission.
27892 + * ALTERNATIVELY, this software may be distributed under the terms of the
27893 + * GNU General Public License ("GPL") as published by the Free Software
27894 + * Foundation, either version 2 of that License or (at your option) any
27897 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
27898 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
27899 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27900 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27901 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
27902 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27903 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27904 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27905 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27906 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27909 +#ifndef __SG_SW_QM_H
27910 +#define __SG_SW_QM_H
27912 +#include <linux/fsl_qman.h>
27915 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
27917 + dma_addr_t addr = qm_sg_ptr->opaque;
27919 + qm_sg_ptr->opaque = cpu_to_caam64(addr);
27920 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
27923 +static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
27924 + u32 len, u16 offset)
27926 + qm_sg_ptr->addr = dma;
27927 + qm_sg_ptr->length = len;
27928 + qm_sg_ptr->__reserved2 = 0;
27929 + qm_sg_ptr->bpid = 0;
27930 + qm_sg_ptr->__reserved3 = 0;
27931 + qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
27933 + cpu_to_hw_sg(qm_sg_ptr);
27936 +static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
27937 + dma_addr_t dma, u32 len, u16 offset)
27939 + qm_sg_ptr->extension = 0;
27940 + qm_sg_ptr->final = 0;
27941 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
27944 +static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
27945 + dma_addr_t dma, u32 len, u16 offset)
27947 + qm_sg_ptr->extension = 0;
27948 + qm_sg_ptr->final = 1;
27949 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
27952 +static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
27953 + dma_addr_t dma, u32 len, u16 offset)
27955 + qm_sg_ptr->extension = 1;
27956 + qm_sg_ptr->final = 0;
27957 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
27960 +static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
27961 + dma_addr_t dma, u32 len,
27964 + qm_sg_ptr->extension = 1;
27965 + qm_sg_ptr->final = 1;
27966 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
27970 + * convert scatterlist to h/w link table format
27971 + * but does not have final bit; instead, returns last entry
27973 +static inline struct qm_sg_entry *
27974 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
27975 + struct qm_sg_entry *qm_sg_ptr, u16 offset)
27977 + while (sg_count && sg) {
27978 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
27979 + sg_dma_len(sg), offset);
27981 + sg = sg_next(sg);
27984 + return qm_sg_ptr - 1;
27988 + * convert scatterlist to h/w link table format
27989 + * scatterlist must have been previously dma mapped
27991 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
27992 + struct qm_sg_entry *qm_sg_ptr, u16 offset)
27994 + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
27996 + qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
27997 + qm_sg_ptr->final = 1;
27998 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
28001 +#endif /* __SG_SW_QM_H */
28003 +++ b/drivers/crypto/caam/sg_sw_qm2.h
28006 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
28007 + * Copyright 2017 NXP
28009 + * Redistribution and use in source and binary forms, with or without
28010 + * modification, are permitted provided that the following conditions are met:
28011 + * * Redistributions of source code must retain the above copyright
28012 + * notice, this list of conditions and the following disclaimer.
28013 + * * Redistributions in binary form must reproduce the above copyright
28014 + * notice, this list of conditions and the following disclaimer in the
28015 + * documentation and/or other materials provided with the distribution.
28016 + * * Neither the names of the above-listed copyright holders nor the
28017 + * names of any contributors may be used to endorse or promote products
28018 + * derived from this software without specific prior written permission.
28021 + * ALTERNATIVELY, this software may be distributed under the terms of the
28022 + * GNU General Public License ("GPL") as published by the Free Software
28023 + * Foundation, either version 2 of that License or (at your option) any
28026 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
28027 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28028 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28029 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
28030 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28031 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28032 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28033 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28034 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28035 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28036 + * POSSIBILITY OF SUCH DAMAGE.
28039 +#ifndef _SG_SW_QM2_H_
28040 +#define _SG_SW_QM2_H_
28042 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
28044 +static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
28045 + dma_addr_t dma, u32 len, u16 offset)
28047 + dpaa2_sg_set_addr(qm_sg_ptr, dma);
28048 + dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
28049 + dpaa2_sg_set_final(qm_sg_ptr, false);
28050 + dpaa2_sg_set_len(qm_sg_ptr, len);
28051 + dpaa2_sg_set_bpid(qm_sg_ptr, 0);
28052 + dpaa2_sg_set_offset(qm_sg_ptr, offset);
28056 + * convert scatterlist to h/w link table format
28057 + * but does not have final bit; instead, returns last entry
28059 +static inline struct dpaa2_sg_entry *
28060 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
28061 + struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
28063 + while (sg_count && sg) {
28064 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
28065 + sg_dma_len(sg), offset);
28067 + sg = sg_next(sg);
28070 + return qm_sg_ptr - 1;
28074 + * convert scatterlist to h/w link table format
28075 + * scatterlist must have been previously dma mapped
28077 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
28078 + struct dpaa2_sg_entry *qm_sg_ptr,
28081 + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
28082 + dpaa2_sg_set_final(qm_sg_ptr, true);
28085 +#endif /* _SG_SW_QM2_H_ */
28086 --- a/drivers/crypto/caam/sg_sw_sec4.h
28087 +++ b/drivers/crypto/caam/sg_sw_sec4.h
28092 +#ifndef _SG_SW_SEC4_H_
28093 +#define _SG_SW_SEC4_H_
28097 +#include "sg_sw_qm2.h"
28098 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
28100 -struct sec4_sg_entry;
28101 +struct sec4_sg_entry {
28108 * convert single dma address to h/w link table format
28109 @@ -15,9 +25,15 @@ struct sec4_sg_entry;
28110 static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
28111 dma_addr_t dma, u32 len, u16 offset)
28113 - sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
28114 - sec4_sg_ptr->len = cpu_to_caam32(len);
28115 - sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
28116 + if (caam_dpaa2) {
28117 + dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
28120 + sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
28121 + sec4_sg_ptr->len = cpu_to_caam32(len);
28122 + sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
28123 + SEC4_SG_OFFSET_MASK);
28126 print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
28127 DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
28128 @@ -43,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, in
28129 return sec4_sg_ptr - 1;
28132 +static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
28135 + dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
28137 + sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
28141 * convert scatterlist to h/w link table format
28142 * scatterlist must have been previously dma mapped
28143 @@ -52,31 +76,7 @@ static inline void sg_to_sec4_sg_last(st
28146 sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
28147 - sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
28150 -static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
28151 - struct scatterlist *sg, unsigned int total,
28152 - struct sec4_sg_entry *sec4_sg_ptr)
28155 - unsigned int len = min(sg_dma_len(sg), total);
28157 - dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
28159 - sg = sg_next(sg);
28162 - return sec4_sg_ptr - 1;
28163 + sg_to_sec4_set_last(sec4_sg_ptr);
28166 -/* derive number of elements in scatterlist, but return 0 for 1 */
28167 -static inline int sg_count(struct scatterlist *sg_list, int nbytes)
28169 - int sg_nents = sg_nents_for_len(sg_list, nbytes);
28171 - if (likely(sg_nents == 1))
28176 +#endif /* _SG_SW_SEC4_H_ */
28177 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c
28178 +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
28179 @@ -516,7 +516,7 @@ err:
28182 * rsi_disconnect() - This function performs the reverse of the probe function,
28183 - * it deintialize the driver structure.
28184 + * it deinitialize the driver structure.
28185 * @pfunction: Pointer to the USB interface structure.
28188 --- a/drivers/staging/wilc1000/linux_wlan.c
28189 +++ b/drivers/staging/wilc1000/linux_wlan.c
28190 @@ -211,7 +211,7 @@ static void deinit_irq(struct net_device
28191 vif = netdev_priv(dev);
28194 - /* Deintialize IRQ */
28195 + /* Deinitialize IRQ */
28196 if (wilc->dev_irq_num) {
28197 free_irq(wilc->dev_irq_num, wilc);
28198 gpio_free(wilc->gpio);
28199 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
28200 +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
28201 @@ -2359,7 +2359,7 @@ int wilc_deinit_host_int(struct net_devi
28202 del_timer_sync(&wilc_during_ip_timer);
28205 - netdev_err(net, "Error while deintializing host interface\n");
28206 + netdev_err(net, "Error while deinitializing host interface\n");
28211 +++ b/include/crypto/acompress.h
28214 + * Asynchronous Compression operations
28216 + * Copyright (c) 2016, Intel Corporation
28217 + * Authors: Weigang Li <weigang.li@intel.com>
28218 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
28220 + * This program is free software; you can redistribute it and/or modify it
28221 + * under the terms of the GNU General Public License as published by the Free
28222 + * Software Foundation; either version 2 of the License, or (at your option)
28223 + * any later version.
28226 +#ifndef _CRYPTO_ACOMP_H
28227 +#define _CRYPTO_ACOMP_H
28228 +#include <linux/crypto.h>
28230 +#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
28233 + * struct acomp_req - asynchronous (de)compression request
28235 + * @base: Common attributes for asynchronous crypto requests
28236 + * @src: Source Data
28237 + * @dst: Destination data
28238 + * @slen: Size of the input buffer
28239 + * @dlen: Size of the output buffer and number of bytes produced
28240 + * @flags: Internal flags
28241 + * @__ctx: Start of private context data
28243 +struct acomp_req {
28244 + struct crypto_async_request base;
28245 + struct scatterlist *src;
28246 + struct scatterlist *dst;
28247 + unsigned int slen;
28248 + unsigned int dlen;
28250 + void *__ctx[] CRYPTO_MINALIGN_ATTR;
28254 + * struct crypto_acomp - user-instantiated objects which encapsulate
28255 + * algorithms and core processing logic
28257 + * @compress: Function performs a compress operation
28258 + * @decompress: Function performs a de-compress operation
28259 + * @dst_free: Frees destination buffer if allocated inside the
28261 + * @reqsize: Context size for (de)compression requests
28262 + * @base: Common crypto API algorithm data structure
28264 +struct crypto_acomp {
28265 + int (*compress)(struct acomp_req *req);
28266 + int (*decompress)(struct acomp_req *req);
28267 + void (*dst_free)(struct scatterlist *dst);
28268 + unsigned int reqsize;
28269 + struct crypto_tfm base;
28273 + * struct acomp_alg - asynchronous compression algorithm
28275 + * @compress: Function performs a compress operation
28276 + * @decompress: Function performs a de-compress operation
28277 + * @dst_free: Frees destination buffer if allocated inside the algorithm
28278 + * @init: Initialize the cryptographic transformation object.
28279 + * This function is used to initialize the cryptographic
28280 + * transformation object. This function is called only once at
28281 + * the instantiation time, right after the transformation context
28282 + * was allocated. In case the cryptographic hardware has some
28283 + * special requirements which need to be handled by software, this
28284 + * function shall check for the precise requirement of the
28285 + * transformation and put any software fallbacks in place.
28286 + * @exit: Deinitialize the cryptographic transformation object. This is a
28287 + * counterpart to @init, used to remove various changes set in
28290 + * @reqsize: Context size for (de)compression requests
28291 + * @base: Common crypto API algorithm data structure
28293 +struct acomp_alg {
28294 + int (*compress)(struct acomp_req *req);
28295 + int (*decompress)(struct acomp_req *req);
28296 + void (*dst_free)(struct scatterlist *dst);
28297 + int (*init)(struct crypto_acomp *tfm);
28298 + void (*exit)(struct crypto_acomp *tfm);
28299 + unsigned int reqsize;
28300 + struct crypto_alg base;
28304 + * DOC: Asynchronous Compression API
28306 + * The Asynchronous Compression API is used with the algorithms of type
28307 + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
28311 + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
28312 + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
28313 + * compression algorithm e.g. "deflate"
28314 + * @type: specifies the type of the algorithm
28315 + * @mask: specifies the mask for the algorithm
28317 + * Allocate a handle for a compression algorithm. The returned struct
28318 + * crypto_acomp is the handle that is required for any subsequent
28319 + * API invocation for the compression operations.
28321 + * Return: allocated handle in case of success; IS_ERR() is true in case
28322 + * of an error, PTR_ERR() returns the error code.
28324 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
28327 +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
28329 + return &tfm->base;
28332 +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
28334 + return container_of(alg, struct acomp_alg, base);
28337 +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
28339 + return container_of(tfm, struct crypto_acomp, base);
28342 +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
28344 + return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
28347 +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
28349 + return tfm->reqsize;
28352 +static inline void acomp_request_set_tfm(struct acomp_req *req,
28353 + struct crypto_acomp *tfm)
28355 + req->base.tfm = crypto_acomp_tfm(tfm);
28358 +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
28360 + return __crypto_acomp_tfm(req->base.tfm);
28364 + * crypto_free_acomp() -- free ACOMPRESS tfm handle
28366 + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
28368 +static inline void crypto_free_acomp(struct crypto_acomp *tfm)
28370 + crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
28373 +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
28375 + type &= ~CRYPTO_ALG_TYPE_MASK;
28376 + type |= CRYPTO_ALG_TYPE_ACOMPRESS;
28377 + mask |= CRYPTO_ALG_TYPE_MASK;
28379 + return crypto_has_alg(alg_name, type, mask);
28383 + * acomp_request_alloc() -- allocates asynchronous (de)compression request
28385 + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
28387 + * Return: allocated handle in case of success or NULL in case of an error
28389 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
28392 + * acomp_request_free() -- zeroize and free asynchronous (de)compression
28393 + * request as well as the output buffer if allocated
28394 + * inside the algorithm
28396 + * @req: request to free
28398 +void acomp_request_free(struct acomp_req *req);
28401 + * acomp_request_set_callback() -- Sets an asynchronous callback
28403 + * Callback will be called when an asynchronous operation on a given
28404 + * request is finished.
28406 + * @req: request that the callback will be set for
28407 + * @flgs: specify for instance if the operation may backlog
28408 + * @cmlp: callback which will be called
28409 + * @data: private data used by the caller
28411 +static inline void acomp_request_set_callback(struct acomp_req *req,
28413 + crypto_completion_t cmpl,
28416 + req->base.complete = cmpl;
28417 + req->base.data = data;
28418 + req->base.flags = flgs;
28422 + * acomp_request_set_params() -- Sets request parameters
28424 + * Sets parameters required by an acomp operation
28426 + * @req: asynchronous compress request
28427 + * @src: pointer to input buffer scatterlist
28428 + * @dst: pointer to output buffer scatterlist. If this is NULL, the
28429 + * acomp layer will allocate the output memory
28430 + * @slen: size of the input buffer
28431 + * @dlen: size of the output buffer. If dst is NULL, this can be used by
28432 + * the user to specify the maximum amount of memory to allocate
28434 +static inline void acomp_request_set_params(struct acomp_req *req,
28435 + struct scatterlist *src,
28436 + struct scatterlist *dst,
28437 + unsigned int slen,
28438 + unsigned int dlen)
28442 + req->slen = slen;
28443 + req->dlen = dlen;
28446 + req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
28450 + * crypto_acomp_compress() -- Invoke asynchronous compress operation
28452 + * Function invokes the asynchronous compress operation
28454 + * @req: asynchronous compress request
28456 + * Return: zero on success; error code in case of error
28458 +static inline int crypto_acomp_compress(struct acomp_req *req)
28460 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
28462 + return tfm->compress(req);
28466 + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
28468 + * Function invokes the asynchronous decompress operation
28470 + * @req: asynchronous compress request
28472 + * Return: zero on success; error code in case of error
28474 +static inline int crypto_acomp_decompress(struct acomp_req *req)
28476 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
28478 + return tfm->decompress(req);
28483 +++ b/include/crypto/internal/acompress.h
28486 + * Asynchronous Compression operations
28488 + * Copyright (c) 2016, Intel Corporation
28489 + * Authors: Weigang Li <weigang.li@intel.com>
28490 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
28492 + * This program is free software; you can redistribute it and/or modify it
28493 + * under the terms of the GNU General Public License as published by the Free
28494 + * Software Foundation; either version 2 of the License, or (at your option)
28495 + * any later version.
28498 +#ifndef _CRYPTO_ACOMP_INT_H
28499 +#define _CRYPTO_ACOMP_INT_H
28500 +#include <crypto/acompress.h>
28503 + * Transform internal helpers.
28505 +static inline void *acomp_request_ctx(struct acomp_req *req)
28507 + return req->__ctx;
28510 +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
28512 + return tfm->base.__crt_ctx;
28515 +static inline void acomp_request_complete(struct acomp_req *req,
28518 + req->base.complete(&req->base, err);
28521 +static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
28523 + return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
28526 +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
28528 + struct acomp_req *req;
28530 + req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
28532 + acomp_request_set_tfm(req, tfm);
28536 +static inline void __acomp_request_free(struct acomp_req *req)
28542 + * crypto_register_acomp() -- Register asynchronous compression algorithm
28544 + * Function registers an implementation of an asynchronous
28545 + * compression algorithm
28547 + * @alg: algorithm definition
28549 + * Return: zero on success; error code in case of error
28551 +int crypto_register_acomp(struct acomp_alg *alg);
28554 + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
28556 + * Function unregisters an implementation of an asynchronous
28557 + * compression algorithm
28559 + * @alg: algorithm definition
28561 + * Return: zero on success; error code in case of error
28563 +int crypto_unregister_acomp(struct acomp_alg *alg);
28567 +++ b/include/crypto/internal/scompress.h
28570 + * Synchronous Compression operations
28572 + * Copyright 2015 LG Electronics Inc.
28573 + * Copyright (c) 2016, Intel Corporation
28574 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
28576 + * This program is free software; you can redistribute it and/or modify it
28577 + * under the terms of the GNU General Public License as published by the Free
28578 + * Software Foundation; either version 2 of the License, or (at your option)
28579 + * any later version.
28582 +#ifndef _CRYPTO_SCOMP_INT_H
28583 +#define _CRYPTO_SCOMP_INT_H
28584 +#include <linux/crypto.h>
28586 +#define SCOMP_SCRATCH_SIZE 131072
28588 +struct crypto_scomp {
28589 + struct crypto_tfm base;
28593 + * struct scomp_alg - synchronous compression algorithm
28595 + * @alloc_ctx: Function allocates algorithm specific context
28596 + * @free_ctx: Function frees context allocated with alloc_ctx
28597 + * @compress: Function performs a compress operation
28598 + * @decompress: Function performs a de-compress operation
28599 + * @init: Initialize the cryptographic transformation object.
28600 + * This function is used to initialize the cryptographic
28601 + * transformation object. This function is called only once at
28602 + * the instantiation time, right after the transformation context
28603 + * was allocated. In case the cryptographic hardware has some
28604 + * special requirements which need to be handled by software, this
28605 + * function shall check for the precise requirement of the
28606 + * transformation and put any software fallbacks in place.
28607 + * @exit: Deinitialize the cryptographic transformation object. This is a
28608 + * counterpart to @init, used to remove various changes set in
28610 + * @base: Common crypto API algorithm data structure
28612 +struct scomp_alg {
28613 + void *(*alloc_ctx)(struct crypto_scomp *tfm);
28614 + void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
28615 + int (*compress)(struct crypto_scomp *tfm, const u8 *src,
28616 + unsigned int slen, u8 *dst, unsigned int *dlen,
28618 + int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
28619 + unsigned int slen, u8 *dst, unsigned int *dlen,
28621 + struct crypto_alg base;
28624 +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
28626 + return container_of(alg, struct scomp_alg, base);
28629 +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
28631 + return container_of(tfm, struct crypto_scomp, base);
28634 +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
28636 + return &tfm->base;
28639 +static inline void crypto_free_scomp(struct crypto_scomp *tfm)
28641 + crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
28644 +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
28646 + return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
28649 +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
28651 + return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
28654 +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
28657 + return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
28660 +static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
28661 + const u8 *src, unsigned int slen,
28662 + u8 *dst, unsigned int *dlen, void *ctx)
28664 + return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
28667 +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
28668 + const u8 *src, unsigned int slen,
28669 + u8 *dst, unsigned int *dlen,
28672 + return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
28676 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
28677 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
28678 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
28681 + * crypto_register_scomp() -- Register synchronous compression algorithm
28683 + * Function registers an implementation of a synchronous
28684 + * compression algorithm
28686 + * @alg: algorithm definition
28688 + * Return: zero on success; error code in case of error
28690 +int crypto_register_scomp(struct scomp_alg *alg);
28693 + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
28695 + * Function unregisters an implementation of a synchronous
28696 + * compression algorithm
28698 + * @alg: algorithm definition
28700 + * Return: zero on success; error code in case of error
28702 +int crypto_unregister_scomp(struct scomp_alg *alg);
28705 --- a/include/linux/crypto.h
28706 +++ b/include/linux/crypto.h
28708 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
28709 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
28710 #define CRYPTO_ALG_TYPE_KPP 0x00000008
28711 +#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
28712 +#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
28713 #define CRYPTO_ALG_TYPE_RNG 0x0000000c
28714 #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
28715 #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
28717 #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
28718 #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
28719 #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
28720 +#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
28722 #define CRYPTO_ALG_LARVAL 0x00000010
28723 #define CRYPTO_ALG_DEAD 0x00000020
28724 --- a/include/uapi/linux/cryptouser.h
28725 +++ b/include/uapi/linux/cryptouser.h
28726 @@ -46,6 +46,7 @@ enum crypto_attr_type_t {
28727 CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
28728 CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
28729 CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
28730 + CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
28733 #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
28734 @@ -112,5 +113,9 @@ struct crypto_report_kpp {
28735 char type[CRYPTO_MAX_NAME];
28738 +struct crypto_report_acomp {
28739 + char type[CRYPTO_MAX_NAME];
28742 #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
28743 sizeof(struct crypto_report_blkcipher))
28744 --- a/scripts/spelling.txt
28745 +++ b/scripts/spelling.txt
28746 @@ -305,6 +305,9 @@ defintion||definition
28747 defintions||definitions
28750 +deintializing||deinitializing
28751 +deintialize||deinitialize
28752 +deintialized||deinitialized
28756 --- a/sound/soc/amd/acp-pcm-dma.c
28757 +++ b/sound/soc/amd/acp-pcm-dma.c
28758 @@ -506,7 +506,7 @@ static int acp_init(void __iomem *acp_mm
28762 -/* Deintialize ACP */
28763 +/* Deinitialize ACP */
28764 static int acp_deinit(void __iomem *acp_mmio)