1 From 2a0aa9bd187f6f5693982a8f79665585af772237 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Thu, 5 Jul 2018 17:29:41 +0800
4 Subject: [PATCH 16/32] crypto: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 This is an integrated patch for Layerscape SEC (security engine) support.
10
11 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
12 Signed-off-by: Fabio Estevam <festevam@gmail.com>
13 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
14 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
15 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
16 Signed-off-by: Eric Biggers <ebiggers@google.com>
17 Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
18 Signed-off-by: Xulin Sun <xulin.sun@windriver.com>
19 Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
20 Signed-off-by: Marcus Folkesson <marcus.folkesson@gmail.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Andrew Lutomirski <luto@kernel.org>
23 Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
24 Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
25 Signed-off-by: Marcelo Cerri <marcelo.cerri@canonical.com>
26 Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
27 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
28 Signed-off-by: Laura Abbott <labbott@redhat.com>
29 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
30 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
31 ---
32 crypto/Kconfig | 30 +
33 crypto/Makefile | 4 +
34 crypto/acompress.c | 169 +
35 crypto/algboss.c | 12 +-
36 crypto/crypto_user.c | 19 +
37 crypto/scompress.c | 356 +
38 crypto/tcrypt.c | 17 +-
39 crypto/testmgr.c | 1708 ++---
40 crypto/testmgr.h | 1125 ++--
41 crypto/tls.c | 607 ++
42 drivers/crypto/caam/Kconfig | 77 +-
43 drivers/crypto/caam/Makefile | 16 +-
44 drivers/crypto/caam/caamalg.c | 2185 ++----
45 drivers/crypto/caam/caamalg_desc.c | 1961 ++++++
46 drivers/crypto/caam/caamalg_desc.h | 127 +
47 drivers/crypto/caam/caamalg_qi.c | 3321 +++++++++
48 drivers/crypto/caam/caamalg_qi2.c | 5938 +++++++++++++++++
49 drivers/crypto/caam/caamalg_qi2.h | 283 +
50 drivers/crypto/caam/caamhash.c | 555 +-
51 drivers/crypto/caam/caamhash_desc.c | 108 +
52 drivers/crypto/caam/caamhash_desc.h | 49 +
53 drivers/crypto/caam/caampkc.c | 471 +-
54 drivers/crypto/caam/caampkc.h | 58 +
55 drivers/crypto/caam/caamrng.c | 16 +-
56 drivers/crypto/caam/compat.h | 1 +
57 drivers/crypto/caam/ctrl.c | 358 +-
58 drivers/crypto/caam/ctrl.h | 2 +
59 drivers/crypto/caam/desc.h | 84 +-
60 drivers/crypto/caam/desc_constr.h | 180 +-
61 drivers/crypto/caam/dpseci.c | 858 +++
62 drivers/crypto/caam/dpseci.h | 395 ++
63 drivers/crypto/caam/dpseci_cmd.h | 261 +
64 drivers/crypto/caam/error.c | 127 +-
65 drivers/crypto/caam/error.h | 10 +-
66 drivers/crypto/caam/intern.h | 31 +-
67 drivers/crypto/caam/jr.c | 72 +-
68 drivers/crypto/caam/jr.h | 2 +
69 drivers/crypto/caam/key_gen.c | 32 +-
70 drivers/crypto/caam/key_gen.h | 36 +-
71 drivers/crypto/caam/pdb.h | 62 +
72 drivers/crypto/caam/pkc_desc.c | 36 +
73 drivers/crypto/caam/qi.c | 804 +++
74 drivers/crypto/caam/qi.h | 204 +
75 drivers/crypto/caam/regs.h | 63 +-
76 drivers/crypto/caam/sg_sw_qm.h | 126 +
77 drivers/crypto/caam/sg_sw_qm2.h | 81 +
78 drivers/crypto/caam/sg_sw_sec4.h | 60 +-
79 drivers/crypto/talitos.c | 8 +
80 drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +-
81 drivers/staging/wilc1000/linux_wlan.c | 2 +-
82 .../staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +-
83 include/crypto/acompress.h | 269 +
84 include/crypto/internal/acompress.h | 81 +
85 include/crypto/internal/scompress.h | 136 +
86 include/linux/crypto.h | 3 +
87 include/uapi/linux/cryptouser.h | 5 +
88 scripts/spelling.txt | 3 +
89 sound/soc/amd/acp-pcm-dma.c | 2 +-
90 58 files changed, 19620 insertions(+), 3990 deletions(-)
91 create mode 100644 crypto/acompress.c
92 create mode 100644 crypto/scompress.c
93 create mode 100644 crypto/tls.c
94 create mode 100644 drivers/crypto/caam/caamalg_desc.c
95 create mode 100644 drivers/crypto/caam/caamalg_desc.h
96 create mode 100644 drivers/crypto/caam/caamalg_qi.c
97 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
98 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
99 create mode 100644 drivers/crypto/caam/caamhash_desc.c
100 create mode 100644 drivers/crypto/caam/caamhash_desc.h
101 create mode 100644 drivers/crypto/caam/dpseci.c
102 create mode 100644 drivers/crypto/caam/dpseci.h
103 create mode 100644 drivers/crypto/caam/dpseci_cmd.h
104 create mode 100644 drivers/crypto/caam/qi.c
105 create mode 100644 drivers/crypto/caam/qi.h
106 create mode 100644 drivers/crypto/caam/sg_sw_qm.h
107 create mode 100644 drivers/crypto/caam/sg_sw_qm2.h
108 create mode 100644 include/crypto/acompress.h
109 create mode 100644 include/crypto/internal/acompress.h
110 create mode 100644 include/crypto/internal/scompress.h
111
112 --- a/crypto/Kconfig
113 +++ b/crypto/Kconfig
114 @@ -102,6 +102,15 @@ config CRYPTO_KPP
115 select CRYPTO_ALGAPI
116 select CRYPTO_KPP2
117
118 +config CRYPTO_ACOMP2
119 + tristate
120 + select CRYPTO_ALGAPI2
121 +
122 +config CRYPTO_ACOMP
123 + tristate
124 + select CRYPTO_ALGAPI
125 + select CRYPTO_ACOMP2
126 +
127 config CRYPTO_RSA
128 tristate "RSA algorithm"
129 select CRYPTO_AKCIPHER
130 @@ -138,6 +147,7 @@ config CRYPTO_MANAGER2
131 select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
132 select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
133 select CRYPTO_KPP2 if !CRYPTO_MANAGER_DISABLE_TESTS
134 + select CRYPTO_ACOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
135
136 config CRYPTO_USER
137 tristate "Userspace cryptographic algorithm configuration"
138 @@ -295,6 +305,26 @@ config CRYPTO_ECHAINIV
139 a sequence number xored with a salt. This is the default
140 algorithm for CBC.
141
142 +config CRYPTO_TLS
143 + tristate "TLS support"
144 + select CRYPTO_AEAD
145 + select CRYPTO_BLKCIPHER
146 + select CRYPTO_MANAGER
147 + select CRYPTO_HASH
148 + select CRYPTO_NULL
149 + select CRYPTO_AUTHENC
150 + help
151 + Support for TLS 1.0 record encryption and decryption
152 +
153 + This module adds support for encryption/decryption of TLS 1.0 frames
154 + using blockcipher algorithms. The name of the resulting algorithm is
155 + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
156 + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
157 + accelerated versions will be used automatically if available.
158 +
159 + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
160 + operations through AF_ALG or cryptodev interfaces
161 +
162 comment "Block modes"
163
164 config CRYPTO_CBC
165 --- a/crypto/Makefile
166 +++ b/crypto/Makefile
167 @@ -51,6 +51,9 @@ rsa_generic-y += rsa_helper.o
168 rsa_generic-y += rsa-pkcs1pad.o
169 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
170
171 +obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
172 +obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
173 +
174 cryptomgr-y := algboss.o testmgr.o
175
176 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
177 @@ -115,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
178 obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
179 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
180 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
181 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
182 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
183 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
184 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
185 --- /dev/null
186 +++ b/crypto/acompress.c
187 @@ -0,0 +1,169 @@
188 +/*
189 + * Asynchronous Compression operations
190 + *
191 + * Copyright (c) 2016, Intel Corporation
192 + * Authors: Weigang Li <weigang.li@intel.com>
193 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
194 + *
195 + * This program is free software; you can redistribute it and/or modify it
196 + * under the terms of the GNU General Public License as published by the Free
197 + * Software Foundation; either version 2 of the License, or (at your option)
198 + * any later version.
199 + *
200 + */
201 +#include <linux/errno.h>
202 +#include <linux/kernel.h>
203 +#include <linux/module.h>
204 +#include <linux/seq_file.h>
205 +#include <linux/slab.h>
206 +#include <linux/string.h>
207 +#include <linux/crypto.h>
208 +#include <crypto/algapi.h>
209 +#include <linux/cryptouser.h>
210 +#include <net/netlink.h>
211 +#include <crypto/internal/acompress.h>
212 +#include <crypto/internal/scompress.h>
213 +#include "internal.h"
214 +
215 +static const struct crypto_type crypto_acomp_type;
216 +
217 +#ifdef CONFIG_NET
218 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
219 +{
220 + struct crypto_report_acomp racomp;
221 +
222 + strncpy(racomp.type, "acomp", sizeof(racomp.type));
223 +
224 + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
225 + sizeof(struct crypto_report_acomp), &racomp))
226 + goto nla_put_failure;
227 + return 0;
228 +
229 +nla_put_failure:
230 + return -EMSGSIZE;
231 +}
232 +#else
233 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
234 +{
235 + return -ENOSYS;
236 +}
237 +#endif
238 +
239 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
240 + __attribute__ ((unused));
241 +
242 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
243 +{
244 + seq_puts(m, "type : acomp\n");
245 +}
246 +
247 +static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
248 +{
249 + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
250 + struct acomp_alg *alg = crypto_acomp_alg(acomp);
251 +
252 + alg->exit(acomp);
253 +}
254 +
255 +static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
256 +{
257 + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
258 + struct acomp_alg *alg = crypto_acomp_alg(acomp);
259 +
260 + if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
261 + return crypto_init_scomp_ops_async(tfm);
262 +
263 + acomp->compress = alg->compress;
264 + acomp->decompress = alg->decompress;
265 + acomp->dst_free = alg->dst_free;
266 + acomp->reqsize = alg->reqsize;
267 +
268 + if (alg->exit)
269 + acomp->base.exit = crypto_acomp_exit_tfm;
270 +
271 + if (alg->init)
272 + return alg->init(acomp);
273 +
274 + return 0;
275 +}
276 +
277 +static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
278 +{
279 + int extsize = crypto_alg_extsize(alg);
280 +
281 + if (alg->cra_type != &crypto_acomp_type)
282 + extsize += sizeof(struct crypto_scomp *);
283 +
284 + return extsize;
285 +}
286 +
287 +static const struct crypto_type crypto_acomp_type = {
288 + .extsize = crypto_acomp_extsize,
289 + .init_tfm = crypto_acomp_init_tfm,
290 +#ifdef CONFIG_PROC_FS
291 + .show = crypto_acomp_show,
292 +#endif
293 + .report = crypto_acomp_report,
294 + .maskclear = ~CRYPTO_ALG_TYPE_MASK,
295 + .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
296 + .type = CRYPTO_ALG_TYPE_ACOMPRESS,
297 + .tfmsize = offsetof(struct crypto_acomp, base),
298 +};
299 +
300 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
301 + u32 mask)
302 +{
303 + return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
304 +}
305 +EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
306 +
307 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
308 +{
309 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
310 + struct acomp_req *req;
311 +
312 + req = __acomp_request_alloc(acomp);
313 + if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
314 + return crypto_acomp_scomp_alloc_ctx(req);
315 +
316 + return req;
317 +}
318 +EXPORT_SYMBOL_GPL(acomp_request_alloc);
319 +
320 +void acomp_request_free(struct acomp_req *req)
321 +{
322 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
323 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
324 +
325 + if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
326 + crypto_acomp_scomp_free_ctx(req);
327 +
328 + if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
329 + acomp->dst_free(req->dst);
330 + req->dst = NULL;
331 + }
332 +
333 + __acomp_request_free(req);
334 +}
335 +EXPORT_SYMBOL_GPL(acomp_request_free);
336 +
337 +int crypto_register_acomp(struct acomp_alg *alg)
338 +{
339 + struct crypto_alg *base = &alg->base;
340 +
341 + base->cra_type = &crypto_acomp_type;
342 + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
343 + base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
344 +
345 + return crypto_register_alg(base);
346 +}
347 +EXPORT_SYMBOL_GPL(crypto_register_acomp);
348 +
349 +int crypto_unregister_acomp(struct acomp_alg *alg)
350 +{
351 + return crypto_unregister_alg(&alg->base);
352 +}
353 +EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
354 +
355 +MODULE_LICENSE("GPL");
356 +MODULE_DESCRIPTION("Asynchronous compression type");
357 --- a/crypto/algboss.c
358 +++ b/crypto/algboss.c
359 @@ -247,17 +247,9 @@ static int cryptomgr_schedule_test(struc
360 memcpy(param->alg, alg->cra_name, sizeof(param->alg));
361 type = alg->cra_flags;
362
363 - /* This piece of crap needs to disappear into per-type test hooks. */
364 -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
365 - type |= CRYPTO_ALG_TESTED;
366 -#else
367 - if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
368 - CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
369 - ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
370 - CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
371 - alg->cra_ablkcipher.ivsize))
372 + /* Do not test internal algorithms. */
373 + if (type & CRYPTO_ALG_INTERNAL)
374 type |= CRYPTO_ALG_TESTED;
375 -#endif
376
377 param->type = type;
378
379 --- a/crypto/crypto_user.c
380 +++ b/crypto/crypto_user.c
381 @@ -112,6 +112,21 @@ nla_put_failure:
382 return -EMSGSIZE;
383 }
384
385 +static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
386 +{
387 + struct crypto_report_acomp racomp;
388 +
389 + strncpy(racomp.type, "acomp", sizeof(racomp.type));
390 +
391 + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
392 + sizeof(struct crypto_report_acomp), &racomp))
393 + goto nla_put_failure;
394 + return 0;
395 +
396 +nla_put_failure:
397 + return -EMSGSIZE;
398 +}
399 +
400 static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
401 {
402 struct crypto_report_akcipher rakcipher;
403 @@ -186,7 +201,11 @@ static int crypto_report_one(struct cryp
404 goto nla_put_failure;
405
406 break;
407 + case CRYPTO_ALG_TYPE_ACOMPRESS:
408 + if (crypto_report_acomp(skb, alg))
409 + goto nla_put_failure;
410
411 + break;
412 case CRYPTO_ALG_TYPE_AKCIPHER:
413 if (crypto_report_akcipher(skb, alg))
414 goto nla_put_failure;
415 --- /dev/null
416 +++ b/crypto/scompress.c
417 @@ -0,0 +1,356 @@
418 +/*
419 + * Synchronous Compression operations
420 + *
421 + * Copyright 2015 LG Electronics Inc.
422 + * Copyright (c) 2016, Intel Corporation
423 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
424 + *
425 + * This program is free software; you can redistribute it and/or modify it
426 + * under the terms of the GNU General Public License as published by the Free
427 + * Software Foundation; either version 2 of the License, or (at your option)
428 + * any later version.
429 + *
430 + */
431 +#include <linux/errno.h>
432 +#include <linux/kernel.h>
433 +#include <linux/module.h>
434 +#include <linux/seq_file.h>
435 +#include <linux/slab.h>
436 +#include <linux/string.h>
437 +#include <linux/crypto.h>
438 +#include <linux/vmalloc.h>
439 +#include <crypto/algapi.h>
440 +#include <linux/cryptouser.h>
441 +#include <net/netlink.h>
442 +#include <linux/scatterlist.h>
443 +#include <crypto/scatterwalk.h>
444 +#include <crypto/internal/acompress.h>
445 +#include <crypto/internal/scompress.h>
446 +#include "internal.h"
447 +
448 +static const struct crypto_type crypto_scomp_type;
449 +static void * __percpu *scomp_src_scratches;
450 +static void * __percpu *scomp_dst_scratches;
451 +static int scomp_scratch_users;
452 +static DEFINE_MUTEX(scomp_lock);
453 +
454 +#ifdef CONFIG_NET
455 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
456 +{
457 + struct crypto_report_comp rscomp;
458 +
459 + strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
460 +
461 + if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
462 + sizeof(struct crypto_report_comp), &rscomp))
463 + goto nla_put_failure;
464 + return 0;
465 +
466 +nla_put_failure:
467 + return -EMSGSIZE;
468 +}
469 +#else
470 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
471 +{
472 + return -ENOSYS;
473 +}
474 +#endif
475 +
476 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
477 + __attribute__ ((unused));
478 +
479 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
480 +{
481 + seq_puts(m, "type : scomp\n");
482 +}
483 +
484 +static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
485 +{
486 + return 0;
487 +}
488 +
489 +static void crypto_scomp_free_scratches(void * __percpu *scratches)
490 +{
491 + int i;
492 +
493 + if (!scratches)
494 + return;
495 +
496 + for_each_possible_cpu(i)
497 + vfree(*per_cpu_ptr(scratches, i));
498 +
499 + free_percpu(scratches);
500 +}
501 +
502 +static void * __percpu *crypto_scomp_alloc_scratches(void)
503 +{
504 + void * __percpu *scratches;
505 + int i;
506 +
507 + scratches = alloc_percpu(void *);
508 + if (!scratches)
509 + return NULL;
510 +
511 + for_each_possible_cpu(i) {
512 + void *scratch;
513 +
514 + scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
515 + if (!scratch)
516 + goto error;
517 + *per_cpu_ptr(scratches, i) = scratch;
518 + }
519 +
520 + return scratches;
521 +
522 +error:
523 + crypto_scomp_free_scratches(scratches);
524 + return NULL;
525 +}
526 +
527 +static void crypto_scomp_free_all_scratches(void)
528 +{
529 + if (!--scomp_scratch_users) {
530 + crypto_scomp_free_scratches(scomp_src_scratches);
531 + crypto_scomp_free_scratches(scomp_dst_scratches);
532 + scomp_src_scratches = NULL;
533 + scomp_dst_scratches = NULL;
534 + }
535 +}
536 +
537 +static int crypto_scomp_alloc_all_scratches(void)
538 +{
539 + if (!scomp_scratch_users++) {
540 + scomp_src_scratches = crypto_scomp_alloc_scratches();
541 + if (!scomp_src_scratches)
542 + return -ENOMEM;
543 + scomp_dst_scratches = crypto_scomp_alloc_scratches();
544 + if (!scomp_dst_scratches)
545 + return -ENOMEM;
546 + }
547 + return 0;
548 +}
549 +
550 +static void crypto_scomp_sg_free(struct scatterlist *sgl)
551 +{
552 + int i, n;
553 + struct page *page;
554 +
555 + if (!sgl)
556 + return;
557 +
558 + n = sg_nents(sgl);
559 + for_each_sg(sgl, sgl, n, i) {
560 + page = sg_page(sgl);
561 + if (page)
562 + __free_page(page);
563 + }
564 +
565 + kfree(sgl);
566 +}
567 +
568 +static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
569 +{
570 + struct scatterlist *sgl;
571 + struct page *page;
572 + int i, n;
573 +
574 + n = ((size - 1) >> PAGE_SHIFT) + 1;
575 +
576 + sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
577 + if (!sgl)
578 + return NULL;
579 +
580 + sg_init_table(sgl, n);
581 +
582 + for (i = 0; i < n; i++) {
583 + page = alloc_page(gfp);
584 + if (!page)
585 + goto err;
586 + sg_set_page(sgl + i, page, PAGE_SIZE, 0);
587 + }
588 +
589 + return sgl;
590 +
591 +err:
592 + sg_mark_end(sgl + i);
593 + crypto_scomp_sg_free(sgl);
594 + return NULL;
595 +}
596 +
597 +static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
598 +{
599 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
600 + void **tfm_ctx = acomp_tfm_ctx(tfm);
601 + struct crypto_scomp *scomp = *tfm_ctx;
602 + void **ctx = acomp_request_ctx(req);
603 + const int cpu = get_cpu();
604 + u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
605 + u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
606 + int ret;
607 +
608 + if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
609 + ret = -EINVAL;
610 + goto out;
611 + }
612 +
613 + if (req->dst && !req->dlen) {
614 + ret = -EINVAL;
615 + goto out;
616 + }
617 +
618 + if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
619 + req->dlen = SCOMP_SCRATCH_SIZE;
620 +
621 + scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
622 + if (dir)
623 + ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
624 + scratch_dst, &req->dlen, *ctx);
625 + else
626 + ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
627 + scratch_dst, &req->dlen, *ctx);
628 + if (!ret) {
629 + if (!req->dst) {
630 + req->dst = crypto_scomp_sg_alloc(req->dlen,
631 + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
632 + GFP_KERNEL : GFP_ATOMIC);
633 + if (!req->dst)
634 + goto out;
635 + }
636 + scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
637 + 1);
638 + }
639 +out:
640 + put_cpu();
641 + return ret;
642 +}
643 +
644 +static int scomp_acomp_compress(struct acomp_req *req)
645 +{
646 + return scomp_acomp_comp_decomp(req, 1);
647 +}
648 +
649 +static int scomp_acomp_decompress(struct acomp_req *req)
650 +{
651 + return scomp_acomp_comp_decomp(req, 0);
652 +}
653 +
654 +static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
655 +{
656 + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
657 +
658 + crypto_free_scomp(*ctx);
659 +}
660 +
661 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
662 +{
663 + struct crypto_alg *calg = tfm->__crt_alg;
664 + struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
665 + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
666 + struct crypto_scomp *scomp;
667 +
668 + if (!crypto_mod_get(calg))
669 + return -EAGAIN;
670 +
671 + scomp = crypto_create_tfm(calg, &crypto_scomp_type);
672 + if (IS_ERR(scomp)) {
673 + crypto_mod_put(calg);
674 + return PTR_ERR(scomp);
675 + }
676 +
677 + *ctx = scomp;
678 + tfm->exit = crypto_exit_scomp_ops_async;
679 +
680 + crt->compress = scomp_acomp_compress;
681 + crt->decompress = scomp_acomp_decompress;
682 + crt->dst_free = crypto_scomp_sg_free;
683 + crt->reqsize = sizeof(void *);
684 +
685 + return 0;
686 +}
687 +
688 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
689 +{
690 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
691 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
692 + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
693 + struct crypto_scomp *scomp = *tfm_ctx;
694 + void *ctx;
695 +
696 + ctx = crypto_scomp_alloc_ctx(scomp);
697 + if (IS_ERR(ctx)) {
698 + kfree(req);
699 + return NULL;
700 + }
701 +
702 + *req->__ctx = ctx;
703 +
704 + return req;
705 +}
706 +
707 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
708 +{
709 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
710 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
711 + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
712 + struct crypto_scomp *scomp = *tfm_ctx;
713 + void *ctx = *req->__ctx;
714 +
715 + if (ctx)
716 + crypto_scomp_free_ctx(scomp, ctx);
717 +}
718 +
719 +static const struct crypto_type crypto_scomp_type = {
720 + .extsize = crypto_alg_extsize,
721 + .init_tfm = crypto_scomp_init_tfm,
722 +#ifdef CONFIG_PROC_FS
723 + .show = crypto_scomp_show,
724 +#endif
725 + .report = crypto_scomp_report,
726 + .maskclear = ~CRYPTO_ALG_TYPE_MASK,
727 + .maskset = CRYPTO_ALG_TYPE_MASK,
728 + .type = CRYPTO_ALG_TYPE_SCOMPRESS,
729 + .tfmsize = offsetof(struct crypto_scomp, base),
730 +};
731 +
732 +int crypto_register_scomp(struct scomp_alg *alg)
733 +{
734 + struct crypto_alg *base = &alg->base;
735 + int ret = -ENOMEM;
736 +
737 + mutex_lock(&scomp_lock);
738 + if (crypto_scomp_alloc_all_scratches())
739 + goto error;
740 +
741 + base->cra_type = &crypto_scomp_type;
742 + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
743 + base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
744 +
745 + ret = crypto_register_alg(base);
746 + if (ret)
747 + goto error;
748 +
749 + mutex_unlock(&scomp_lock);
750 + return ret;
751 +
752 +error:
753 + crypto_scomp_free_all_scratches();
754 + mutex_unlock(&scomp_lock);
755 + return ret;
756 +}
757 +EXPORT_SYMBOL_GPL(crypto_register_scomp);
758 +
759 +int crypto_unregister_scomp(struct scomp_alg *alg)
760 +{
761 + int ret;
762 +
763 + mutex_lock(&scomp_lock);
764 + ret = crypto_unregister_alg(&alg->base);
765 + crypto_scomp_free_all_scratches();
766 + mutex_unlock(&scomp_lock);
767 +
768 + return ret;
769 +}
770 +EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
771 +
772 +MODULE_LICENSE("GPL");
773 +MODULE_DESCRIPTION("Synchronous compression type");
774 --- a/crypto/tcrypt.c
775 +++ b/crypto/tcrypt.c
776 @@ -74,7 +74,7 @@ static char *check[] = {
777 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
778 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
779 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
780 - NULL
781 + "rsa", NULL
782 };
783
784 struct tcrypt_result {
785 @@ -1336,6 +1336,10 @@ static int do_test(const char *alg, u32
786 ret += tcrypt_test("hmac(sha3-512)");
787 break;
788
789 + case 115:
790 + ret += tcrypt_test("rsa");
791 + break;
792 +
793 case 150:
794 ret += tcrypt_test("ansi_cprng");
795 break;
796 @@ -1397,6 +1401,9 @@ static int do_test(const char *alg, u32
797 case 190:
798 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
799 break;
800 + case 191:
801 + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
802 + break;
803 case 200:
804 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
805 speed_template_16_24_32);
806 @@ -1411,9 +1418,9 @@ static int do_test(const char *alg, u32
807 test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
808 speed_template_32_40_48);
809 test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
810 - speed_template_32_48_64);
811 + speed_template_32_64);
812 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
813 - speed_template_32_48_64);
814 + speed_template_32_64);
815 test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
816 speed_template_16_24_32);
817 test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
818 @@ -1844,9 +1851,9 @@ static int do_test(const char *alg, u32
819 test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
820 speed_template_32_40_48);
821 test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
822 - speed_template_32_48_64);
823 + speed_template_32_64);
824 test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
825 - speed_template_32_48_64);
826 + speed_template_32_64);
827 test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
828 speed_template_16_24_32);
829 test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
830 --- a/crypto/testmgr.c
831 +++ b/crypto/testmgr.c
832 @@ -33,6 +33,7 @@
833 #include <crypto/drbg.h>
834 #include <crypto/akcipher.h>
835 #include <crypto/kpp.h>
836 +#include <crypto/acompress.h>
837
838 #include "internal.h"
839
840 @@ -62,7 +63,7 @@ int alg_test(const char *driver, const c
841 */
842 #define IDX1 32
843 #define IDX2 32400
844 -#define IDX3 1
845 +#define IDX3 1511
846 #define IDX4 8193
847 #define IDX5 22222
848 #define IDX6 17101
849 @@ -82,47 +83,54 @@ struct tcrypt_result {
850
851 struct aead_test_suite {
852 struct {
853 - struct aead_testvec *vecs;
854 + const struct aead_testvec *vecs;
855 unsigned int count;
856 } enc, dec;
857 };
858
859 struct cipher_test_suite {
860 struct {
861 - struct cipher_testvec *vecs;
862 + const struct cipher_testvec *vecs;
863 unsigned int count;
864 } enc, dec;
865 };
866
867 struct comp_test_suite {
868 struct {
869 - struct comp_testvec *vecs;
870 + const struct comp_testvec *vecs;
871 unsigned int count;
872 } comp, decomp;
873 };
874
875 struct hash_test_suite {
876 - struct hash_testvec *vecs;
877 + const struct hash_testvec *vecs;
878 unsigned int count;
879 };
880
881 struct cprng_test_suite {
882 - struct cprng_testvec *vecs;
883 + const struct cprng_testvec *vecs;
884 unsigned int count;
885 };
886
887 struct drbg_test_suite {
888 - struct drbg_testvec *vecs;
889 + const struct drbg_testvec *vecs;
890 unsigned int count;
891 };
892
893 +struct tls_test_suite {
894 + struct {
895 + struct tls_testvec *vecs;
896 + unsigned int count;
897 + } enc, dec;
898 +};
899 +
900 struct akcipher_test_suite {
901 - struct akcipher_testvec *vecs;
902 + const struct akcipher_testvec *vecs;
903 unsigned int count;
904 };
905
906 struct kpp_test_suite {
907 - struct kpp_testvec *vecs;
908 + const struct kpp_testvec *vecs;
909 unsigned int count;
910 };
911
912 @@ -139,12 +147,14 @@ struct alg_test_desc {
913 struct hash_test_suite hash;
914 struct cprng_test_suite cprng;
915 struct drbg_test_suite drbg;
916 + struct tls_test_suite tls;
917 struct akcipher_test_suite akcipher;
918 struct kpp_test_suite kpp;
919 } suite;
920 };
921
922 -static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
923 +static const unsigned int IDX[8] = {
924 + IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
925
926 static void hexdump(unsigned char *buf, unsigned int len)
927 {
928 @@ -202,7 +212,7 @@ static int wait_async_op(struct tcrypt_r
929 }
930
931 static int ahash_partial_update(struct ahash_request **preq,
932 - struct crypto_ahash *tfm, struct hash_testvec *template,
933 + struct crypto_ahash *tfm, const struct hash_testvec *template,
934 void *hash_buff, int k, int temp, struct scatterlist *sg,
935 const char *algo, char *result, struct tcrypt_result *tresult)
936 {
937 @@ -259,11 +269,12 @@ out_nostate:
938 return ret;
939 }
940
941 -static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
942 - unsigned int tcount, bool use_digest,
943 - const int align_offset)
944 +static int __test_hash(struct crypto_ahash *tfm,
945 + const struct hash_testvec *template, unsigned int tcount,
946 + bool use_digest, const int align_offset)
947 {
948 const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
949 + size_t digest_size = crypto_ahash_digestsize(tfm);
950 unsigned int i, j, k, temp;
951 struct scatterlist sg[8];
952 char *result;
953 @@ -274,7 +285,7 @@ static int __test_hash(struct crypto_aha
954 char *xbuf[XBUFSIZE];
955 int ret = -ENOMEM;
956
957 - result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
958 + result = kmalloc(digest_size, GFP_KERNEL);
959 if (!result)
960 return ret;
961 key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
962 @@ -304,7 +315,7 @@ static int __test_hash(struct crypto_aha
963 goto out;
964
965 j++;
966 - memset(result, 0, MAX_DIGEST_SIZE);
967 + memset(result, 0, digest_size);
968
969 hash_buff = xbuf[0];
970 hash_buff += align_offset;
971 @@ -379,7 +390,7 @@ static int __test_hash(struct crypto_aha
972 continue;
973
974 j++;
975 - memset(result, 0, MAX_DIGEST_SIZE);
976 + memset(result, 0, digest_size);
977
978 temp = 0;
979 sg_init_table(sg, template[i].np);
980 @@ -457,7 +468,7 @@ static int __test_hash(struct crypto_aha
981 continue;
982
983 j++;
984 - memset(result, 0, MAX_DIGEST_SIZE);
985 + memset(result, 0, digest_size);
986
987 ret = -EINVAL;
988 hash_buff = xbuf[0];
989 @@ -536,7 +547,8 @@ out_nobuf:
990 return ret;
991 }
992
993 -static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
994 +static int test_hash(struct crypto_ahash *tfm,
995 + const struct hash_testvec *template,
996 unsigned int tcount, bool use_digest)
997 {
998 unsigned int alignmask;
999 @@ -564,7 +576,7 @@ static int test_hash(struct crypto_ahash
1000 }
1001
1002 static int __test_aead(struct crypto_aead *tfm, int enc,
1003 - struct aead_testvec *template, unsigned int tcount,
1004 + const struct aead_testvec *template, unsigned int tcount,
1005 const bool diff_dst, const int align_offset)
1006 {
1007 const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1008 @@ -955,7 +967,7 @@ out_noxbuf:
1009 }
1010
1011 static int test_aead(struct crypto_aead *tfm, int enc,
1012 - struct aead_testvec *template, unsigned int tcount)
1013 + const struct aead_testvec *template, unsigned int tcount)
1014 {
1015 unsigned int alignmask;
1016 int ret;
1017 @@ -987,8 +999,236 @@ static int test_aead(struct crypto_aead
1018 return 0;
1019 }
1020
1021 +static int __test_tls(struct crypto_aead *tfm, int enc,
1022 + struct tls_testvec *template, unsigned int tcount,
1023 + const bool diff_dst)
1024 +{
1025 + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1026 + unsigned int i, k, authsize;
1027 + char *q;
1028 + struct aead_request *req;
1029 + struct scatterlist *sg;
1030 + struct scatterlist *sgout;
1031 + const char *e, *d;
1032 + struct tcrypt_result result;
1033 + void *input;
1034 + void *output;
1035 + void *assoc;
1036 + char *iv;
1037 + char *key;
1038 + char *xbuf[XBUFSIZE];
1039 + char *xoutbuf[XBUFSIZE];
1040 + char *axbuf[XBUFSIZE];
1041 + int ret = -ENOMEM;
1042 +
1043 + if (testmgr_alloc_buf(xbuf))
1044 + goto out_noxbuf;
1045 +
1046 + if (diff_dst && testmgr_alloc_buf(xoutbuf))
1047 + goto out_nooutbuf;
1048 +
1049 + if (testmgr_alloc_buf(axbuf))
1050 + goto out_noaxbuf;
1051 +
1052 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
1053 + if (!iv)
1054 + goto out_noiv;
1055 +
1056 + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
1057 + if (!key)
1058 + goto out_nokey;
1059 +
1060 + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
1061 + if (!sg)
1062 + goto out_nosg;
1063 +
1064 + sgout = sg + 8;
1065 +
1066 + d = diff_dst ? "-ddst" : "";
1067 + e = enc ? "encryption" : "decryption";
1068 +
1069 + init_completion(&result.completion);
1070 +
1071 + req = aead_request_alloc(tfm, GFP_KERNEL);
1072 + if (!req) {
1073 + pr_err("alg: tls%s: Failed to allocate request for %s\n",
1074 + d, algo);
1075 + goto out;
1076 + }
1077 +
1078 + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1079 + tcrypt_complete, &result);
1080 +
1081 + for (i = 0; i < tcount; i++) {
1082 + input = xbuf[0];
1083 + assoc = axbuf[0];
1084 +
1085 + ret = -EINVAL;
1086 + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
1087 + template[i].alen > PAGE_SIZE))
1088 + goto out;
1089 +
1090 + memcpy(assoc, template[i].assoc, template[i].alen);
1091 + memcpy(input, template[i].input, template[i].ilen);
1092 +
1093 + if (template[i].iv)
1094 + memcpy(iv, template[i].iv, MAX_IVLEN);
1095 + else
1096 + memset(iv, 0, MAX_IVLEN);
1097 +
1098 + crypto_aead_clear_flags(tfm, ~0);
1099 +
1100 + if (template[i].klen > MAX_KEYLEN) {
1101 + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
1102 + d, i, algo, template[i].klen, MAX_KEYLEN);
1103 + ret = -EINVAL;
1104 + goto out;
1105 + }
1106 + memcpy(key, template[i].key, template[i].klen);
1107 +
1108 + ret = crypto_aead_setkey(tfm, key, template[i].klen);
1109 + if (!ret == template[i].fail) {
1110 + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
1111 + d, i, algo, crypto_aead_get_flags(tfm));
1112 + goto out;
1113 + } else if (ret)
1114 + continue;
1115 +
1116 + authsize = 20;
1117 + ret = crypto_aead_setauthsize(tfm, authsize);
1118 + if (ret) {
1119 + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
1120 + d, authsize, i, algo);
1121 + goto out;
1122 + }
1123 +
1124 + k = !!template[i].alen;
1125 + sg_init_table(sg, k + 1);
1126 + sg_set_buf(&sg[0], assoc, template[i].alen);
1127 + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
1128 + template[i].ilen));
1129 + output = input;
1130 +
1131 + if (diff_dst) {
1132 + sg_init_table(sgout, k + 1);
1133 + sg_set_buf(&sgout[0], assoc, template[i].alen);
1134 +
1135 + output = xoutbuf[0];
1136 + sg_set_buf(&sgout[k], output,
1137 + (enc ? template[i].rlen : template[i].ilen));
1138 + }
1139 +
1140 + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
1141 + template[i].ilen, iv);
1142 +
1143 + aead_request_set_ad(req, template[i].alen);
1144 +
1145 + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
1146 +
1147 + switch (ret) {
1148 + case 0:
1149 + if (template[i].novrfy) {
1150 + /* verification was supposed to fail */
1151 + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
1152 + d, e, i, algo);
1153 + /* so really, we got a bad message */
1154 + ret = -EBADMSG;
1155 + goto out;
1156 + }
1157 + break;
1158 + case -EINPROGRESS:
1159 + case -EBUSY:
1160 + wait_for_completion(&result.completion);
1161 + reinit_completion(&result.completion);
1162 + ret = result.err;
1163 + if (!ret)
1164 + break;
1165 + case -EBADMSG:
1166 + /* verification failure was expected */
1167 + if (template[i].novrfy)
1168 + continue;
1169 + /* fall through */
1170 + default:
1171 + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
1172 + d, e, i, algo, -ret);
1173 + goto out;
1174 + }
1175 +
1176 + q = output;
1177 + if (memcmp(q, template[i].result, template[i].rlen)) {
1178 + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
1179 + d, i, e, algo);
1180 + hexdump(q, template[i].rlen);
1181 + pr_err("should be:\n");
1182 + hexdump(template[i].result, template[i].rlen);
1183 + ret = -EINVAL;
1184 + goto out;
1185 + }
1186 + }
1187 +
1188 +out:
1189 + aead_request_free(req);
1190 +
1191 + kfree(sg);
1192 +out_nosg:
1193 + kfree(key);
1194 +out_nokey:
1195 + kfree(iv);
1196 +out_noiv:
1197 + testmgr_free_buf(axbuf);
1198 +out_noaxbuf:
1199 + if (diff_dst)
1200 + testmgr_free_buf(xoutbuf);
1201 +out_nooutbuf:
1202 + testmgr_free_buf(xbuf);
1203 +out_noxbuf:
1204 + return ret;
1205 +}
1206 +
1207 +static int test_tls(struct crypto_aead *tfm, int enc,
1208 + struct tls_testvec *template, unsigned int tcount)
1209 +{
1210 + int ret;
1211 + /* test 'dst == src' case */
1212 + ret = __test_tls(tfm, enc, template, tcount, false);
1213 + if (ret)
1214 + return ret;
1215 + /* test 'dst != src' case */
1216 + return __test_tls(tfm, enc, template, tcount, true);
1217 +}
1218 +
1219 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
1220 + u32 type, u32 mask)
1221 +{
1222 + struct crypto_aead *tfm;
1223 + int err = 0;
1224 +
1225 + tfm = crypto_alloc_aead(driver, type, mask);
1226 + if (IS_ERR(tfm)) {
1227 + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
1228 + driver, PTR_ERR(tfm));
1229 + return PTR_ERR(tfm);
1230 + }
1231 +
1232 + if (desc->suite.tls.enc.vecs) {
1233 + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
1234 + desc->suite.tls.enc.count);
1235 + if (err)
1236 + goto out;
1237 + }
1238 +
1239 + if (!err && desc->suite.tls.dec.vecs)
1240 + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
1241 + desc->suite.tls.dec.count);
1242 +
1243 +out:
1244 + crypto_free_aead(tfm);
1245 + return err;
1246 +}
1247 +
1248 static int test_cipher(struct crypto_cipher *tfm, int enc,
1249 - struct cipher_testvec *template, unsigned int tcount)
1250 + const struct cipher_testvec *template,
1251 + unsigned int tcount)
1252 {
1253 const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
1254 unsigned int i, j, k;
1255 @@ -1066,7 +1306,8 @@ out_nobuf:
1256 }
1257
1258 static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
1259 - struct cipher_testvec *template, unsigned int tcount,
1260 + const struct cipher_testvec *template,
1261 + unsigned int tcount,
1262 const bool diff_dst, const int align_offset)
1263 {
1264 const char *algo =
1265 @@ -1079,12 +1320,16 @@ static int __test_skcipher(struct crypto
1266 const char *e, *d;
1267 struct tcrypt_result result;
1268 void *data;
1269 - char iv[MAX_IVLEN];
1270 + char *iv;
1271 char *xbuf[XBUFSIZE];
1272 char *xoutbuf[XBUFSIZE];
1273 int ret = -ENOMEM;
1274 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1275
1276 + iv = kmalloc(MAX_IVLEN, GFP_KERNEL);
1277 + if (!iv)
1278 + return ret;
1279 +
1280 if (testmgr_alloc_buf(xbuf))
1281 goto out_nobuf;
1282
1283 @@ -1325,12 +1570,14 @@ out:
1284 testmgr_free_buf(xoutbuf);
1285 out_nooutbuf:
1286 testmgr_free_buf(xbuf);
1287 + kfree(iv);
1288 out_nobuf:
1289 return ret;
1290 }
1291
1292 static int test_skcipher(struct crypto_skcipher *tfm, int enc,
1293 - struct cipher_testvec *template, unsigned int tcount)
1294 + const struct cipher_testvec *template,
1295 + unsigned int tcount)
1296 {
1297 unsigned int alignmask;
1298 int ret;
1299 @@ -1362,8 +1609,10 @@ static int test_skcipher(struct crypto_s
1300 return 0;
1301 }
1302
1303 -static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
1304 - struct comp_testvec *dtemplate, int ctcount, int dtcount)
1305 +static int test_comp(struct crypto_comp *tfm,
1306 + const struct comp_testvec *ctemplate,
1307 + const struct comp_testvec *dtemplate,
1308 + int ctcount, int dtcount)
1309 {
1310 const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
1311 unsigned int i;
1312 @@ -1442,7 +1691,154 @@ out:
1313 return ret;
1314 }
1315
1316 -static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
1317 +static int test_acomp(struct crypto_acomp *tfm,
1318 + const struct comp_testvec *ctemplate,
1319 + const struct comp_testvec *dtemplate,
1320 + int ctcount, int dtcount)
1321 +{
1322 + const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
1323 + unsigned int i;
1324 + char *output;
1325 + int ret;
1326 + struct scatterlist src, dst;
1327 + struct acomp_req *req;
1328 + struct tcrypt_result result;
1329 +
1330 + output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
1331 + if (!output)
1332 + return -ENOMEM;
1333 +
1334 + for (i = 0; i < ctcount; i++) {
1335 + unsigned int dlen = COMP_BUF_SIZE;
1336 + int ilen = ctemplate[i].inlen;
1337 + void *input_vec;
1338 +
1339 + input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
1340 + if (!input_vec) {
1341 + ret = -ENOMEM;
1342 + goto out;
1343 + }
1344 +
1345 + memset(output, 0, dlen);
1346 + init_completion(&result.completion);
1347 + sg_init_one(&src, input_vec, ilen);
1348 + sg_init_one(&dst, output, dlen);
1349 +
1350 + req = acomp_request_alloc(tfm);
1351 + if (!req) {
1352 + pr_err("alg: acomp: request alloc failed for %s\n",
1353 + algo);
1354 + kfree(input_vec);
1355 + ret = -ENOMEM;
1356 + goto out;
1357 + }
1358 +
1359 + acomp_request_set_params(req, &src, &dst, ilen, dlen);
1360 + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1361 + tcrypt_complete, &result);
1362 +
1363 + ret = wait_async_op(&result, crypto_acomp_compress(req));
1364 + if (ret) {
1365 + pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
1366 + i + 1, algo, -ret);
1367 + kfree(input_vec);
1368 + acomp_request_free(req);
1369 + goto out;
1370 + }
1371 +
1372 + if (req->dlen != ctemplate[i].outlen) {
1373 + pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
1374 + i + 1, algo, req->dlen);
1375 + ret = -EINVAL;
1376 + kfree(input_vec);
1377 + acomp_request_free(req);
1378 + goto out;
1379 + }
1380 +
1381 + if (memcmp(output, ctemplate[i].output, req->dlen)) {
1382 + pr_err("alg: acomp: Compression test %d failed for %s\n",
1383 + i + 1, algo);
1384 + hexdump(output, req->dlen);
1385 + ret = -EINVAL;
1386 + kfree(input_vec);
1387 + acomp_request_free(req);
1388 + goto out;
1389 + }
1390 +
1391 + kfree(input_vec);
1392 + acomp_request_free(req);
1393 + }
1394 +
1395 + for (i = 0; i < dtcount; i++) {
1396 + unsigned int dlen = COMP_BUF_SIZE;
1397 + int ilen = dtemplate[i].inlen;
1398 + void *input_vec;
1399 +
1400 + input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL);
1401 + if (!input_vec) {
1402 + ret = -ENOMEM;
1403 + goto out;
1404 + }
1405 +
1406 + memset(output, 0, dlen);
1407 + init_completion(&result.completion);
1408 + sg_init_one(&src, input_vec, ilen);
1409 + sg_init_one(&dst, output, dlen);
1410 +
1411 + req = acomp_request_alloc(tfm);
1412 + if (!req) {
1413 + pr_err("alg: acomp: request alloc failed for %s\n",
1414 + algo);
1415 + kfree(input_vec);
1416 + ret = -ENOMEM;
1417 + goto out;
1418 + }
1419 +
1420 + acomp_request_set_params(req, &src, &dst, ilen, dlen);
1421 + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1422 + tcrypt_complete, &result);
1423 +
1424 + ret = wait_async_op(&result, crypto_acomp_decompress(req));
1425 + if (ret) {
1426 + pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
1427 + i + 1, algo, -ret);
1428 + kfree(input_vec);
1429 + acomp_request_free(req);
1430 + goto out;
1431 + }
1432 +
1433 + if (req->dlen != dtemplate[i].outlen) {
1434 + pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
1435 + i + 1, algo, req->dlen);
1436 + ret = -EINVAL;
1437 + kfree(input_vec);
1438 + acomp_request_free(req);
1439 + goto out;
1440 + }
1441 +
1442 + if (memcmp(output, dtemplate[i].output, req->dlen)) {
1443 + pr_err("alg: acomp: Decompression test %d failed for %s\n",
1444 + i + 1, algo);
1445 + hexdump(output, req->dlen);
1446 + ret = -EINVAL;
1447 + kfree(input_vec);
1448 + acomp_request_free(req);
1449 + goto out;
1450 + }
1451 +
1452 + kfree(input_vec);
1453 + acomp_request_free(req);
1454 + }
1455 +
1456 + ret = 0;
1457 +
1458 +out:
1459 + kfree(output);
1460 + return ret;
1461 +}
1462 +
1463 +static int test_cprng(struct crypto_rng *tfm,
1464 + const struct cprng_testvec *template,
1465 unsigned int tcount)
1466 {
1467 const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
1468 @@ -1509,7 +1905,7 @@ static int alg_test_aead(const struct al
1469 struct crypto_aead *tfm;
1470 int err = 0;
1471
1472 - tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
1473 + tfm = crypto_alloc_aead(driver, type, mask);
1474 if (IS_ERR(tfm)) {
1475 printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
1476 "%ld\n", driver, PTR_ERR(tfm));
1477 @@ -1538,7 +1934,7 @@ static int alg_test_cipher(const struct
1478 struct crypto_cipher *tfm;
1479 int err = 0;
1480
1481 - tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1482 + tfm = crypto_alloc_cipher(driver, type, mask);
1483 if (IS_ERR(tfm)) {
1484 printk(KERN_ERR "alg: cipher: Failed to load transform for "
1485 "%s: %ld\n", driver, PTR_ERR(tfm));
1486 @@ -1567,7 +1963,7 @@ static int alg_test_skcipher(const struc
1487 struct crypto_skcipher *tfm;
1488 int err = 0;
1489
1490 - tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1491 + tfm = crypto_alloc_skcipher(driver, type, mask);
1492 if (IS_ERR(tfm)) {
1493 printk(KERN_ERR "alg: skcipher: Failed to load transform for "
1494 "%s: %ld\n", driver, PTR_ERR(tfm));
1495 @@ -1593,22 +1989,38 @@ out:
1496 static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
1497 u32 type, u32 mask)
1498 {
1499 - struct crypto_comp *tfm;
1500 + struct crypto_comp *comp;
1501 + struct crypto_acomp *acomp;
1502 int err;
1503 + u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
1504
1505 - tfm = crypto_alloc_comp(driver, type, mask);
1506 - if (IS_ERR(tfm)) {
1507 - printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
1508 - "%ld\n", driver, PTR_ERR(tfm));
1509 - return PTR_ERR(tfm);
1510 - }
1511 + if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
1512 + acomp = crypto_alloc_acomp(driver, type, mask);
1513 + if (IS_ERR(acomp)) {
1514 + pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
1515 + driver, PTR_ERR(acomp));
1516 + return PTR_ERR(acomp);
1517 + }
1518 + err = test_acomp(acomp, desc->suite.comp.comp.vecs,
1519 + desc->suite.comp.decomp.vecs,
1520 + desc->suite.comp.comp.count,
1521 + desc->suite.comp.decomp.count);
1522 + crypto_free_acomp(acomp);
1523 + } else {
1524 + comp = crypto_alloc_comp(driver, type, mask);
1525 + if (IS_ERR(comp)) {
1526 + pr_err("alg: comp: Failed to load transform for %s: %ld\n",
1527 + driver, PTR_ERR(comp));
1528 + return PTR_ERR(comp);
1529 + }
1530
1531 - err = test_comp(tfm, desc->suite.comp.comp.vecs,
1532 - desc->suite.comp.decomp.vecs,
1533 - desc->suite.comp.comp.count,
1534 - desc->suite.comp.decomp.count);
1535 + err = test_comp(comp, desc->suite.comp.comp.vecs,
1536 + desc->suite.comp.decomp.vecs,
1537 + desc->suite.comp.comp.count,
1538 + desc->suite.comp.decomp.count);
1539
1540 - crypto_free_comp(tfm);
1541 + crypto_free_comp(comp);
1542 + }
1543 return err;
1544 }
1545
1546 @@ -1618,7 +2030,7 @@ static int alg_test_hash(const struct al
1547 struct crypto_ahash *tfm;
1548 int err;
1549
1550 - tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1551 + tfm = crypto_alloc_ahash(driver, type, mask);
1552 if (IS_ERR(tfm)) {
1553 printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
1554 "%ld\n", driver, PTR_ERR(tfm));
1555 @@ -1646,7 +2058,7 @@ static int alg_test_crc32c(const struct
1556 if (err)
1557 goto out;
1558
1559 - tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1560 + tfm = crypto_alloc_shash(driver, type, mask);
1561 if (IS_ERR(tfm)) {
1562 printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
1563 "%ld\n", driver, PTR_ERR(tfm));
1564 @@ -1688,7 +2100,7 @@ static int alg_test_cprng(const struct a
1565 struct crypto_rng *rng;
1566 int err;
1567
1568 - rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1569 + rng = crypto_alloc_rng(driver, type, mask);
1570 if (IS_ERR(rng)) {
1571 printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
1572 "%ld\n", driver, PTR_ERR(rng));
1573 @@ -1703,7 +2115,7 @@ static int alg_test_cprng(const struct a
1574 }
1575
1576
1577 -static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1578 +static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
1579 const char *driver, u32 type, u32 mask)
1580 {
1581 int ret = -EAGAIN;
1582 @@ -1715,7 +2127,7 @@ static int drbg_cavs_test(struct drbg_te
1583 if (!buf)
1584 return -ENOMEM;
1585
1586 - drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1587 + drng = crypto_alloc_rng(driver, type, mask);
1588 if (IS_ERR(drng)) {
1589 printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
1590 "%s\n", driver);
1591 @@ -1777,7 +2189,7 @@ static int alg_test_drbg(const struct al
1592 int err = 0;
1593 int pr = 0;
1594 int i = 0;
1595 - struct drbg_testvec *template = desc->suite.drbg.vecs;
1596 + const struct drbg_testvec *template = desc->suite.drbg.vecs;
1597 unsigned int tcount = desc->suite.drbg.count;
1598
1599 if (0 == memcmp(driver, "drbg_pr_", 8))
1600 @@ -1796,7 +2208,7 @@ static int alg_test_drbg(const struct al
1601
1602 }
1603
1604 -static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
1605 +static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
1606 const char *alg)
1607 {
1608 struct kpp_request *req;
1609 @@ -1888,7 +2300,7 @@ free_req:
1610 }
1611
1612 static int test_kpp(struct crypto_kpp *tfm, const char *alg,
1613 - struct kpp_testvec *vecs, unsigned int tcount)
1614 + const struct kpp_testvec *vecs, unsigned int tcount)
1615 {
1616 int ret, i;
1617
1618 @@ -1909,7 +2321,7 @@ static int alg_test_kpp(const struct alg
1619 struct crypto_kpp *tfm;
1620 int err = 0;
1621
1622 - tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
1623 + tfm = crypto_alloc_kpp(driver, type, mask);
1624 if (IS_ERR(tfm)) {
1625 pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
1626 driver, PTR_ERR(tfm));
1627 @@ -1924,7 +2336,7 @@ static int alg_test_kpp(const struct alg
1628 }
1629
1630 static int test_akcipher_one(struct crypto_akcipher *tfm,
1631 - struct akcipher_testvec *vecs)
1632 + const struct akcipher_testvec *vecs)
1633 {
1634 char *xbuf[XBUFSIZE];
1635 struct akcipher_request *req;
1636 @@ -2044,7 +2456,8 @@ free_xbuf:
1637 }
1638
1639 static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
1640 - struct akcipher_testvec *vecs, unsigned int tcount)
1641 + const struct akcipher_testvec *vecs,
1642 + unsigned int tcount)
1643 {
1644 const char *algo =
1645 crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
1646 @@ -2068,7 +2481,7 @@ static int alg_test_akcipher(const struc
1647 struct crypto_akcipher *tfm;
1648 int err = 0;
1649
1650 - tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1651 + tfm = crypto_alloc_akcipher(driver, type, mask);
1652 if (IS_ERR(tfm)) {
1653 pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
1654 driver, PTR_ERR(tfm));
1655 @@ -2088,112 +2501,23 @@ static int alg_test_null(const struct al
1656 return 0;
1657 }
1658
1659 +#define __VECS(tv) { .vecs = tv, .count = ARRAY_SIZE(tv) }
1660 +
1661 /* Please keep this list sorted by algorithm name. */
1662 static const struct alg_test_desc alg_test_descs[] = {
1663 {
1664 - .alg = "__cbc-cast5-avx",
1665 - .test = alg_test_null,
1666 - }, {
1667 - .alg = "__cbc-cast6-avx",
1668 - .test = alg_test_null,
1669 - }, {
1670 - .alg = "__cbc-serpent-avx",
1671 - .test = alg_test_null,
1672 - }, {
1673 - .alg = "__cbc-serpent-avx2",
1674 - .test = alg_test_null,
1675 - }, {
1676 - .alg = "__cbc-serpent-sse2",
1677 - .test = alg_test_null,
1678 - }, {
1679 - .alg = "__cbc-twofish-avx",
1680 - .test = alg_test_null,
1681 - }, {
1682 - .alg = "__driver-cbc-aes-aesni",
1683 - .test = alg_test_null,
1684 - .fips_allowed = 1,
1685 - }, {
1686 - .alg = "__driver-cbc-camellia-aesni",
1687 - .test = alg_test_null,
1688 - }, {
1689 - .alg = "__driver-cbc-camellia-aesni-avx2",
1690 - .test = alg_test_null,
1691 - }, {
1692 - .alg = "__driver-cbc-cast5-avx",
1693 - .test = alg_test_null,
1694 - }, {
1695 - .alg = "__driver-cbc-cast6-avx",
1696 - .test = alg_test_null,
1697 - }, {
1698 - .alg = "__driver-cbc-serpent-avx",
1699 - .test = alg_test_null,
1700 - }, {
1701 - .alg = "__driver-cbc-serpent-avx2",
1702 - .test = alg_test_null,
1703 - }, {
1704 - .alg = "__driver-cbc-serpent-sse2",
1705 - .test = alg_test_null,
1706 - }, {
1707 - .alg = "__driver-cbc-twofish-avx",
1708 - .test = alg_test_null,
1709 - }, {
1710 - .alg = "__driver-ecb-aes-aesni",
1711 - .test = alg_test_null,
1712 - .fips_allowed = 1,
1713 - }, {
1714 - .alg = "__driver-ecb-camellia-aesni",
1715 - .test = alg_test_null,
1716 - }, {
1717 - .alg = "__driver-ecb-camellia-aesni-avx2",
1718 - .test = alg_test_null,
1719 - }, {
1720 - .alg = "__driver-ecb-cast5-avx",
1721 - .test = alg_test_null,
1722 - }, {
1723 - .alg = "__driver-ecb-cast6-avx",
1724 - .test = alg_test_null,
1725 - }, {
1726 - .alg = "__driver-ecb-serpent-avx",
1727 - .test = alg_test_null,
1728 - }, {
1729 - .alg = "__driver-ecb-serpent-avx2",
1730 - .test = alg_test_null,
1731 - }, {
1732 - .alg = "__driver-ecb-serpent-sse2",
1733 - .test = alg_test_null,
1734 - }, {
1735 - .alg = "__driver-ecb-twofish-avx",
1736 - .test = alg_test_null,
1737 - }, {
1738 - .alg = "__driver-gcm-aes-aesni",
1739 - .test = alg_test_null,
1740 - .fips_allowed = 1,
1741 - }, {
1742 - .alg = "__ghash-pclmulqdqni",
1743 - .test = alg_test_null,
1744 - .fips_allowed = 1,
1745 - }, {
1746 .alg = "ansi_cprng",
1747 .test = alg_test_cprng,
1748 .suite = {
1749 - .cprng = {
1750 - .vecs = ansi_cprng_aes_tv_template,
1751 - .count = ANSI_CPRNG_AES_TEST_VECTORS
1752 - }
1753 + .cprng = __VECS(ansi_cprng_aes_tv_template)
1754 }
1755 }, {
1756 .alg = "authenc(hmac(md5),ecb(cipher_null))",
1757 .test = alg_test_aead,
1758 .suite = {
1759 .aead = {
1760 - .enc = {
1761 - .vecs = hmac_md5_ecb_cipher_null_enc_tv_template,
1762 - .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS
1763 - },
1764 - .dec = {
1765 - .vecs = hmac_md5_ecb_cipher_null_dec_tv_template,
1766 - .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS
1767 - }
1768 + .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template),
1769 + .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template)
1770 }
1771 }
1772 }, {
1773 @@ -2201,12 +2525,7 @@ static const struct alg_test_desc alg_te
1774 .test = alg_test_aead,
1775 .suite = {
1776 .aead = {
1777 - .enc = {
1778 - .vecs =
1779 - hmac_sha1_aes_cbc_enc_tv_temp,
1780 - .count =
1781 - HMAC_SHA1_AES_CBC_ENC_TEST_VEC
1782 - }
1783 + .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp)
1784 }
1785 }
1786 }, {
1787 @@ -2214,12 +2533,7 @@ static const struct alg_test_desc alg_te
1788 .test = alg_test_aead,
1789 .suite = {
1790 .aead = {
1791 - .enc = {
1792 - .vecs =
1793 - hmac_sha1_des_cbc_enc_tv_temp,
1794 - .count =
1795 - HMAC_SHA1_DES_CBC_ENC_TEST_VEC
1796 - }
1797 + .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp)
1798 }
1799 }
1800 }, {
1801 @@ -2228,12 +2542,7 @@ static const struct alg_test_desc alg_te
1802 .fips_allowed = 1,
1803 .suite = {
1804 .aead = {
1805 - .enc = {
1806 - .vecs =
1807 - hmac_sha1_des3_ede_cbc_enc_tv_temp,
1808 - .count =
1809 - HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC
1810 - }
1811 + .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp)
1812 }
1813 }
1814 }, {
1815 @@ -2245,18 +2554,8 @@ static const struct alg_test_desc alg_te
1816 .test = alg_test_aead,
1817 .suite = {
1818 .aead = {
1819 - .enc = {
1820 - .vecs =
1821 - hmac_sha1_ecb_cipher_null_enc_tv_temp,
1822 - .count =
1823 - HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC
1824 - },
1825 - .dec = {
1826 - .vecs =
1827 - hmac_sha1_ecb_cipher_null_dec_tv_temp,
1828 - .count =
1829 - HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC
1830 - }
1831 + .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp),
1832 + .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp)
1833 }
1834 }
1835 }, {
1836 @@ -2268,12 +2567,7 @@ static const struct alg_test_desc alg_te
1837 .test = alg_test_aead,
1838 .suite = {
1839 .aead = {
1840 - .enc = {
1841 - .vecs =
1842 - hmac_sha224_des_cbc_enc_tv_temp,
1843 - .count =
1844 - HMAC_SHA224_DES_CBC_ENC_TEST_VEC
1845 - }
1846 + .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp)
1847 }
1848 }
1849 }, {
1850 @@ -2282,12 +2576,7 @@ static const struct alg_test_desc alg_te
1851 .fips_allowed = 1,
1852 .suite = {
1853 .aead = {
1854 - .enc = {
1855 - .vecs =
1856 - hmac_sha224_des3_ede_cbc_enc_tv_temp,
1857 - .count =
1858 - HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC
1859 - }
1860 + .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp)
1861 }
1862 }
1863 }, {
1864 @@ -2296,12 +2585,7 @@ static const struct alg_test_desc alg_te
1865 .fips_allowed = 1,
1866 .suite = {
1867 .aead = {
1868 - .enc = {
1869 - .vecs =
1870 - hmac_sha256_aes_cbc_enc_tv_temp,
1871 - .count =
1872 - HMAC_SHA256_AES_CBC_ENC_TEST_VEC
1873 - }
1874 + .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp)
1875 }
1876 }
1877 }, {
1878 @@ -2309,12 +2593,7 @@ static const struct alg_test_desc alg_te
1879 .test = alg_test_aead,
1880 .suite = {
1881 .aead = {
1882 - .enc = {
1883 - .vecs =
1884 - hmac_sha256_des_cbc_enc_tv_temp,
1885 - .count =
1886 - HMAC_SHA256_DES_CBC_ENC_TEST_VEC
1887 - }
1888 + .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp)
1889 }
1890 }
1891 }, {
1892 @@ -2323,12 +2602,7 @@ static const struct alg_test_desc alg_te
1893 .fips_allowed = 1,
1894 .suite = {
1895 .aead = {
1896 - .enc = {
1897 - .vecs =
1898 - hmac_sha256_des3_ede_cbc_enc_tv_temp,
1899 - .count =
1900 - HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC
1901 - }
1902 + .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp)
1903 }
1904 }
1905 }, {
1906 @@ -2344,12 +2618,7 @@ static const struct alg_test_desc alg_te
1907 .test = alg_test_aead,
1908 .suite = {
1909 .aead = {
1910 - .enc = {
1911 - .vecs =
1912 - hmac_sha384_des_cbc_enc_tv_temp,
1913 - .count =
1914 - HMAC_SHA384_DES_CBC_ENC_TEST_VEC
1915 - }
1916 + .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp)
1917 }
1918 }
1919 }, {
1920 @@ -2358,12 +2627,7 @@ static const struct alg_test_desc alg_te
1921 .fips_allowed = 1,
1922 .suite = {
1923 .aead = {
1924 - .enc = {
1925 - .vecs =
1926 - hmac_sha384_des3_ede_cbc_enc_tv_temp,
1927 - .count =
1928 - HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC
1929 - }
1930 + .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp)
1931 }
1932 }
1933 }, {
1934 @@ -2380,12 +2644,7 @@ static const struct alg_test_desc alg_te
1935 .test = alg_test_aead,
1936 .suite = {
1937 .aead = {
1938 - .enc = {
1939 - .vecs =
1940 - hmac_sha512_aes_cbc_enc_tv_temp,
1941 - .count =
1942 - HMAC_SHA512_AES_CBC_ENC_TEST_VEC
1943 - }
1944 + .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp)
1945 }
1946 }
1947 }, {
1948 @@ -2393,12 +2652,7 @@ static const struct alg_test_desc alg_te
1949 .test = alg_test_aead,
1950 .suite = {
1951 .aead = {
1952 - .enc = {
1953 - .vecs =
1954 - hmac_sha512_des_cbc_enc_tv_temp,
1955 - .count =
1956 - HMAC_SHA512_DES_CBC_ENC_TEST_VEC
1957 - }
1958 + .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp)
1959 }
1960 }
1961 }, {
1962 @@ -2407,12 +2661,7 @@ static const struct alg_test_desc alg_te
1963 .fips_allowed = 1,
1964 .suite = {
1965 .aead = {
1966 - .enc = {
1967 - .vecs =
1968 - hmac_sha512_des3_ede_cbc_enc_tv_temp,
1969 - .count =
1970 - HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC
1971 - }
1972 + .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp)
1973 }
1974 }
1975 }, {
1976 @@ -2429,14 +2678,8 @@ static const struct alg_test_desc alg_te
1977 .fips_allowed = 1,
1978 .suite = {
1979 .cipher = {
1980 - .enc = {
1981 - .vecs = aes_cbc_enc_tv_template,
1982 - .count = AES_CBC_ENC_TEST_VECTORS
1983 - },
1984 - .dec = {
1985 - .vecs = aes_cbc_dec_tv_template,
1986 - .count = AES_CBC_DEC_TEST_VECTORS
1987 - }
1988 + .enc = __VECS(aes_cbc_enc_tv_template),
1989 + .dec = __VECS(aes_cbc_dec_tv_template)
1990 }
1991 }
1992 }, {
1993 @@ -2444,14 +2687,8 @@ static const struct alg_test_desc alg_te
1994 .test = alg_test_skcipher,
1995 .suite = {
1996 .cipher = {
1997 - .enc = {
1998 - .vecs = anubis_cbc_enc_tv_template,
1999 - .count = ANUBIS_CBC_ENC_TEST_VECTORS
2000 - },
2001 - .dec = {
2002 - .vecs = anubis_cbc_dec_tv_template,
2003 - .count = ANUBIS_CBC_DEC_TEST_VECTORS
2004 - }
2005 + .enc = __VECS(anubis_cbc_enc_tv_template),
2006 + .dec = __VECS(anubis_cbc_dec_tv_template)
2007 }
2008 }
2009 }, {
2010 @@ -2459,14 +2696,8 @@ static const struct alg_test_desc alg_te
2011 .test = alg_test_skcipher,
2012 .suite = {
2013 .cipher = {
2014 - .enc = {
2015 - .vecs = bf_cbc_enc_tv_template,
2016 - .count = BF_CBC_ENC_TEST_VECTORS
2017 - },
2018 - .dec = {
2019 - .vecs = bf_cbc_dec_tv_template,
2020 - .count = BF_CBC_DEC_TEST_VECTORS
2021 - }
2022 + .enc = __VECS(bf_cbc_enc_tv_template),
2023 + .dec = __VECS(bf_cbc_dec_tv_template)
2024 }
2025 }
2026 }, {
2027 @@ -2474,14 +2705,8 @@ static const struct alg_test_desc alg_te
2028 .test = alg_test_skcipher,
2029 .suite = {
2030 .cipher = {
2031 - .enc = {
2032 - .vecs = camellia_cbc_enc_tv_template,
2033 - .count = CAMELLIA_CBC_ENC_TEST_VECTORS
2034 - },
2035 - .dec = {
2036 - .vecs = camellia_cbc_dec_tv_template,
2037 - .count = CAMELLIA_CBC_DEC_TEST_VECTORS
2038 - }
2039 + .enc = __VECS(camellia_cbc_enc_tv_template),
2040 + .dec = __VECS(camellia_cbc_dec_tv_template)
2041 }
2042 }
2043 }, {
2044 @@ -2489,14 +2714,8 @@ static const struct alg_test_desc alg_te
2045 .test = alg_test_skcipher,
2046 .suite = {
2047 .cipher = {
2048 - .enc = {
2049 - .vecs = cast5_cbc_enc_tv_template,
2050 - .count = CAST5_CBC_ENC_TEST_VECTORS
2051 - },
2052 - .dec = {
2053 - .vecs = cast5_cbc_dec_tv_template,
2054 - .count = CAST5_CBC_DEC_TEST_VECTORS
2055 - }
2056 + .enc = __VECS(cast5_cbc_enc_tv_template),
2057 + .dec = __VECS(cast5_cbc_dec_tv_template)
2058 }
2059 }
2060 }, {
2061 @@ -2504,14 +2723,8 @@ static const struct alg_test_desc alg_te
2062 .test = alg_test_skcipher,
2063 .suite = {
2064 .cipher = {
2065 - .enc = {
2066 - .vecs = cast6_cbc_enc_tv_template,
2067 - .count = CAST6_CBC_ENC_TEST_VECTORS
2068 - },
2069 - .dec = {
2070 - .vecs = cast6_cbc_dec_tv_template,
2071 - .count = CAST6_CBC_DEC_TEST_VECTORS
2072 - }
2073 + .enc = __VECS(cast6_cbc_enc_tv_template),
2074 + .dec = __VECS(cast6_cbc_dec_tv_template)
2075 }
2076 }
2077 }, {
2078 @@ -2519,14 +2732,8 @@ static const struct alg_test_desc alg_te
2079 .test = alg_test_skcipher,
2080 .suite = {
2081 .cipher = {
2082 - .enc = {
2083 - .vecs = des_cbc_enc_tv_template,
2084 - .count = DES_CBC_ENC_TEST_VECTORS
2085 - },
2086 - .dec = {
2087 - .vecs = des_cbc_dec_tv_template,
2088 - .count = DES_CBC_DEC_TEST_VECTORS
2089 - }
2090 + .enc = __VECS(des_cbc_enc_tv_template),
2091 + .dec = __VECS(des_cbc_dec_tv_template)
2092 }
2093 }
2094 }, {
2095 @@ -2535,14 +2742,8 @@ static const struct alg_test_desc alg_te
2096 .fips_allowed = 1,
2097 .suite = {
2098 .cipher = {
2099 - .enc = {
2100 - .vecs = des3_ede_cbc_enc_tv_template,
2101 - .count = DES3_EDE_CBC_ENC_TEST_VECTORS
2102 - },
2103 - .dec = {
2104 - .vecs = des3_ede_cbc_dec_tv_template,
2105 - .count = DES3_EDE_CBC_DEC_TEST_VECTORS
2106 - }
2107 + .enc = __VECS(des3_ede_cbc_enc_tv_template),
2108 + .dec = __VECS(des3_ede_cbc_dec_tv_template)
2109 }
2110 }
2111 }, {
2112 @@ -2550,14 +2751,8 @@ static const struct alg_test_desc alg_te
2113 .test = alg_test_skcipher,
2114 .suite = {
2115 .cipher = {
2116 - .enc = {
2117 - .vecs = serpent_cbc_enc_tv_template,
2118 - .count = SERPENT_CBC_ENC_TEST_VECTORS
2119 - },
2120 - .dec = {
2121 - .vecs = serpent_cbc_dec_tv_template,
2122 - .count = SERPENT_CBC_DEC_TEST_VECTORS
2123 - }
2124 + .enc = __VECS(serpent_cbc_enc_tv_template),
2125 + .dec = __VECS(serpent_cbc_dec_tv_template)
2126 }
2127 }
2128 }, {
2129 @@ -2565,30 +2760,25 @@ static const struct alg_test_desc alg_te
2130 .test = alg_test_skcipher,
2131 .suite = {
2132 .cipher = {
2133 - .enc = {
2134 - .vecs = tf_cbc_enc_tv_template,
2135 - .count = TF_CBC_ENC_TEST_VECTORS
2136 - },
2137 - .dec = {
2138 - .vecs = tf_cbc_dec_tv_template,
2139 - .count = TF_CBC_DEC_TEST_VECTORS
2140 - }
2141 + .enc = __VECS(tf_cbc_enc_tv_template),
2142 + .dec = __VECS(tf_cbc_dec_tv_template)
2143 }
2144 }
2145 }, {
2146 + .alg = "cbcmac(aes)",
2147 + .fips_allowed = 1,
2148 + .test = alg_test_hash,
2149 + .suite = {
2150 + .hash = __VECS(aes_cbcmac_tv_template)
2151 + }
2152 + }, {
2153 .alg = "ccm(aes)",
2154 .test = alg_test_aead,
2155 .fips_allowed = 1,
2156 .suite = {
2157 .aead = {
2158 - .enc = {
2159 - .vecs = aes_ccm_enc_tv_template,
2160 - .count = AES_CCM_ENC_TEST_VECTORS
2161 - },
2162 - .dec = {
2163 - .vecs = aes_ccm_dec_tv_template,
2164 - .count = AES_CCM_DEC_TEST_VECTORS
2165 - }
2166 + .enc = __VECS(aes_ccm_enc_tv_template),
2167 + .dec = __VECS(aes_ccm_dec_tv_template)
2168 }
2169 }
2170 }, {
2171 @@ -2596,14 +2786,8 @@ static const struct alg_test_desc alg_te
2172 .test = alg_test_skcipher,
2173 .suite = {
2174 .cipher = {
2175 - .enc = {
2176 - .vecs = chacha20_enc_tv_template,
2177 - .count = CHACHA20_ENC_TEST_VECTORS
2178 - },
2179 - .dec = {
2180 - .vecs = chacha20_enc_tv_template,
2181 - .count = CHACHA20_ENC_TEST_VECTORS
2182 - },
2183 + .enc = __VECS(chacha20_enc_tv_template),
2184 + .dec = __VECS(chacha20_enc_tv_template),
2185 }
2186 }
2187 }, {
2188 @@ -2611,20 +2795,14 @@ static const struct alg_test_desc alg_te
2189 .fips_allowed = 1,
2190 .test = alg_test_hash,
2191 .suite = {
2192 - .hash = {
2193 - .vecs = aes_cmac128_tv_template,
2194 - .count = CMAC_AES_TEST_VECTORS
2195 - }
2196 + .hash = __VECS(aes_cmac128_tv_template)
2197 }
2198 }, {
2199 .alg = "cmac(des3_ede)",
2200 .fips_allowed = 1,
2201 .test = alg_test_hash,
2202 .suite = {
2203 - .hash = {
2204 - .vecs = des3_ede_cmac64_tv_template,
2205 - .count = CMAC_DES3_EDE_TEST_VECTORS
2206 - }
2207 + .hash = __VECS(des3_ede_cmac64_tv_template)
2208 }
2209 }, {
2210 .alg = "compress_null",
2211 @@ -2633,94 +2811,30 @@ static const struct alg_test_desc alg_te
2212 .alg = "crc32",
2213 .test = alg_test_hash,
2214 .suite = {
2215 - .hash = {
2216 - .vecs = crc32_tv_template,
2217 - .count = CRC32_TEST_VECTORS
2218 - }
2219 + .hash = __VECS(crc32_tv_template)
2220 }
2221 }, {
2222 .alg = "crc32c",
2223 .test = alg_test_crc32c,
2224 .fips_allowed = 1,
2225 .suite = {
2226 - .hash = {
2227 - .vecs = crc32c_tv_template,
2228 - .count = CRC32C_TEST_VECTORS
2229 - }
2230 + .hash = __VECS(crc32c_tv_template)
2231 }
2232 }, {
2233 .alg = "crct10dif",
2234 .test = alg_test_hash,
2235 .fips_allowed = 1,
2236 .suite = {
2237 - .hash = {
2238 - .vecs = crct10dif_tv_template,
2239 - .count = CRCT10DIF_TEST_VECTORS
2240 - }
2241 + .hash = __VECS(crct10dif_tv_template)
2242 }
2243 }, {
2244 - .alg = "cryptd(__driver-cbc-aes-aesni)",
2245 - .test = alg_test_null,
2246 - .fips_allowed = 1,
2247 - }, {
2248 - .alg = "cryptd(__driver-cbc-camellia-aesni)",
2249 - .test = alg_test_null,
2250 - }, {
2251 - .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)",
2252 - .test = alg_test_null,
2253 - }, {
2254 - .alg = "cryptd(__driver-cbc-serpent-avx2)",
2255 - .test = alg_test_null,
2256 - }, {
2257 - .alg = "cryptd(__driver-ecb-aes-aesni)",
2258 - .test = alg_test_null,
2259 - .fips_allowed = 1,
2260 - }, {
2261 - .alg = "cryptd(__driver-ecb-camellia-aesni)",
2262 - .test = alg_test_null,
2263 - }, {
2264 - .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)",
2265 - .test = alg_test_null,
2266 - }, {
2267 - .alg = "cryptd(__driver-ecb-cast5-avx)",
2268 - .test = alg_test_null,
2269 - }, {
2270 - .alg = "cryptd(__driver-ecb-cast6-avx)",
2271 - .test = alg_test_null,
2272 - }, {
2273 - .alg = "cryptd(__driver-ecb-serpent-avx)",
2274 - .test = alg_test_null,
2275 - }, {
2276 - .alg = "cryptd(__driver-ecb-serpent-avx2)",
2277 - .test = alg_test_null,
2278 - }, {
2279 - .alg = "cryptd(__driver-ecb-serpent-sse2)",
2280 - .test = alg_test_null,
2281 - }, {
2282 - .alg = "cryptd(__driver-ecb-twofish-avx)",
2283 - .test = alg_test_null,
2284 - }, {
2285 - .alg = "cryptd(__driver-gcm-aes-aesni)",
2286 - .test = alg_test_null,
2287 - .fips_allowed = 1,
2288 - }, {
2289 - .alg = "cryptd(__ghash-pclmulqdqni)",
2290 - .test = alg_test_null,
2291 - .fips_allowed = 1,
2292 - }, {
2293 .alg = "ctr(aes)",
2294 .test = alg_test_skcipher,
2295 .fips_allowed = 1,
2296 .suite = {
2297 .cipher = {
2298 - .enc = {
2299 - .vecs = aes_ctr_enc_tv_template,
2300 - .count = AES_CTR_ENC_TEST_VECTORS
2301 - },
2302 - .dec = {
2303 - .vecs = aes_ctr_dec_tv_template,
2304 - .count = AES_CTR_DEC_TEST_VECTORS
2305 - }
2306 + .enc = __VECS(aes_ctr_enc_tv_template),
2307 + .dec = __VECS(aes_ctr_dec_tv_template)
2308 }
2309 }
2310 }, {
2311 @@ -2728,14 +2842,8 @@ static const struct alg_test_desc alg_te
2312 .test = alg_test_skcipher,
2313 .suite = {
2314 .cipher = {
2315 - .enc = {
2316 - .vecs = bf_ctr_enc_tv_template,
2317 - .count = BF_CTR_ENC_TEST_VECTORS
2318 - },
2319 - .dec = {
2320 - .vecs = bf_ctr_dec_tv_template,
2321 - .count = BF_CTR_DEC_TEST_VECTORS
2322 - }
2323 + .enc = __VECS(bf_ctr_enc_tv_template),
2324 + .dec = __VECS(bf_ctr_dec_tv_template)
2325 }
2326 }
2327 }, {
2328 @@ -2743,14 +2851,8 @@ static const struct alg_test_desc alg_te
2329 .test = alg_test_skcipher,
2330 .suite = {
2331 .cipher = {
2332 - .enc = {
2333 - .vecs = camellia_ctr_enc_tv_template,
2334 - .count = CAMELLIA_CTR_ENC_TEST_VECTORS
2335 - },
2336 - .dec = {
2337 - .vecs = camellia_ctr_dec_tv_template,
2338 - .count = CAMELLIA_CTR_DEC_TEST_VECTORS
2339 - }
2340 + .enc = __VECS(camellia_ctr_enc_tv_template),
2341 + .dec = __VECS(camellia_ctr_dec_tv_template)
2342 }
2343 }
2344 }, {
2345 @@ -2758,14 +2860,8 @@ static const struct alg_test_desc alg_te
2346 .test = alg_test_skcipher,
2347 .suite = {
2348 .cipher = {
2349 - .enc = {
2350 - .vecs = cast5_ctr_enc_tv_template,
2351 - .count = CAST5_CTR_ENC_TEST_VECTORS
2352 - },
2353 - .dec = {
2354 - .vecs = cast5_ctr_dec_tv_template,
2355 - .count = CAST5_CTR_DEC_TEST_VECTORS
2356 - }
2357 + .enc = __VECS(cast5_ctr_enc_tv_template),
2358 + .dec = __VECS(cast5_ctr_dec_tv_template)
2359 }
2360 }
2361 }, {
2362 @@ -2773,14 +2869,8 @@ static const struct alg_test_desc alg_te
2363 .test = alg_test_skcipher,
2364 .suite = {
2365 .cipher = {
2366 - .enc = {
2367 - .vecs = cast6_ctr_enc_tv_template,
2368 - .count = CAST6_CTR_ENC_TEST_VECTORS
2369 - },
2370 - .dec = {
2371 - .vecs = cast6_ctr_dec_tv_template,
2372 - .count = CAST6_CTR_DEC_TEST_VECTORS
2373 - }
2374 + .enc = __VECS(cast6_ctr_enc_tv_template),
2375 + .dec = __VECS(cast6_ctr_dec_tv_template)
2376 }
2377 }
2378 }, {
2379 @@ -2788,29 +2878,18 @@ static const struct alg_test_desc alg_te
2380 .test = alg_test_skcipher,
2381 .suite = {
2382 .cipher = {
2383 - .enc = {
2384 - .vecs = des_ctr_enc_tv_template,
2385 - .count = DES_CTR_ENC_TEST_VECTORS
2386 - },
2387 - .dec = {
2388 - .vecs = des_ctr_dec_tv_template,
2389 - .count = DES_CTR_DEC_TEST_VECTORS
2390 - }
2391 + .enc = __VECS(des_ctr_enc_tv_template),
2392 + .dec = __VECS(des_ctr_dec_tv_template)
2393 }
2394 }
2395 }, {
2396 .alg = "ctr(des3_ede)",
2397 .test = alg_test_skcipher,
2398 + .fips_allowed = 1,
2399 .suite = {
2400 .cipher = {
2401 - .enc = {
2402 - .vecs = des3_ede_ctr_enc_tv_template,
2403 - .count = DES3_EDE_CTR_ENC_TEST_VECTORS
2404 - },
2405 - .dec = {
2406 - .vecs = des3_ede_ctr_dec_tv_template,
2407 - .count = DES3_EDE_CTR_DEC_TEST_VECTORS
2408 - }
2409 + .enc = __VECS(des3_ede_ctr_enc_tv_template),
2410 + .dec = __VECS(des3_ede_ctr_dec_tv_template)
2411 }
2412 }
2413 }, {
2414 @@ -2818,14 +2897,8 @@ static const struct alg_test_desc alg_te
2415 .test = alg_test_skcipher,
2416 .suite = {
2417 .cipher = {
2418 - .enc = {
2419 - .vecs = serpent_ctr_enc_tv_template,
2420 - .count = SERPENT_CTR_ENC_TEST_VECTORS
2421 - },
2422 - .dec = {
2423 - .vecs = serpent_ctr_dec_tv_template,
2424 - .count = SERPENT_CTR_DEC_TEST_VECTORS
2425 - }
2426 + .enc = __VECS(serpent_ctr_enc_tv_template),
2427 + .dec = __VECS(serpent_ctr_dec_tv_template)
2428 }
2429 }
2430 }, {
2431 @@ -2833,14 +2906,8 @@ static const struct alg_test_desc alg_te
2432 .test = alg_test_skcipher,
2433 .suite = {
2434 .cipher = {
2435 - .enc = {
2436 - .vecs = tf_ctr_enc_tv_template,
2437 - .count = TF_CTR_ENC_TEST_VECTORS
2438 - },
2439 - .dec = {
2440 - .vecs = tf_ctr_dec_tv_template,
2441 - .count = TF_CTR_DEC_TEST_VECTORS
2442 - }
2443 + .enc = __VECS(tf_ctr_enc_tv_template),
2444 + .dec = __VECS(tf_ctr_dec_tv_template)
2445 }
2446 }
2447 }, {
2448 @@ -2848,14 +2915,8 @@ static const struct alg_test_desc alg_te
2449 .test = alg_test_skcipher,
2450 .suite = {
2451 .cipher = {
2452 - .enc = {
2453 - .vecs = cts_mode_enc_tv_template,
2454 - .count = CTS_MODE_ENC_TEST_VECTORS
2455 - },
2456 - .dec = {
2457 - .vecs = cts_mode_dec_tv_template,
2458 - .count = CTS_MODE_DEC_TEST_VECTORS
2459 - }
2460 + .enc = __VECS(cts_mode_enc_tv_template),
2461 + .dec = __VECS(cts_mode_dec_tv_template)
2462 }
2463 }
2464 }, {
2465 @@ -2864,14 +2925,8 @@ static const struct alg_test_desc alg_te
2466 .fips_allowed = 1,
2467 .suite = {
2468 .comp = {
2469 - .comp = {
2470 - .vecs = deflate_comp_tv_template,
2471 - .count = DEFLATE_COMP_TEST_VECTORS
2472 - },
2473 - .decomp = {
2474 - .vecs = deflate_decomp_tv_template,
2475 - .count = DEFLATE_DECOMP_TEST_VECTORS
2476 - }
2477 + .comp = __VECS(deflate_comp_tv_template),
2478 + .decomp = __VECS(deflate_decomp_tv_template)
2479 }
2480 }
2481 }, {
2482 @@ -2879,10 +2934,7 @@ static const struct alg_test_desc alg_te
2483 .test = alg_test_kpp,
2484 .fips_allowed = 1,
2485 .suite = {
2486 - .kpp = {
2487 - .vecs = dh_tv_template,
2488 - .count = DH_TEST_VECTORS
2489 - }
2490 + .kpp = __VECS(dh_tv_template)
2491 }
2492 }, {
2493 .alg = "digest_null",
2494 @@ -2892,30 +2944,21 @@ static const struct alg_test_desc alg_te
2495 .test = alg_test_drbg,
2496 .fips_allowed = 1,
2497 .suite = {
2498 - .drbg = {
2499 - .vecs = drbg_nopr_ctr_aes128_tv_template,
2500 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
2501 - }
2502 + .drbg = __VECS(drbg_nopr_ctr_aes128_tv_template)
2503 }
2504 }, {
2505 .alg = "drbg_nopr_ctr_aes192",
2506 .test = alg_test_drbg,
2507 .fips_allowed = 1,
2508 .suite = {
2509 - .drbg = {
2510 - .vecs = drbg_nopr_ctr_aes192_tv_template,
2511 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
2512 - }
2513 + .drbg = __VECS(drbg_nopr_ctr_aes192_tv_template)
2514 }
2515 }, {
2516 .alg = "drbg_nopr_ctr_aes256",
2517 .test = alg_test_drbg,
2518 .fips_allowed = 1,
2519 .suite = {
2520 - .drbg = {
2521 - .vecs = drbg_nopr_ctr_aes256_tv_template,
2522 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
2523 - }
2524 + .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
2525 }
2526 }, {
2527 /*
2528 @@ -2930,11 +2973,7 @@ static const struct alg_test_desc alg_te
2529 .test = alg_test_drbg,
2530 .fips_allowed = 1,
2531 .suite = {
2532 - .drbg = {
2533 - .vecs = drbg_nopr_hmac_sha256_tv_template,
2534 - .count =
2535 - ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
2536 - }
2537 + .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
2538 }
2539 }, {
2540 /* covered by drbg_nopr_hmac_sha256 test */
2541 @@ -2954,10 +2993,7 @@ static const struct alg_test_desc alg_te
2542 .test = alg_test_drbg,
2543 .fips_allowed = 1,
2544 .suite = {
2545 - .drbg = {
2546 - .vecs = drbg_nopr_sha256_tv_template,
2547 - .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
2548 - }
2549 + .drbg = __VECS(drbg_nopr_sha256_tv_template)
2550 }
2551 }, {
2552 /* covered by drbg_nopr_sha256 test */
2553 @@ -2973,10 +3009,7 @@ static const struct alg_test_desc alg_te
2554 .test = alg_test_drbg,
2555 .fips_allowed = 1,
2556 .suite = {
2557 - .drbg = {
2558 - .vecs = drbg_pr_ctr_aes128_tv_template,
2559 - .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
2560 - }
2561 + .drbg = __VECS(drbg_pr_ctr_aes128_tv_template)
2562 }
2563 }, {
2564 /* covered by drbg_pr_ctr_aes128 test */
2565 @@ -2996,10 +3029,7 @@ static const struct alg_test_desc alg_te
2566 .test = alg_test_drbg,
2567 .fips_allowed = 1,
2568 .suite = {
2569 - .drbg = {
2570 - .vecs = drbg_pr_hmac_sha256_tv_template,
2571 - .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
2572 - }
2573 + .drbg = __VECS(drbg_pr_hmac_sha256_tv_template)
2574 }
2575 }, {
2576 /* covered by drbg_pr_hmac_sha256 test */
2577 @@ -3019,10 +3049,7 @@ static const struct alg_test_desc alg_te
2578 .test = alg_test_drbg,
2579 .fips_allowed = 1,
2580 .suite = {
2581 - .drbg = {
2582 - .vecs = drbg_pr_sha256_tv_template,
2583 - .count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
2584 - }
2585 + .drbg = __VECS(drbg_pr_sha256_tv_template)
2586 }
2587 }, {
2588 /* covered by drbg_pr_sha256 test */
2589 @@ -3034,23 +3061,13 @@ static const struct alg_test_desc alg_te
2590 .fips_allowed = 1,
2591 .test = alg_test_null,
2592 }, {
2593 - .alg = "ecb(__aes-aesni)",
2594 - .test = alg_test_null,
2595 - .fips_allowed = 1,
2596 - }, {
2597 .alg = "ecb(aes)",
2598 .test = alg_test_skcipher,
2599 .fips_allowed = 1,
2600 .suite = {
2601 .cipher = {
2602 - .enc = {
2603 - .vecs = aes_enc_tv_template,
2604 - .count = AES_ENC_TEST_VECTORS
2605 - },
2606 - .dec = {
2607 - .vecs = aes_dec_tv_template,
2608 - .count = AES_DEC_TEST_VECTORS
2609 - }
2610 + .enc = __VECS(aes_enc_tv_template),
2611 + .dec = __VECS(aes_dec_tv_template)
2612 }
2613 }
2614 }, {
2615 @@ -3058,14 +3075,8 @@ static const struct alg_test_desc alg_te
2616 .test = alg_test_skcipher,
2617 .suite = {
2618 .cipher = {
2619 - .enc = {
2620 - .vecs = anubis_enc_tv_template,
2621 - .count = ANUBIS_ENC_TEST_VECTORS
2622 - },
2623 - .dec = {
2624 - .vecs = anubis_dec_tv_template,
2625 - .count = ANUBIS_DEC_TEST_VECTORS
2626 - }
2627 + .enc = __VECS(anubis_enc_tv_template),
2628 + .dec = __VECS(anubis_dec_tv_template)
2629 }
2630 }
2631 }, {
2632 @@ -3073,14 +3084,8 @@ static const struct alg_test_desc alg_te
2633 .test = alg_test_skcipher,
2634 .suite = {
2635 .cipher = {
2636 - .enc = {
2637 - .vecs = arc4_enc_tv_template,
2638 - .count = ARC4_ENC_TEST_VECTORS
2639 - },
2640 - .dec = {
2641 - .vecs = arc4_dec_tv_template,
2642 - .count = ARC4_DEC_TEST_VECTORS
2643 - }
2644 + .enc = __VECS(arc4_enc_tv_template),
2645 + .dec = __VECS(arc4_dec_tv_template)
2646 }
2647 }
2648 }, {
2649 @@ -3088,14 +3093,8 @@ static const struct alg_test_desc alg_te
2650 .test = alg_test_skcipher,
2651 .suite = {
2652 .cipher = {
2653 - .enc = {
2654 - .vecs = bf_enc_tv_template,
2655 - .count = BF_ENC_TEST_VECTORS
2656 - },
2657 - .dec = {
2658 - .vecs = bf_dec_tv_template,
2659 - .count = BF_DEC_TEST_VECTORS
2660 - }
2661 + .enc = __VECS(bf_enc_tv_template),
2662 + .dec = __VECS(bf_dec_tv_template)
2663 }
2664 }
2665 }, {
2666 @@ -3103,14 +3102,8 @@ static const struct alg_test_desc alg_te
2667 .test = alg_test_skcipher,
2668 .suite = {
2669 .cipher = {
2670 - .enc = {
2671 - .vecs = camellia_enc_tv_template,
2672 - .count = CAMELLIA_ENC_TEST_VECTORS
2673 - },
2674 - .dec = {
2675 - .vecs = camellia_dec_tv_template,
2676 - .count = CAMELLIA_DEC_TEST_VECTORS
2677 - }
2678 + .enc = __VECS(camellia_enc_tv_template),
2679 + .dec = __VECS(camellia_dec_tv_template)
2680 }
2681 }
2682 }, {
2683 @@ -3118,14 +3111,8 @@ static const struct alg_test_desc alg_te
2684 .test = alg_test_skcipher,
2685 .suite = {
2686 .cipher = {
2687 - .enc = {
2688 - .vecs = cast5_enc_tv_template,
2689 - .count = CAST5_ENC_TEST_VECTORS
2690 - },
2691 - .dec = {
2692 - .vecs = cast5_dec_tv_template,
2693 - .count = CAST5_DEC_TEST_VECTORS
2694 - }
2695 + .enc = __VECS(cast5_enc_tv_template),
2696 + .dec = __VECS(cast5_dec_tv_template)
2697 }
2698 }
2699 }, {
2700 @@ -3133,14 +3120,8 @@ static const struct alg_test_desc alg_te
2701 .test = alg_test_skcipher,
2702 .suite = {
2703 .cipher = {
2704 - .enc = {
2705 - .vecs = cast6_enc_tv_template,
2706 - .count = CAST6_ENC_TEST_VECTORS
2707 - },
2708 - .dec = {
2709 - .vecs = cast6_dec_tv_template,
2710 - .count = CAST6_DEC_TEST_VECTORS
2711 - }
2712 + .enc = __VECS(cast6_enc_tv_template),
2713 + .dec = __VECS(cast6_dec_tv_template)
2714 }
2715 }
2716 }, {
2717 @@ -3151,14 +3132,8 @@ static const struct alg_test_desc alg_te
2718 .test = alg_test_skcipher,
2719 .suite = {
2720 .cipher = {
2721 - .enc = {
2722 - .vecs = des_enc_tv_template,
2723 - .count = DES_ENC_TEST_VECTORS
2724 - },
2725 - .dec = {
2726 - .vecs = des_dec_tv_template,
2727 - .count = DES_DEC_TEST_VECTORS
2728 - }
2729 + .enc = __VECS(des_enc_tv_template),
2730 + .dec = __VECS(des_dec_tv_template)
2731 }
2732 }
2733 }, {
2734 @@ -3167,14 +3142,8 @@ static const struct alg_test_desc alg_te
2735 .fips_allowed = 1,
2736 .suite = {
2737 .cipher = {
2738 - .enc = {
2739 - .vecs = des3_ede_enc_tv_template,
2740 - .count = DES3_EDE_ENC_TEST_VECTORS
2741 - },
2742 - .dec = {
2743 - .vecs = des3_ede_dec_tv_template,
2744 - .count = DES3_EDE_DEC_TEST_VECTORS
2745 - }
2746 + .enc = __VECS(des3_ede_enc_tv_template),
2747 + .dec = __VECS(des3_ede_dec_tv_template)
2748 }
2749 }
2750 }, {
2751 @@ -3197,14 +3166,8 @@ static const struct alg_test_desc alg_te
2752 .test = alg_test_skcipher,
2753 .suite = {
2754 .cipher = {
2755 - .enc = {
2756 - .vecs = khazad_enc_tv_template,
2757 - .count = KHAZAD_ENC_TEST_VECTORS
2758 - },
2759 - .dec = {
2760 - .vecs = khazad_dec_tv_template,
2761 - .count = KHAZAD_DEC_TEST_VECTORS
2762 - }
2763 + .enc = __VECS(khazad_enc_tv_template),
2764 + .dec = __VECS(khazad_dec_tv_template)
2765 }
2766 }
2767 }, {
2768 @@ -3212,14 +3175,8 @@ static const struct alg_test_desc alg_te
2769 .test = alg_test_skcipher,
2770 .suite = {
2771 .cipher = {
2772 - .enc = {
2773 - .vecs = seed_enc_tv_template,
2774 - .count = SEED_ENC_TEST_VECTORS
2775 - },
2776 - .dec = {
2777 - .vecs = seed_dec_tv_template,
2778 - .count = SEED_DEC_TEST_VECTORS
2779 - }
2780 + .enc = __VECS(seed_enc_tv_template),
2781 + .dec = __VECS(seed_dec_tv_template)
2782 }
2783 }
2784 }, {
2785 @@ -3227,14 +3184,8 @@ static const struct alg_test_desc alg_te
2786 .test = alg_test_skcipher,
2787 .suite = {
2788 .cipher = {
2789 - .enc = {
2790 - .vecs = serpent_enc_tv_template,
2791 - .count = SERPENT_ENC_TEST_VECTORS
2792 - },
2793 - .dec = {
2794 - .vecs = serpent_dec_tv_template,
2795 - .count = SERPENT_DEC_TEST_VECTORS
2796 - }
2797 + .enc = __VECS(serpent_enc_tv_template),
2798 + .dec = __VECS(serpent_dec_tv_template)
2799 }
2800 }
2801 }, {
2802 @@ -3242,14 +3193,8 @@ static const struct alg_test_desc alg_te
2803 .test = alg_test_skcipher,
2804 .suite = {
2805 .cipher = {
2806 - .enc = {
2807 - .vecs = tea_enc_tv_template,
2808 - .count = TEA_ENC_TEST_VECTORS
2809 - },
2810 - .dec = {
2811 - .vecs = tea_dec_tv_template,
2812 - .count = TEA_DEC_TEST_VECTORS
2813 - }
2814 + .enc = __VECS(tea_enc_tv_template),
2815 + .dec = __VECS(tea_dec_tv_template)
2816 }
2817 }
2818 }, {
2819 @@ -3257,14 +3202,8 @@ static const struct alg_test_desc alg_te
2820 .test = alg_test_skcipher,
2821 .suite = {
2822 .cipher = {
2823 - .enc = {
2824 - .vecs = tnepres_enc_tv_template,
2825 - .count = TNEPRES_ENC_TEST_VECTORS
2826 - },
2827 - .dec = {
2828 - .vecs = tnepres_dec_tv_template,
2829 - .count = TNEPRES_DEC_TEST_VECTORS
2830 - }
2831 + .enc = __VECS(tnepres_enc_tv_template),
2832 + .dec = __VECS(tnepres_dec_tv_template)
2833 }
2834 }
2835 }, {
2836 @@ -3272,14 +3211,8 @@ static const struct alg_test_desc alg_te
2837 .test = alg_test_skcipher,
2838 .suite = {
2839 .cipher = {
2840 - .enc = {
2841 - .vecs = tf_enc_tv_template,
2842 - .count = TF_ENC_TEST_VECTORS
2843 - },
2844 - .dec = {
2845 - .vecs = tf_dec_tv_template,
2846 - .count = TF_DEC_TEST_VECTORS
2847 - }
2848 + .enc = __VECS(tf_enc_tv_template),
2849 + .dec = __VECS(tf_dec_tv_template)
2850 }
2851 }
2852 }, {
2853 @@ -3287,14 +3220,8 @@ static const struct alg_test_desc alg_te
2854 .test = alg_test_skcipher,
2855 .suite = {
2856 .cipher = {
2857 - .enc = {
2858 - .vecs = xeta_enc_tv_template,
2859 - .count = XETA_ENC_TEST_VECTORS
2860 - },
2861 - .dec = {
2862 - .vecs = xeta_dec_tv_template,
2863 - .count = XETA_DEC_TEST_VECTORS
2864 - }
2865 + .enc = __VECS(xeta_enc_tv_template),
2866 + .dec = __VECS(xeta_dec_tv_template)
2867 }
2868 }
2869 }, {
2870 @@ -3302,14 +3229,8 @@ static const struct alg_test_desc alg_te
2871 .test = alg_test_skcipher,
2872 .suite = {
2873 .cipher = {
2874 - .enc = {
2875 - .vecs = xtea_enc_tv_template,
2876 - .count = XTEA_ENC_TEST_VECTORS
2877 - },
2878 - .dec = {
2879 - .vecs = xtea_dec_tv_template,
2880 - .count = XTEA_DEC_TEST_VECTORS
2881 - }
2882 + .enc = __VECS(xtea_enc_tv_template),
2883 + .dec = __VECS(xtea_dec_tv_template)
2884 }
2885 }
2886 }, {
2887 @@ -3317,10 +3238,7 @@ static const struct alg_test_desc alg_te
2888 .test = alg_test_kpp,
2889 .fips_allowed = 1,
2890 .suite = {
2891 - .kpp = {
2892 - .vecs = ecdh_tv_template,
2893 - .count = ECDH_TEST_VECTORS
2894 - }
2895 + .kpp = __VECS(ecdh_tv_template)
2896 }
2897 }, {
2898 .alg = "gcm(aes)",
2899 @@ -3328,14 +3246,8 @@ static const struct alg_test_desc alg_te
2900 .fips_allowed = 1,
2901 .suite = {
2902 .aead = {
2903 - .enc = {
2904 - .vecs = aes_gcm_enc_tv_template,
2905 - .count = AES_GCM_ENC_TEST_VECTORS
2906 - },
2907 - .dec = {
2908 - .vecs = aes_gcm_dec_tv_template,
2909 - .count = AES_GCM_DEC_TEST_VECTORS
2910 - }
2911 + .enc = __VECS(aes_gcm_enc_tv_template),
2912 + .dec = __VECS(aes_gcm_dec_tv_template)
2913 }
2914 }
2915 }, {
2916 @@ -3343,136 +3255,94 @@ static const struct alg_test_desc alg_te
2917 .test = alg_test_hash,
2918 .fips_allowed = 1,
2919 .suite = {
2920 - .hash = {
2921 - .vecs = ghash_tv_template,
2922 - .count = GHASH_TEST_VECTORS
2923 - }
2924 + .hash = __VECS(ghash_tv_template)
2925 }
2926 }, {
2927 .alg = "hmac(crc32)",
2928 .test = alg_test_hash,
2929 .suite = {
2930 - .hash = {
2931 - .vecs = bfin_crc_tv_template,
2932 - .count = BFIN_CRC_TEST_VECTORS
2933 - }
2934 + .hash = __VECS(bfin_crc_tv_template)
2935 }
2936 }, {
2937 .alg = "hmac(md5)",
2938 .test = alg_test_hash,
2939 .suite = {
2940 - .hash = {
2941 - .vecs = hmac_md5_tv_template,
2942 - .count = HMAC_MD5_TEST_VECTORS
2943 - }
2944 + .hash = __VECS(hmac_md5_tv_template)
2945 }
2946 }, {
2947 .alg = "hmac(rmd128)",
2948 .test = alg_test_hash,
2949 .suite = {
2950 - .hash = {
2951 - .vecs = hmac_rmd128_tv_template,
2952 - .count = HMAC_RMD128_TEST_VECTORS
2953 - }
2954 + .hash = __VECS(hmac_rmd128_tv_template)
2955 }
2956 }, {
2957 .alg = "hmac(rmd160)",
2958 .test = alg_test_hash,
2959 .suite = {
2960 - .hash = {
2961 - .vecs = hmac_rmd160_tv_template,
2962 - .count = HMAC_RMD160_TEST_VECTORS
2963 - }
2964 + .hash = __VECS(hmac_rmd160_tv_template)
2965 }
2966 }, {
2967 .alg = "hmac(sha1)",
2968 .test = alg_test_hash,
2969 .fips_allowed = 1,
2970 .suite = {
2971 - .hash = {
2972 - .vecs = hmac_sha1_tv_template,
2973 - .count = HMAC_SHA1_TEST_VECTORS
2974 - }
2975 + .hash = __VECS(hmac_sha1_tv_template)
2976 }
2977 }, {
2978 .alg = "hmac(sha224)",
2979 .test = alg_test_hash,
2980 .fips_allowed = 1,
2981 .suite = {
2982 - .hash = {
2983 - .vecs = hmac_sha224_tv_template,
2984 - .count = HMAC_SHA224_TEST_VECTORS
2985 - }
2986 + .hash = __VECS(hmac_sha224_tv_template)
2987 }
2988 }, {
2989 .alg = "hmac(sha256)",
2990 .test = alg_test_hash,
2991 .fips_allowed = 1,
2992 .suite = {
2993 - .hash = {
2994 - .vecs = hmac_sha256_tv_template,
2995 - .count = HMAC_SHA256_TEST_VECTORS
2996 - }
2997 + .hash = __VECS(hmac_sha256_tv_template)
2998 }
2999 }, {
3000 .alg = "hmac(sha3-224)",
3001 .test = alg_test_hash,
3002 .fips_allowed = 1,
3003 .suite = {
3004 - .hash = {
3005 - .vecs = hmac_sha3_224_tv_template,
3006 - .count = HMAC_SHA3_224_TEST_VECTORS
3007 - }
3008 + .hash = __VECS(hmac_sha3_224_tv_template)
3009 }
3010 }, {
3011 .alg = "hmac(sha3-256)",
3012 .test = alg_test_hash,
3013 .fips_allowed = 1,
3014 .suite = {
3015 - .hash = {
3016 - .vecs = hmac_sha3_256_tv_template,
3017 - .count = HMAC_SHA3_256_TEST_VECTORS
3018 - }
3019 + .hash = __VECS(hmac_sha3_256_tv_template)
3020 }
3021 }, {
3022 .alg = "hmac(sha3-384)",
3023 .test = alg_test_hash,
3024 .fips_allowed = 1,
3025 .suite = {
3026 - .hash = {
3027 - .vecs = hmac_sha3_384_tv_template,
3028 - .count = HMAC_SHA3_384_TEST_VECTORS
3029 - }
3030 + .hash = __VECS(hmac_sha3_384_tv_template)
3031 }
3032 }, {
3033 .alg = "hmac(sha3-512)",
3034 .test = alg_test_hash,
3035 .fips_allowed = 1,
3036 .suite = {
3037 - .hash = {
3038 - .vecs = hmac_sha3_512_tv_template,
3039 - .count = HMAC_SHA3_512_TEST_VECTORS
3040 - }
3041 + .hash = __VECS(hmac_sha3_512_tv_template)
3042 }
3043 }, {
3044 .alg = "hmac(sha384)",
3045 .test = alg_test_hash,
3046 .fips_allowed = 1,
3047 .suite = {
3048 - .hash = {
3049 - .vecs = hmac_sha384_tv_template,
3050 - .count = HMAC_SHA384_TEST_VECTORS
3051 - }
3052 + .hash = __VECS(hmac_sha384_tv_template)
3053 }
3054 }, {
3055 .alg = "hmac(sha512)",
3056 .test = alg_test_hash,
3057 .fips_allowed = 1,
3058 .suite = {
3059 - .hash = {
3060 - .vecs = hmac_sha512_tv_template,
3061 - .count = HMAC_SHA512_TEST_VECTORS
3062 - }
3063 + .hash = __VECS(hmac_sha512_tv_template)
3064 }
3065 }, {
3066 .alg = "jitterentropy_rng",
3067 @@ -3484,14 +3354,8 @@ static const struct alg_test_desc alg_te
3068 .fips_allowed = 1,
3069 .suite = {
3070 .cipher = {
3071 - .enc = {
3072 - .vecs = aes_kw_enc_tv_template,
3073 - .count = ARRAY_SIZE(aes_kw_enc_tv_template)
3074 - },
3075 - .dec = {
3076 - .vecs = aes_kw_dec_tv_template,
3077 - .count = ARRAY_SIZE(aes_kw_dec_tv_template)
3078 - }
3079 + .enc = __VECS(aes_kw_enc_tv_template),
3080 + .dec = __VECS(aes_kw_dec_tv_template)
3081 }
3082 }
3083 }, {
3084 @@ -3499,14 +3363,8 @@ static const struct alg_test_desc alg_te
3085 .test = alg_test_skcipher,
3086 .suite = {
3087 .cipher = {
3088 - .enc = {
3089 - .vecs = aes_lrw_enc_tv_template,
3090 - .count = AES_LRW_ENC_TEST_VECTORS
3091 - },
3092 - .dec = {
3093 - .vecs = aes_lrw_dec_tv_template,
3094 - .count = AES_LRW_DEC_TEST_VECTORS
3095 - }
3096 + .enc = __VECS(aes_lrw_enc_tv_template),
3097 + .dec = __VECS(aes_lrw_dec_tv_template)
3098 }
3099 }
3100 }, {
3101 @@ -3514,14 +3372,8 @@ static const struct alg_test_desc alg_te
3102 .test = alg_test_skcipher,
3103 .suite = {
3104 .cipher = {
3105 - .enc = {
3106 - .vecs = camellia_lrw_enc_tv_template,
3107 - .count = CAMELLIA_LRW_ENC_TEST_VECTORS
3108 - },
3109 - .dec = {
3110 - .vecs = camellia_lrw_dec_tv_template,
3111 - .count = CAMELLIA_LRW_DEC_TEST_VECTORS
3112 - }
3113 + .enc = __VECS(camellia_lrw_enc_tv_template),
3114 + .dec = __VECS(camellia_lrw_dec_tv_template)
3115 }
3116 }
3117 }, {
3118 @@ -3529,14 +3381,8 @@ static const struct alg_test_desc alg_te
3119 .test = alg_test_skcipher,
3120 .suite = {
3121 .cipher = {
3122 - .enc = {
3123 - .vecs = cast6_lrw_enc_tv_template,
3124 - .count = CAST6_LRW_ENC_TEST_VECTORS
3125 - },
3126 - .dec = {
3127 - .vecs = cast6_lrw_dec_tv_template,
3128 - .count = CAST6_LRW_DEC_TEST_VECTORS
3129 - }
3130 + .enc = __VECS(cast6_lrw_enc_tv_template),
3131 + .dec = __VECS(cast6_lrw_dec_tv_template)
3132 }
3133 }
3134 }, {
3135 @@ -3544,14 +3390,8 @@ static const struct alg_test_desc alg_te
3136 .test = alg_test_skcipher,
3137 .suite = {
3138 .cipher = {
3139 - .enc = {
3140 - .vecs = serpent_lrw_enc_tv_template,
3141 - .count = SERPENT_LRW_ENC_TEST_VECTORS
3142 - },
3143 - .dec = {
3144 - .vecs = serpent_lrw_dec_tv_template,
3145 - .count = SERPENT_LRW_DEC_TEST_VECTORS
3146 - }
3147 + .enc = __VECS(serpent_lrw_enc_tv_template),
3148 + .dec = __VECS(serpent_lrw_dec_tv_template)
3149 }
3150 }
3151 }, {
3152 @@ -3559,14 +3399,8 @@ static const struct alg_test_desc alg_te
3153 .test = alg_test_skcipher,
3154 .suite = {
3155 .cipher = {
3156 - .enc = {
3157 - .vecs = tf_lrw_enc_tv_template,
3158 - .count = TF_LRW_ENC_TEST_VECTORS
3159 - },
3160 - .dec = {
3161 - .vecs = tf_lrw_dec_tv_template,
3162 - .count = TF_LRW_DEC_TEST_VECTORS
3163 - }
3164 + .enc = __VECS(tf_lrw_enc_tv_template),
3165 + .dec = __VECS(tf_lrw_dec_tv_template)
3166 }
3167 }
3168 }, {
3169 @@ -3575,14 +3409,8 @@ static const struct alg_test_desc alg_te
3170 .fips_allowed = 1,
3171 .suite = {
3172 .comp = {
3173 - .comp = {
3174 - .vecs = lz4_comp_tv_template,
3175 - .count = LZ4_COMP_TEST_VECTORS
3176 - },
3177 - .decomp = {
3178 - .vecs = lz4_decomp_tv_template,
3179 - .count = LZ4_DECOMP_TEST_VECTORS
3180 - }
3181 + .comp = __VECS(lz4_comp_tv_template),
3182 + .decomp = __VECS(lz4_decomp_tv_template)
3183 }
3184 }
3185 }, {
3186 @@ -3591,14 +3419,8 @@ static const struct alg_test_desc alg_te
3187 .fips_allowed = 1,
3188 .suite = {
3189 .comp = {
3190 - .comp = {
3191 - .vecs = lz4hc_comp_tv_template,
3192 - .count = LZ4HC_COMP_TEST_VECTORS
3193 - },
3194 - .decomp = {
3195 - .vecs = lz4hc_decomp_tv_template,
3196 - .count = LZ4HC_DECOMP_TEST_VECTORS
3197 - }
3198 + .comp = __VECS(lz4hc_comp_tv_template),
3199 + .decomp = __VECS(lz4hc_decomp_tv_template)
3200 }
3201 }
3202 }, {
3203 @@ -3607,42 +3429,27 @@ static const struct alg_test_desc alg_te
3204 .fips_allowed = 1,
3205 .suite = {
3206 .comp = {
3207 - .comp = {
3208 - .vecs = lzo_comp_tv_template,
3209 - .count = LZO_COMP_TEST_VECTORS
3210 - },
3211 - .decomp = {
3212 - .vecs = lzo_decomp_tv_template,
3213 - .count = LZO_DECOMP_TEST_VECTORS
3214 - }
3215 + .comp = __VECS(lzo_comp_tv_template),
3216 + .decomp = __VECS(lzo_decomp_tv_template)
3217 }
3218 }
3219 }, {
3220 .alg = "md4",
3221 .test = alg_test_hash,
3222 .suite = {
3223 - .hash = {
3224 - .vecs = md4_tv_template,
3225 - .count = MD4_TEST_VECTORS
3226 - }
3227 + .hash = __VECS(md4_tv_template)
3228 }
3229 }, {
3230 .alg = "md5",
3231 .test = alg_test_hash,
3232 .suite = {
3233 - .hash = {
3234 - .vecs = md5_tv_template,
3235 - .count = MD5_TEST_VECTORS
3236 - }
3237 + .hash = __VECS(md5_tv_template)
3238 }
3239 }, {
3240 .alg = "michael_mic",
3241 .test = alg_test_hash,
3242 .suite = {
3243 - .hash = {
3244 - .vecs = michael_mic_tv_template,
3245 - .count = MICHAEL_MIC_TEST_VECTORS
3246 - }
3247 + .hash = __VECS(michael_mic_tv_template)
3248 }
3249 }, {
3250 .alg = "ofb(aes)",
3251 @@ -3650,14 +3457,8 @@ static const struct alg_test_desc alg_te
3252 .fips_allowed = 1,
3253 .suite = {
3254 .cipher = {
3255 - .enc = {
3256 - .vecs = aes_ofb_enc_tv_template,
3257 - .count = AES_OFB_ENC_TEST_VECTORS
3258 - },
3259 - .dec = {
3260 - .vecs = aes_ofb_dec_tv_template,
3261 - .count = AES_OFB_DEC_TEST_VECTORS
3262 - }
3263 + .enc = __VECS(aes_ofb_enc_tv_template),
3264 + .dec = __VECS(aes_ofb_dec_tv_template)
3265 }
3266 }
3267 }, {
3268 @@ -3665,24 +3466,15 @@ static const struct alg_test_desc alg_te
3269 .test = alg_test_skcipher,
3270 .suite = {
3271 .cipher = {
3272 - .enc = {
3273 - .vecs = fcrypt_pcbc_enc_tv_template,
3274 - .count = FCRYPT_ENC_TEST_VECTORS
3275 - },
3276 - .dec = {
3277 - .vecs = fcrypt_pcbc_dec_tv_template,
3278 - .count = FCRYPT_DEC_TEST_VECTORS
3279 - }
3280 + .enc = __VECS(fcrypt_pcbc_enc_tv_template),
3281 + .dec = __VECS(fcrypt_pcbc_dec_tv_template)
3282 }
3283 }
3284 }, {
3285 .alg = "poly1305",
3286 .test = alg_test_hash,
3287 .suite = {
3288 - .hash = {
3289 - .vecs = poly1305_tv_template,
3290 - .count = POLY1305_TEST_VECTORS
3291 - }
3292 + .hash = __VECS(poly1305_tv_template)
3293 }
3294 }, {
3295 .alg = "rfc3686(ctr(aes))",
3296 @@ -3690,14 +3482,8 @@ static const struct alg_test_desc alg_te
3297 .fips_allowed = 1,
3298 .suite = {
3299 .cipher = {
3300 - .enc = {
3301 - .vecs = aes_ctr_rfc3686_enc_tv_template,
3302 - .count = AES_CTR_3686_ENC_TEST_VECTORS
3303 - },
3304 - .dec = {
3305 - .vecs = aes_ctr_rfc3686_dec_tv_template,
3306 - .count = AES_CTR_3686_DEC_TEST_VECTORS
3307 - }
3308 + .enc = __VECS(aes_ctr_rfc3686_enc_tv_template),
3309 + .dec = __VECS(aes_ctr_rfc3686_dec_tv_template)
3310 }
3311 }
3312 }, {
3313 @@ -3706,14 +3492,8 @@ static const struct alg_test_desc alg_te
3314 .fips_allowed = 1,
3315 .suite = {
3316 .aead = {
3317 - .enc = {
3318 - .vecs = aes_gcm_rfc4106_enc_tv_template,
3319 - .count = AES_GCM_4106_ENC_TEST_VECTORS
3320 - },
3321 - .dec = {
3322 - .vecs = aes_gcm_rfc4106_dec_tv_template,
3323 - .count = AES_GCM_4106_DEC_TEST_VECTORS
3324 - }
3325 + .enc = __VECS(aes_gcm_rfc4106_enc_tv_template),
3326 + .dec = __VECS(aes_gcm_rfc4106_dec_tv_template)
3327 }
3328 }
3329 }, {
3330 @@ -3722,14 +3502,8 @@ static const struct alg_test_desc alg_te
3331 .fips_allowed = 1,
3332 .suite = {
3333 .aead = {
3334 - .enc = {
3335 - .vecs = aes_ccm_rfc4309_enc_tv_template,
3336 - .count = AES_CCM_4309_ENC_TEST_VECTORS
3337 - },
3338 - .dec = {
3339 - .vecs = aes_ccm_rfc4309_dec_tv_template,
3340 - .count = AES_CCM_4309_DEC_TEST_VECTORS
3341 - }
3342 + .enc = __VECS(aes_ccm_rfc4309_enc_tv_template),
3343 + .dec = __VECS(aes_ccm_rfc4309_dec_tv_template)
3344 }
3345 }
3346 }, {
3347 @@ -3737,14 +3511,8 @@ static const struct alg_test_desc alg_te
3348 .test = alg_test_aead,
3349 .suite = {
3350 .aead = {
3351 - .enc = {
3352 - .vecs = aes_gcm_rfc4543_enc_tv_template,
3353 - .count = AES_GCM_4543_ENC_TEST_VECTORS
3354 - },
3355 - .dec = {
3356 - .vecs = aes_gcm_rfc4543_dec_tv_template,
3357 - .count = AES_GCM_4543_DEC_TEST_VECTORS
3358 - },
3359 + .enc = __VECS(aes_gcm_rfc4543_enc_tv_template),
3360 + .dec = __VECS(aes_gcm_rfc4543_dec_tv_template),
3361 }
3362 }
3363 }, {
3364 @@ -3752,14 +3520,8 @@ static const struct alg_test_desc alg_te
3365 .test = alg_test_aead,
3366 .suite = {
3367 .aead = {
3368 - .enc = {
3369 - .vecs = rfc7539_enc_tv_template,
3370 - .count = RFC7539_ENC_TEST_VECTORS
3371 - },
3372 - .dec = {
3373 - .vecs = rfc7539_dec_tv_template,
3374 - .count = RFC7539_DEC_TEST_VECTORS
3375 - },
3376 + .enc = __VECS(rfc7539_enc_tv_template),
3377 + .dec = __VECS(rfc7539_dec_tv_template),
3378 }
3379 }
3380 }, {
3381 @@ -3767,71 +3529,47 @@ static const struct alg_test_desc alg_te
3382 .test = alg_test_aead,
3383 .suite = {
3384 .aead = {
3385 - .enc = {
3386 - .vecs = rfc7539esp_enc_tv_template,
3387 - .count = RFC7539ESP_ENC_TEST_VECTORS
3388 - },
3389 - .dec = {
3390 - .vecs = rfc7539esp_dec_tv_template,
3391 - .count = RFC7539ESP_DEC_TEST_VECTORS
3392 - },
3393 + .enc = __VECS(rfc7539esp_enc_tv_template),
3394 + .dec = __VECS(rfc7539esp_dec_tv_template),
3395 }
3396 }
3397 }, {
3398 .alg = "rmd128",
3399 .test = alg_test_hash,
3400 .suite = {
3401 - .hash = {
3402 - .vecs = rmd128_tv_template,
3403 - .count = RMD128_TEST_VECTORS
3404 - }
3405 + .hash = __VECS(rmd128_tv_template)
3406 }
3407 }, {
3408 .alg = "rmd160",
3409 .test = alg_test_hash,
3410 .suite = {
3411 - .hash = {
3412 - .vecs = rmd160_tv_template,
3413 - .count = RMD160_TEST_VECTORS
3414 - }
3415 + .hash = __VECS(rmd160_tv_template)
3416 }
3417 }, {
3418 .alg = "rmd256",
3419 .test = alg_test_hash,
3420 .suite = {
3421 - .hash = {
3422 - .vecs = rmd256_tv_template,
3423 - .count = RMD256_TEST_VECTORS
3424 - }
3425 + .hash = __VECS(rmd256_tv_template)
3426 }
3427 }, {
3428 .alg = "rmd320",
3429 .test = alg_test_hash,
3430 .suite = {
3431 - .hash = {
3432 - .vecs = rmd320_tv_template,
3433 - .count = RMD320_TEST_VECTORS
3434 - }
3435 + .hash = __VECS(rmd320_tv_template)
3436 }
3437 }, {
3438 .alg = "rsa",
3439 .test = alg_test_akcipher,
3440 .fips_allowed = 1,
3441 .suite = {
3442 - .akcipher = {
3443 - .vecs = rsa_tv_template,
3444 - .count = RSA_TEST_VECTORS
3445 - }
3446 + .akcipher = __VECS(rsa_tv_template)
3447 }
3448 }, {
3449 .alg = "salsa20",
3450 .test = alg_test_skcipher,
3451 .suite = {
3452 .cipher = {
3453 - .enc = {
3454 - .vecs = salsa20_stream_enc_tv_template,
3455 - .count = SALSA20_STREAM_ENC_TEST_VECTORS
3456 - }
3457 + .enc = __VECS(salsa20_stream_enc_tv_template)
3458 }
3459 }
3460 }, {
3461 @@ -3839,162 +3577,120 @@ static const struct alg_test_desc alg_te
3462 .test = alg_test_hash,
3463 .fips_allowed = 1,
3464 .suite = {
3465 - .hash = {
3466 - .vecs = sha1_tv_template,
3467 - .count = SHA1_TEST_VECTORS
3468 - }
3469 + .hash = __VECS(sha1_tv_template)
3470 }
3471 }, {
3472 .alg = "sha224",
3473 .test = alg_test_hash,
3474 .fips_allowed = 1,
3475 .suite = {
3476 - .hash = {
3477 - .vecs = sha224_tv_template,
3478 - .count = SHA224_TEST_VECTORS
3479 - }
3480 + .hash = __VECS(sha224_tv_template)
3481 }
3482 }, {
3483 .alg = "sha256",
3484 .test = alg_test_hash,
3485 .fips_allowed = 1,
3486 .suite = {
3487 - .hash = {
3488 - .vecs = sha256_tv_template,
3489 - .count = SHA256_TEST_VECTORS
3490 - }
3491 + .hash = __VECS(sha256_tv_template)
3492 }
3493 }, {
3494 .alg = "sha3-224",
3495 .test = alg_test_hash,
3496 .fips_allowed = 1,
3497 .suite = {
3498 - .hash = {
3499 - .vecs = sha3_224_tv_template,
3500 - .count = SHA3_224_TEST_VECTORS
3501 - }
3502 + .hash = __VECS(sha3_224_tv_template)
3503 }
3504 }, {
3505 .alg = "sha3-256",
3506 .test = alg_test_hash,
3507 .fips_allowed = 1,
3508 .suite = {
3509 - .hash = {
3510 - .vecs = sha3_256_tv_template,
3511 - .count = SHA3_256_TEST_VECTORS
3512 - }
3513 + .hash = __VECS(sha3_256_tv_template)
3514 }
3515 }, {
3516 .alg = "sha3-384",
3517 .test = alg_test_hash,
3518 .fips_allowed = 1,
3519 .suite = {
3520 - .hash = {
3521 - .vecs = sha3_384_tv_template,
3522 - .count = SHA3_384_TEST_VECTORS
3523 - }
3524 + .hash = __VECS(sha3_384_tv_template)
3525 }
3526 }, {
3527 .alg = "sha3-512",
3528 .test = alg_test_hash,
3529 .fips_allowed = 1,
3530 .suite = {
3531 - .hash = {
3532 - .vecs = sha3_512_tv_template,
3533 - .count = SHA3_512_TEST_VECTORS
3534 - }
3535 + .hash = __VECS(sha3_512_tv_template)
3536 }
3537 }, {
3538 .alg = "sha384",
3539 .test = alg_test_hash,
3540 .fips_allowed = 1,
3541 .suite = {
3542 - .hash = {
3543 - .vecs = sha384_tv_template,
3544 - .count = SHA384_TEST_VECTORS
3545 - }
3546 + .hash = __VECS(sha384_tv_template)
3547 }
3548 }, {
3549 .alg = "sha512",
3550 .test = alg_test_hash,
3551 .fips_allowed = 1,
3552 .suite = {
3553 - .hash = {
3554 - .vecs = sha512_tv_template,
3555 - .count = SHA512_TEST_VECTORS
3556 - }
3557 + .hash = __VECS(sha512_tv_template)
3558 }
3559 }, {
3560 .alg = "tgr128",
3561 .test = alg_test_hash,
3562 .suite = {
3563 - .hash = {
3564 - .vecs = tgr128_tv_template,
3565 - .count = TGR128_TEST_VECTORS
3566 - }
3567 + .hash = __VECS(tgr128_tv_template)
3568 }
3569 }, {
3570 .alg = "tgr160",
3571 .test = alg_test_hash,
3572 .suite = {
3573 - .hash = {
3574 - .vecs = tgr160_tv_template,
3575 - .count = TGR160_TEST_VECTORS
3576 - }
3577 + .hash = __VECS(tgr160_tv_template)
3578 }
3579 }, {
3580 .alg = "tgr192",
3581 .test = alg_test_hash,
3582 .suite = {
3583 - .hash = {
3584 - .vecs = tgr192_tv_template,
3585 - .count = TGR192_TEST_VECTORS
3586 + .hash = __VECS(tgr192_tv_template)
3587 + }
3588 + }, {
3589 + .alg = "tls10(hmac(sha1),cbc(aes))",
3590 + .test = alg_test_tls,
3591 + .suite = {
3592 + .tls = {
3593 + .enc = __VECS(tls_enc_tv_template),
3594 + .dec = __VECS(tls_dec_tv_template)
3595 }
3596 }
3597 }, {
3598 .alg = "vmac(aes)",
3599 .test = alg_test_hash,
3600 .suite = {
3601 - .hash = {
3602 - .vecs = aes_vmac128_tv_template,
3603 - .count = VMAC_AES_TEST_VECTORS
3604 - }
3605 + .hash = __VECS(aes_vmac128_tv_template)
3606 }
3607 }, {
3608 .alg = "wp256",
3609 .test = alg_test_hash,
3610 .suite = {
3611 - .hash = {
3612 - .vecs = wp256_tv_template,
3613 - .count = WP256_TEST_VECTORS
3614 - }
3615 + .hash = __VECS(wp256_tv_template)
3616 }
3617 }, {
3618 .alg = "wp384",
3619 .test = alg_test_hash,
3620 .suite = {
3621 - .hash = {
3622 - .vecs = wp384_tv_template,
3623 - .count = WP384_TEST_VECTORS
3624 - }
3625 + .hash = __VECS(wp384_tv_template)
3626 }
3627 }, {
3628 .alg = "wp512",
3629 .test = alg_test_hash,
3630 .suite = {
3631 - .hash = {
3632 - .vecs = wp512_tv_template,
3633 - .count = WP512_TEST_VECTORS
3634 - }
3635 + .hash = __VECS(wp512_tv_template)
3636 }
3637 }, {
3638 .alg = "xcbc(aes)",
3639 .test = alg_test_hash,
3640 .suite = {
3641 - .hash = {
3642 - .vecs = aes_xcbc128_tv_template,
3643 - .count = XCBC_AES_TEST_VECTORS
3644 - }
3645 + .hash = __VECS(aes_xcbc128_tv_template)
3646 }
3647 }, {
3648 .alg = "xts(aes)",
3649 @@ -4002,14 +3698,8 @@ static const struct alg_test_desc alg_te
3650 .fips_allowed = 1,
3651 .suite = {
3652 .cipher = {
3653 - .enc = {
3654 - .vecs = aes_xts_enc_tv_template,
3655 - .count = AES_XTS_ENC_TEST_VECTORS
3656 - },
3657 - .dec = {
3658 - .vecs = aes_xts_dec_tv_template,
3659 - .count = AES_XTS_DEC_TEST_VECTORS
3660 - }
3661 + .enc = __VECS(aes_xts_enc_tv_template),
3662 + .dec = __VECS(aes_xts_dec_tv_template)
3663 }
3664 }
3665 }, {
3666 @@ -4017,14 +3707,8 @@ static const struct alg_test_desc alg_te
3667 .test = alg_test_skcipher,
3668 .suite = {
3669 .cipher = {
3670 - .enc = {
3671 - .vecs = camellia_xts_enc_tv_template,
3672 - .count = CAMELLIA_XTS_ENC_TEST_VECTORS
3673 - },
3674 - .dec = {
3675 - .vecs = camellia_xts_dec_tv_template,
3676 - .count = CAMELLIA_XTS_DEC_TEST_VECTORS
3677 - }
3678 + .enc = __VECS(camellia_xts_enc_tv_template),
3679 + .dec = __VECS(camellia_xts_dec_tv_template)
3680 }
3681 }
3682 }, {
3683 @@ -4032,14 +3716,8 @@ static const struct alg_test_desc alg_te
3684 .test = alg_test_skcipher,
3685 .suite = {
3686 .cipher = {
3687 - .enc = {
3688 - .vecs = cast6_xts_enc_tv_template,
3689 - .count = CAST6_XTS_ENC_TEST_VECTORS
3690 - },
3691 - .dec = {
3692 - .vecs = cast6_xts_dec_tv_template,
3693 - .count = CAST6_XTS_DEC_TEST_VECTORS
3694 - }
3695 + .enc = __VECS(cast6_xts_enc_tv_template),
3696 + .dec = __VECS(cast6_xts_dec_tv_template)
3697 }
3698 }
3699 }, {
3700 @@ -4047,14 +3725,8 @@ static const struct alg_test_desc alg_te
3701 .test = alg_test_skcipher,
3702 .suite = {
3703 .cipher = {
3704 - .enc = {
3705 - .vecs = serpent_xts_enc_tv_template,
3706 - .count = SERPENT_XTS_ENC_TEST_VECTORS
3707 - },
3708 - .dec = {
3709 - .vecs = serpent_xts_dec_tv_template,
3710 - .count = SERPENT_XTS_DEC_TEST_VECTORS
3711 - }
3712 + .enc = __VECS(serpent_xts_enc_tv_template),
3713 + .dec = __VECS(serpent_xts_dec_tv_template)
3714 }
3715 }
3716 }, {
3717 @@ -4062,14 +3734,8 @@ static const struct alg_test_desc alg_te
3718 .test = alg_test_skcipher,
3719 .suite = {
3720 .cipher = {
3721 - .enc = {
3722 - .vecs = tf_xts_enc_tv_template,
3723 - .count = TF_XTS_ENC_TEST_VECTORS
3724 - },
3725 - .dec = {
3726 - .vecs = tf_xts_dec_tv_template,
3727 - .count = TF_XTS_DEC_TEST_VECTORS
3728 - }
3729 + .enc = __VECS(tf_xts_enc_tv_template),
3730 + .dec = __VECS(tf_xts_dec_tv_template)
3731 }
3732 }
3733 }
3734 --- a/crypto/testmgr.h
3735 +++ b/crypto/testmgr.h
3736 @@ -34,9 +34,9 @@
3737
3738 struct hash_testvec {
3739 /* only used with keyed hash algorithms */
3740 - char *key;
3741 - char *plaintext;
3742 - char *digest;
3743 + const char *key;
3744 + const char *plaintext;
3745 + const char *digest;
3746 unsigned char tap[MAX_TAP];
3747 unsigned short psize;
3748 unsigned char np;
3749 @@ -63,11 +63,11 @@ struct hash_testvec {
3750 */
3751
3752 struct cipher_testvec {
3753 - char *key;
3754 - char *iv;
3755 - char *iv_out;
3756 - char *input;
3757 - char *result;
3758 + const char *key;
3759 + const char *iv;
3760 + const char *iv_out;
3761 + const char *input;
3762 + const char *result;
3763 unsigned short tap[MAX_TAP];
3764 int np;
3765 unsigned char also_non_np;
3766 @@ -80,11 +80,11 @@ struct cipher_testvec {
3767 };
3768
3769 struct aead_testvec {
3770 - char *key;
3771 - char *iv;
3772 - char *input;
3773 - char *assoc;
3774 - char *result;
3775 + const char *key;
3776 + const char *iv;
3777 + const char *input;
3778 + const char *assoc;
3779 + const char *result;
3780 unsigned char tap[MAX_TAP];
3781 unsigned char atap[MAX_TAP];
3782 int np;
3783 @@ -99,10 +99,10 @@ struct aead_testvec {
3784 };
3785
3786 struct cprng_testvec {
3787 - char *key;
3788 - char *dt;
3789 - char *v;
3790 - char *result;
3791 + const char *key;
3792 + const char *dt;
3793 + const char *v;
3794 + const char *result;
3795 unsigned char klen;
3796 unsigned short dtlen;
3797 unsigned short vlen;
3798 @@ -111,24 +111,38 @@ struct cprng_testvec {
3799 };
3800
3801 struct drbg_testvec {
3802 - unsigned char *entropy;
3803 + const unsigned char *entropy;
3804 size_t entropylen;
3805 - unsigned char *entpra;
3806 - unsigned char *entprb;
3807 + const unsigned char *entpra;
3808 + const unsigned char *entprb;
3809 size_t entprlen;
3810 - unsigned char *addtla;
3811 - unsigned char *addtlb;
3812 + const unsigned char *addtla;
3813 + const unsigned char *addtlb;
3814 size_t addtllen;
3815 - unsigned char *pers;
3816 + const unsigned char *pers;
3817 size_t perslen;
3818 - unsigned char *expected;
3819 + const unsigned char *expected;
3820 size_t expectedlen;
3821 };
3822
3823 +struct tls_testvec {
3824 + char *key; /* wrapped keys for encryption and authentication */
3825 + char *iv; /* initialization vector */
3826 + char *input; /* input data */
3827 + char *assoc; /* associated data: seq num, type, version, input len */
3828 + char *result; /* result data */
3829 + unsigned char fail; /* the test failure is expected */
3830 + unsigned char novrfy; /* dec verification failure expected */
3831 + unsigned char klen; /* key length */
3832 + unsigned short ilen; /* input data length */
3833 + unsigned short alen; /* associated data length */
3834 + unsigned short rlen; /* result length */
3835 +};
3836 +
3837 struct akcipher_testvec {
3838 - unsigned char *key;
3839 - unsigned char *m;
3840 - unsigned char *c;
3841 + const unsigned char *key;
3842 + const unsigned char *m;
3843 + const unsigned char *c;
3844 unsigned int key_len;
3845 unsigned int m_size;
3846 unsigned int c_size;
3847 @@ -136,27 +150,227 @@ struct akcipher_testvec {
3848 };
3849
3850 struct kpp_testvec {
3851 - unsigned char *secret;
3852 - unsigned char *b_public;
3853 - unsigned char *expected_a_public;
3854 - unsigned char *expected_ss;
3855 + const unsigned char *secret;
3856 + const unsigned char *b_public;
3857 + const unsigned char *expected_a_public;
3858 + const unsigned char *expected_ss;
3859 unsigned short secret_size;
3860 unsigned short b_public_size;
3861 unsigned short expected_a_public_size;
3862 unsigned short expected_ss_size;
3863 };
3864
3865 -static char zeroed_string[48];
3866 +static const char zeroed_string[48];
3867
3868 /*
3869 - * RSA test vectors. Borrowed from openSSL.
3870 + * TLS1.0 synthetic test vectors
3871 */
3872 -#ifdef CONFIG_CRYPTO_FIPS
3873 -#define RSA_TEST_VECTORS 2
3874 +static struct tls_testvec tls_enc_tv_template[] = {
3875 + {
3876 +#ifdef __LITTLE_ENDIAN
3877 + .key = "\x08\x00" /* rta length */
3878 + "\x01\x00" /* rta type */
3879 +#else
3880 + .key = "\x00\x08" /* rta length */
3881 + "\x00\x01" /* rta type */
3882 +#endif
3883 + "\x00\x00\x00\x10" /* enc key length */
3884 + "authenticationkey20benckeyis16_bytes",
3885 + .klen = 8 + 20 + 16,
3886 + .iv = "iv0123456789abcd",
3887 + .input = "Single block msg",
3888 + .ilen = 16,
3889 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3890 + "\x00\x03\x01\x00\x10",
3891 + .alen = 13,
3892 + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3893 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3894 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3895 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3896 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3897 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3898 + .rlen = 16 + 20 + 12,
3899 + }, {
3900 +#ifdef __LITTLE_ENDIAN
3901 + .key = "\x08\x00" /* rta length */
3902 + "\x01\x00" /* rta type */
3903 +#else
3904 + .key = "\x00\x08" /* rta length */
3905 + "\x00\x01" /* rta type */
3906 +#endif
3907 + "\x00\x00\x00\x10" /* enc key length */
3908 + "authenticationkey20benckeyis16_bytes",
3909 + .klen = 8 + 20 + 16,
3910 + .iv = "iv0123456789abcd",
3911 + .input = "",
3912 + .ilen = 0,
3913 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3914 + "\x00\x03\x01\x00\x00",
3915 + .alen = 13,
3916 + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
3917 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
3918 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
3919 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
3920 + .rlen = 20 + 12,
3921 + }, {
3922 +#ifdef __LITTLE_ENDIAN
3923 + .key = "\x08\x00" /* rta length */
3924 + "\x01\x00" /* rta type */
3925 +#else
3926 + .key = "\x00\x08" /* rta length */
3927 + "\x00\x01" /* rta type */
3928 +#endif
3929 + "\x00\x00\x00\x10" /* enc key length */
3930 + "authenticationkey20benckeyis16_bytes",
3931 + .klen = 8 + 20 + 16,
3932 + .iv = "iv0123456789abcd",
3933 + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
3934 + " plaintext285 bytes plaintext285 bytes plaintext285"
3935 + " bytes plaintext285 bytes plaintext285 bytes"
3936 + " plaintext285 bytes plaintext285 bytes plaintext285"
3937 + " bytes plaintext285 bytes plaintext285 bytes"
3938 + " plaintext285 bytes plaintext285 bytes plaintext285"
3939 + " bytes plaintext285 bytes plaintext",
3940 + .ilen = 285,
3941 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3942 + "\x00\x03\x01\x01\x1d",
3943 + .alen = 13,
3944 + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
3945 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
3946 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
3947 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
3948 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
3949 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
3950 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
3951 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
3952 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
3953 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
3954 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
3955 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
3956 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
3957 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
3958 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
3959 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
3960 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
3961 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
3962 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
3963 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
3964 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
3965 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
3966 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
3967 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
3968 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
3969 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
3970 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
3971 + .rlen = 285 + 20 + 15,
3972 + }
3973 +};
3974 +
3975 +static struct tls_testvec tls_dec_tv_template[] = {
3976 + {
3977 +#ifdef __LITTLE_ENDIAN
3978 + .key = "\x08\x00" /* rta length */
3979 + "\x01\x00" /* rta type */
3980 +#else
3981 + .key = "\x00\x08" /* rta length */
3982 + "\x00\x01" /* rta type */
3983 +#endif
3984 + "\x00\x00\x00\x10" /* enc key length */
3985 + "authenticationkey20benckeyis16_bytes",
3986 + .klen = 8 + 20 + 16,
3987 + .iv = "iv0123456789abcd",
3988 + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3989 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3990 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3991 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3992 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3993 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3994 + .ilen = 16 + 20 + 12,
3995 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3996 + "\x00\x03\x01\x00\x30",
3997 + .alen = 13,
3998 + .result = "Single block msg",
3999 + .rlen = 16,
4000 + }, {
4001 +#ifdef __LITTLE_ENDIAN
4002 + .key = "\x08\x00" /* rta length */
4003 + "\x01\x00" /* rta type */
4004 #else
4005 -#define RSA_TEST_VECTORS 5
4006 + .key = "\x00\x08" /* rta length */
4007 + "\x00\x01" /* rta type */
4008 #endif
4009 -static struct akcipher_testvec rsa_tv_template[] = {
4010 + "\x00\x00\x00\x10" /* enc key length */
4011 + "authenticationkey20benckeyis16_bytes",
4012 + .klen = 8 + 20 + 16,
4013 + .iv = "iv0123456789abcd",
4014 + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
4015 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
4016 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
4017 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
4018 + .ilen = 20 + 12,
4019 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
4020 + "\x00\x03\x01\x00\x20",
4021 + .alen = 13,
4022 + .result = "",
4023 + .rlen = 0,
4024 + }, {
4025 +#ifdef __LITTLE_ENDIAN
4026 + .key = "\x08\x00" /* rta length */
4027 + "\x01\x00" /* rta type */
4028 +#else
4029 + .key = "\x00\x08" /* rta length */
4030 + "\x00\x01" /* rta type */
4031 +#endif
4032 + "\x00\x00\x00\x10" /* enc key length */
4033 + "authenticationkey20benckeyis16_bytes",
4034 + .klen = 8 + 20 + 16,
4035 + .iv = "iv0123456789abcd",
4036 + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
4037 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
4038 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
4039 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
4040 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
4041 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
4042 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
4043 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
4044 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
4045 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
4046 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
4047 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
4048 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
4049 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
4050 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
4051 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
4052 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
4053 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
4054 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
4055 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
4056 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
4057 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
4058 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
4059 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
4060 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
4061 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
4062 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
4063 +
4064 + .ilen = 285 + 20 + 15,
4065 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
4066 + "\x00\x03\x01\x01\x40",
4067 + .alen = 13,
4068 + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
4069 + " plaintext285 bytes plaintext285 bytes plaintext285"
4070 + " bytes plaintext285 bytes plaintext285 bytes"
4071 + " plaintext285 bytes plaintext285 bytes plaintext285"
4072 + " bytes plaintext285 bytes plaintext285 bytes"
4073 + " plaintext285 bytes plaintext285 bytes plaintext",
4074 + .rlen = 285,
4075 + }
4076 +};
4077 +
4078 +/*
4079 + * RSA test vectors. Borrowed from openSSL.
4080 + */
4081 +static const struct akcipher_testvec rsa_tv_template[] = {
4082 {
4083 #ifndef CONFIG_CRYPTO_FIPS
4084 .key =
4085 @@ -340,6 +554,7 @@ static struct akcipher_testvec rsa_tv_te
4086 .m_size = 8,
4087 .c_size = 256,
4088 .public_key_vec = true,
4089 +#ifndef CONFIG_CRYPTO_FIPS
4090 }, {
4091 .key =
4092 "\x30\x82\x09\x29" /* sequence of 2345 bytes */
4093 @@ -538,12 +753,11 @@ static struct akcipher_testvec rsa_tv_te
4094 .key_len = 2349,
4095 .m_size = 8,
4096 .c_size = 512,
4097 +#endif
4098 }
4099 };
4100
4101 -#define DH_TEST_VECTORS 2
4102 -
4103 -struct kpp_testvec dh_tv_template[] = {
4104 +static const struct kpp_testvec dh_tv_template[] = {
4105 {
4106 .secret =
4107 #ifdef __LITTLE_ENDIAN
4108 @@ -760,12 +974,7 @@ struct kpp_testvec dh_tv_template[] = {
4109 }
4110 };
4111
4112 -#ifdef CONFIG_CRYPTO_FIPS
4113 -#define ECDH_TEST_VECTORS 1
4114 -#else
4115 -#define ECDH_TEST_VECTORS 2
4116 -#endif
4117 -struct kpp_testvec ecdh_tv_template[] = {
4118 +static const struct kpp_testvec ecdh_tv_template[] = {
4119 {
4120 #ifndef CONFIG_CRYPTO_FIPS
4121 .secret =
4122 @@ -856,9 +1065,7 @@ struct kpp_testvec ecdh_tv_template[] =
4123 /*
4124 * MD4 test vectors from RFC1320
4125 */
4126 -#define MD4_TEST_VECTORS 7
4127 -
4128 -static struct hash_testvec md4_tv_template [] = {
4129 +static const struct hash_testvec md4_tv_template[] = {
4130 {
4131 .plaintext = "",
4132 .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
4133 @@ -899,8 +1106,7 @@ static struct hash_testvec md4_tv_templa
4134 },
4135 };
4136
4137 -#define SHA3_224_TEST_VECTORS 3
4138 -static struct hash_testvec sha3_224_tv_template[] = {
4139 +static const struct hash_testvec sha3_224_tv_template[] = {
4140 {
4141 .plaintext = "",
4142 .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
4143 @@ -925,8 +1131,7 @@ static struct hash_testvec sha3_224_tv_t
4144 },
4145 };
4146
4147 -#define SHA3_256_TEST_VECTORS 3
4148 -static struct hash_testvec sha3_256_tv_template[] = {
4149 +static const struct hash_testvec sha3_256_tv_template[] = {
4150 {
4151 .plaintext = "",
4152 .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
4153 @@ -952,8 +1157,7 @@ static struct hash_testvec sha3_256_tv_t
4154 };
4155
4156
4157 -#define SHA3_384_TEST_VECTORS 3
4158 -static struct hash_testvec sha3_384_tv_template[] = {
4159 +static const struct hash_testvec sha3_384_tv_template[] = {
4160 {
4161 .plaintext = "",
4162 .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
4163 @@ -985,8 +1189,7 @@ static struct hash_testvec sha3_384_tv_t
4164 };
4165
4166
4167 -#define SHA3_512_TEST_VECTORS 3
4168 -static struct hash_testvec sha3_512_tv_template[] = {
4169 +static const struct hash_testvec sha3_512_tv_template[] = {
4170 {
4171 .plaintext = "",
4172 .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
4173 @@ -1027,9 +1230,7 @@ static struct hash_testvec sha3_512_tv_t
4174 /*
4175 * MD5 test vectors from RFC1321
4176 */
4177 -#define MD5_TEST_VECTORS 7
4178 -
4179 -static struct hash_testvec md5_tv_template[] = {
4180 +static const struct hash_testvec md5_tv_template[] = {
4181 {
4182 .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
4183 "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
4184 @@ -1073,9 +1274,7 @@ static struct hash_testvec md5_tv_templa
4185 /*
4186 * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
4187 */
4188 -#define RMD128_TEST_VECTORS 10
4189 -
4190 -static struct hash_testvec rmd128_tv_template[] = {
4191 +static const struct hash_testvec rmd128_tv_template[] = {
4192 {
4193 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
4194 "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
4195 @@ -1137,9 +1336,7 @@ static struct hash_testvec rmd128_tv_tem
4196 /*
4197 * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
4198 */
4199 -#define RMD160_TEST_VECTORS 10
4200 -
4201 -static struct hash_testvec rmd160_tv_template[] = {
4202 +static const struct hash_testvec rmd160_tv_template[] = {
4203 {
4204 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
4205 "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
4206 @@ -1201,9 +1398,7 @@ static struct hash_testvec rmd160_tv_tem
4207 /*
4208 * RIPEMD-256 test vectors
4209 */
4210 -#define RMD256_TEST_VECTORS 8
4211 -
4212 -static struct hash_testvec rmd256_tv_template[] = {
4213 +static const struct hash_testvec rmd256_tv_template[] = {
4214 {
4215 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
4216 "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
4217 @@ -1269,9 +1464,7 @@ static struct hash_testvec rmd256_tv_tem
4218 /*
4219 * RIPEMD-320 test vectors
4220 */
4221 -#define RMD320_TEST_VECTORS 8
4222 -
4223 -static struct hash_testvec rmd320_tv_template[] = {
4224 +static const struct hash_testvec rmd320_tv_template[] = {
4225 {
4226 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
4227 "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
4228 @@ -1334,36 +1527,49 @@ static struct hash_testvec rmd320_tv_tem
4229 }
4230 };
4231
4232 -#define CRCT10DIF_TEST_VECTORS 3
4233 -static struct hash_testvec crct10dif_tv_template[] = {
4234 +static const struct hash_testvec crct10dif_tv_template[] = {
4235 {
4236 - .plaintext = "abc",
4237 - .psize = 3,
4238 -#ifdef __LITTLE_ENDIAN
4239 - .digest = "\x3b\x44",
4240 -#else
4241 - .digest = "\x44\x3b",
4242 -#endif
4243 - }, {
4244 - .plaintext = "1234567890123456789012345678901234567890"
4245 - "123456789012345678901234567890123456789",
4246 - .psize = 79,
4247 -#ifdef __LITTLE_ENDIAN
4248 - .digest = "\x70\x4b",
4249 -#else
4250 - .digest = "\x4b\x70",
4251 -#endif
4252 - }, {
4253 - .plaintext =
4254 - "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
4255 - .psize = 56,
4256 -#ifdef __LITTLE_ENDIAN
4257 - .digest = "\xe3\x9c",
4258 -#else
4259 - .digest = "\x9c\xe3",
4260 -#endif
4261 - .np = 2,
4262 - .tap = { 28, 28 }
4263 + .plaintext = "abc",
4264 + .psize = 3,
4265 + .digest = (u8 *)(u16 []){ 0x443b },
4266 + }, {
4267 + .plaintext = "1234567890123456789012345678901234567890"
4268 + "123456789012345678901234567890123456789",
4269 + .psize = 79,
4270 + .digest = (u8 *)(u16 []){ 0x4b70 },
4271 + .np = 2,
4272 + .tap = { 63, 16 },
4273 + }, {
4274 + .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
4275 + "ddddddddddddd",
4276 + .psize = 56,
4277 + .digest = (u8 *)(u16 []){ 0x9ce3 },
4278 + .np = 8,
4279 + .tap = { 1, 2, 28, 7, 6, 5, 4, 3 },
4280 + }, {
4281 + .plaintext = "1234567890123456789012345678901234567890"
4282 + "1234567890123456789012345678901234567890"
4283 + "1234567890123456789012345678901234567890"
4284 + "1234567890123456789012345678901234567890"
4285 + "1234567890123456789012345678901234567890"
4286 + "1234567890123456789012345678901234567890"
4287 + "1234567890123456789012345678901234567890"
4288 + "123456789012345678901234567890123456789",
4289 + .psize = 319,
4290 + .digest = (u8 *)(u16 []){ 0x44c6 },
4291 + }, {
4292 + .plaintext = "1234567890123456789012345678901234567890"
4293 + "1234567890123456789012345678901234567890"
4294 + "1234567890123456789012345678901234567890"
4295 + "1234567890123456789012345678901234567890"
4296 + "1234567890123456789012345678901234567890"
4297 + "1234567890123456789012345678901234567890"
4298 + "1234567890123456789012345678901234567890"
4299 + "123456789012345678901234567890123456789",
4300 + .psize = 319,
4301 + .digest = (u8 *)(u16 []){ 0x44c6 },
4302 + .np = 4,
4303 + .tap = { 1, 255, 57, 6 },
4304 }
4305 };
4306
4307 @@ -1371,9 +1577,7 @@ static struct hash_testvec crct10dif_tv_
4308  * SHA1 test vectors from FIPS PUB 180-1
4309 * Long vector from CAVS 5.0
4310 */
4311 -#define SHA1_TEST_VECTORS 6
4312 -
4313 -static struct hash_testvec sha1_tv_template[] = {
4314 +static const struct hash_testvec sha1_tv_template[] = {
4315 {
4316 .plaintext = "",
4317 .psize = 0,
4318 @@ -1563,9 +1767,7 @@ static struct hash_testvec sha1_tv_templ
4319 /*
4320  * SHA224 test vectors from FIPS PUB 180-2
4321 */
4322 -#define SHA224_TEST_VECTORS 5
4323 -
4324 -static struct hash_testvec sha224_tv_template[] = {
4325 +static const struct hash_testvec sha224_tv_template[] = {
4326 {
4327 .plaintext = "",
4328 .psize = 0,
4329 @@ -1737,9 +1939,7 @@ static struct hash_testvec sha224_tv_tem
4330 /*
4331  * SHA256 test vectors from NIST
4332 */
4333 -#define SHA256_TEST_VECTORS 5
4334 -
4335 -static struct hash_testvec sha256_tv_template[] = {
4336 +static const struct hash_testvec sha256_tv_template[] = {
4337 {
4338 .plaintext = "",
4339 .psize = 0,
4340 @@ -1910,9 +2110,7 @@ static struct hash_testvec sha256_tv_tem
4341 /*
4342  * SHA384 test vectors from NIST and kerneli
4343 */
4344 -#define SHA384_TEST_VECTORS 6
4345 -
4346 -static struct hash_testvec sha384_tv_template[] = {
4347 +static const struct hash_testvec sha384_tv_template[] = {
4348 {
4349 .plaintext = "",
4350 .psize = 0,
4351 @@ -2104,9 +2302,7 @@ static struct hash_testvec sha384_tv_tem
4352 /*
4353  * SHA512 test vectors from NIST and kerneli
4354 */
4355 -#define SHA512_TEST_VECTORS 6
4356 -
4357 -static struct hash_testvec sha512_tv_template[] = {
4358 +static const struct hash_testvec sha512_tv_template[] = {
4359 {
4360 .plaintext = "",
4361 .psize = 0,
4362 @@ -2313,9 +2509,7 @@ static struct hash_testvec sha512_tv_tem
4363 * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE
4364 * submission
4365 */
4366 -#define WP512_TEST_VECTORS 8
4367 -
4368 -static struct hash_testvec wp512_tv_template[] = {
4369 +static const struct hash_testvec wp512_tv_template[] = {
4370 {
4371 .plaintext = "",
4372 .psize = 0,
4373 @@ -2411,9 +2605,7 @@ static struct hash_testvec wp512_tv_temp
4374 },
4375 };
4376
4377 -#define WP384_TEST_VECTORS 8
4378 -
4379 -static struct hash_testvec wp384_tv_template[] = {
4380 +static const struct hash_testvec wp384_tv_template[] = {
4381 {
4382 .plaintext = "",
4383 .psize = 0,
4384 @@ -2493,9 +2685,7 @@ static struct hash_testvec wp384_tv_temp
4385 },
4386 };
4387
4388 -#define WP256_TEST_VECTORS 8
4389 -
4390 -static struct hash_testvec wp256_tv_template[] = {
4391 +static const struct hash_testvec wp256_tv_template[] = {
4392 {
4393 .plaintext = "",
4394 .psize = 0,
4395 @@ -2562,9 +2752,7 @@ static struct hash_testvec wp256_tv_temp
4396 /*
4397 * TIGER test vectors from Tiger website
4398 */
4399 -#define TGR192_TEST_VECTORS 6
4400 -
4401 -static struct hash_testvec tgr192_tv_template[] = {
4402 +static const struct hash_testvec tgr192_tv_template[] = {
4403 {
4404 .plaintext = "",
4405 .psize = 0,
4406 @@ -2607,9 +2795,7 @@ static struct hash_testvec tgr192_tv_tem
4407 },
4408 };
4409
4410 -#define TGR160_TEST_VECTORS 6
4411 -
4412 -static struct hash_testvec tgr160_tv_template[] = {
4413 +static const struct hash_testvec tgr160_tv_template[] = {
4414 {
4415 .plaintext = "",
4416 .psize = 0,
4417 @@ -2652,9 +2838,7 @@ static struct hash_testvec tgr160_tv_tem
4418 },
4419 };
4420
4421 -#define TGR128_TEST_VECTORS 6
4422 -
4423 -static struct hash_testvec tgr128_tv_template[] = {
4424 +static const struct hash_testvec tgr128_tv_template[] = {
4425 {
4426 .plaintext = "",
4427 .psize = 0,
4428 @@ -2691,9 +2875,7 @@ static struct hash_testvec tgr128_tv_tem
4429 },
4430 };
4431
4432 -#define GHASH_TEST_VECTORS 6
4433 -
4434 -static struct hash_testvec ghash_tv_template[] =
4435 +static const struct hash_testvec ghash_tv_template[] =
4436 {
4437 {
4438 .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
4439 @@ -2808,9 +2990,7 @@ static struct hash_testvec ghash_tv_temp
4440 * HMAC-MD5 test vectors from RFC2202
4441 * (These need to be fixed to not use strlen).
4442 */
4443 -#define HMAC_MD5_TEST_VECTORS 7
4444 -
4445 -static struct hash_testvec hmac_md5_tv_template[] =
4446 +static const struct hash_testvec hmac_md5_tv_template[] =
4447 {
4448 {
4449 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4450 @@ -2890,9 +3070,7 @@ static struct hash_testvec hmac_md5_tv_t
4451 /*
4452 * HMAC-RIPEMD128 test vectors from RFC2286
4453 */
4454 -#define HMAC_RMD128_TEST_VECTORS 7
4455 -
4456 -static struct hash_testvec hmac_rmd128_tv_template[] = {
4457 +static const struct hash_testvec hmac_rmd128_tv_template[] = {
4458 {
4459 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4460 .ksize = 16,
4461 @@ -2971,9 +3149,7 @@ static struct hash_testvec hmac_rmd128_t
4462 /*
4463 * HMAC-RIPEMD160 test vectors from RFC2286
4464 */
4465 -#define HMAC_RMD160_TEST_VECTORS 7
4466 -
4467 -static struct hash_testvec hmac_rmd160_tv_template[] = {
4468 +static const struct hash_testvec hmac_rmd160_tv_template[] = {
4469 {
4470 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4471 .ksize = 20,
4472 @@ -3052,9 +3228,7 @@ static struct hash_testvec hmac_rmd160_t
4473 /*
4474 * HMAC-SHA1 test vectors from RFC2202
4475 */
4476 -#define HMAC_SHA1_TEST_VECTORS 7
4477 -
4478 -static struct hash_testvec hmac_sha1_tv_template[] = {
4479 +static const struct hash_testvec hmac_sha1_tv_template[] = {
4480 {
4481 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4482 .ksize = 20,
4483 @@ -3135,9 +3309,7 @@ static struct hash_testvec hmac_sha1_tv_
4484 /*
4485 * SHA224 HMAC test vectors from RFC4231
4486 */
4487 -#define HMAC_SHA224_TEST_VECTORS 4
4488 -
4489 -static struct hash_testvec hmac_sha224_tv_template[] = {
4490 +static const struct hash_testvec hmac_sha224_tv_template[] = {
4491 {
4492 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4493 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4494 @@ -3250,9 +3422,7 @@ static struct hash_testvec hmac_sha224_t
4495 * HMAC-SHA256 test vectors from
4496 * draft-ietf-ipsec-ciph-sha-256-01.txt
4497 */
4498 -#define HMAC_SHA256_TEST_VECTORS 10
4499 -
4500 -static struct hash_testvec hmac_sha256_tv_template[] = {
4501 +static const struct hash_testvec hmac_sha256_tv_template[] = {
4502 {
4503 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
4504 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
4505 @@ -3387,9 +3557,7 @@ static struct hash_testvec hmac_sha256_t
4506 },
4507 };
4508
4509 -#define CMAC_AES_TEST_VECTORS 6
4510 -
4511 -static struct hash_testvec aes_cmac128_tv_template[] = {
4512 +static const struct hash_testvec aes_cmac128_tv_template[] = {
4513 { /* From NIST Special Publication 800-38B, AES-128 */
4514 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4515 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4516 @@ -3464,9 +3632,67 @@ static struct hash_testvec aes_cmac128_t
4517 }
4518 };
4519
4520 -#define CMAC_DES3_EDE_TEST_VECTORS 4
4521 +static const struct hash_testvec aes_cbcmac_tv_template[] = {
4522 + {
4523 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4524 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4525 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4526 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
4527 + .digest = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
4528 + "\xa8\x9e\xca\xf3\x24\x66\xef\x97",
4529 + .psize = 16,
4530 + .ksize = 16,
4531 + }, {
4532 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4533 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4534 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4535 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4536 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4537 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4538 + "\x30",
4539 + .digest = "\x9d\x0d\xd0\x63\xfb\xcb\x24\x43"
4540 + "\xf8\xf2\x76\x03\xac\x39\xb0\x9d",
4541 + .psize = 33,
4542 + .ksize = 16,
4543 + .np = 2,
4544 + .tap = { 7, 26 },
4545 + }, {
4546 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4547 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4548 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4549 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4550 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4551 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4552 + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4553 + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4554 + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4555 + "\xad\x2b\x41\x7b\xe6\x6c\x37",
4556 + .digest = "\xc0\x71\x73\xb8\xa0\x2c\x11\x7c"
4557 + "\xaf\xdc\xb2\xf8\x89\x32\xa3\x3a",
4558 + .psize = 63,
4559 + .ksize = 16,
4560 + }, {
4561 + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
4562 + "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
4563 + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
4564 + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
4565 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4566 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4567 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4568 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4569 + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4570 + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4571 + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4572 + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10"
4573 + "\x1c",
4574 + .digest = "\x6a\x4e\xdb\x21\x47\x51\xdf\x4f"
4575 + "\xa8\x4d\x4c\x10\x3b\x72\x7d\xd6",
4576 + .psize = 65,
4577 + .ksize = 32,
4578 + }
4579 +};
4580
4581 -static struct hash_testvec des3_ede_cmac64_tv_template[] = {
4582 +static const struct hash_testvec des3_ede_cmac64_tv_template[] = {
4583 /*
4584 * From NIST Special Publication 800-38B, Three Key TDEA
4585 * Corrected test vectors from:
4586 @@ -3512,9 +3738,7 @@ static struct hash_testvec des3_ede_cmac
4587 }
4588 };
4589
4590 -#define XCBC_AES_TEST_VECTORS 6
4591 -
4592 -static struct hash_testvec aes_xcbc128_tv_template[] = {
4593 +static const struct hash_testvec aes_xcbc128_tv_template[] = {
4594 {
4595 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4596 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4597 @@ -3580,36 +3804,35 @@ static struct hash_testvec aes_xcbc128_t
4598 }
4599 };
4600
4601 -#define VMAC_AES_TEST_VECTORS 11
4602 -static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4603 - '\x02', '\x03', '\x02', '\x02',
4604 - '\x02', '\x04', '\x01', '\x07',
4605 - '\x04', '\x01', '\x04', '\x03',};
4606 -static char vmac_string2[128] = {'a', 'b', 'c',};
4607 -static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4608 - 'a', 'b', 'c', 'a', 'b', 'c',
4609 - 'a', 'b', 'c', 'a', 'b', 'c',
4610 - 'a', 'b', 'c', 'a', 'b', 'c',
4611 - 'a', 'b', 'c', 'a', 'b', 'c',
4612 - 'a', 'b', 'c', 'a', 'b', 'c',
4613 - 'a', 'b', 'c', 'a', 'b', 'c',
4614 - 'a', 'b', 'c', 'a', 'b', 'c',
4615 - };
4616 -
4617 -static char vmac_string4[17] = {'b', 'c', 'e', 'f',
4618 - 'i', 'j', 'l', 'm',
4619 - 'o', 'p', 'r', 's',
4620 - 't', 'u', 'w', 'x', 'z'};
4621 -
4622 -static char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4623 - 'o', 'l', 'k', ']', '%',
4624 - '9', '2', '7', '!', 'A'};
4625 -
4626 -static char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4627 - 'i', '!', '#', 'w', '0',
4628 - 'z', '/', '4', 'A', 'n'};
4629 +static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4630 + '\x02', '\x03', '\x02', '\x02',
4631 + '\x02', '\x04', '\x01', '\x07',
4632 + '\x04', '\x01', '\x04', '\x03',};
4633 +static const char vmac_string2[128] = {'a', 'b', 'c',};
4634 +static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4635 + 'a', 'b', 'c', 'a', 'b', 'c',
4636 + 'a', 'b', 'c', 'a', 'b', 'c',
4637 + 'a', 'b', 'c', 'a', 'b', 'c',
4638 + 'a', 'b', 'c', 'a', 'b', 'c',
4639 + 'a', 'b', 'c', 'a', 'b', 'c',
4640 + 'a', 'b', 'c', 'a', 'b', 'c',
4641 + 'a', 'b', 'c', 'a', 'b', 'c',
4642 + };
4643 +
4644 +static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
4645 + 'i', 'j', 'l', 'm',
4646 + 'o', 'p', 'r', 's',
4647 + 't', 'u', 'w', 'x', 'z'};
4648 +
4649 +static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4650 + 'o', 'l', 'k', ']', '%',
4651 + '9', '2', '7', '!', 'A'};
4652 +
4653 +static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4654 + 'i', '!', '#', 'w', '0',
4655 + 'z', '/', '4', 'A', 'n'};
4656
4657 -static struct hash_testvec aes_vmac128_tv_template[] = {
4658 +static const struct hash_testvec aes_vmac128_tv_template[] = {
4659 {
4660 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4661 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4662 @@ -3687,9 +3910,7 @@ static struct hash_testvec aes_vmac128_t
4663 * SHA384 HMAC test vectors from RFC4231
4664 */
4665
4666 -#define HMAC_SHA384_TEST_VECTORS 4
4667 -
4668 -static struct hash_testvec hmac_sha384_tv_template[] = {
4669 +static const struct hash_testvec hmac_sha384_tv_template[] = {
4670 {
4671 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4672 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4673 @@ -3787,9 +4008,7 @@ static struct hash_testvec hmac_sha384_t
4674 * SHA512 HMAC test vectors from RFC4231
4675 */
4676
4677 -#define HMAC_SHA512_TEST_VECTORS 4
4678 -
4679 -static struct hash_testvec hmac_sha512_tv_template[] = {
4680 +static const struct hash_testvec hmac_sha512_tv_template[] = {
4681 {
4682 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4683 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4684 @@ -3894,9 +4113,7 @@ static struct hash_testvec hmac_sha512_t
4685 },
4686 };
4687
4688 -#define HMAC_SHA3_224_TEST_VECTORS 4
4689 -
4690 -static struct hash_testvec hmac_sha3_224_tv_template[] = {
4691 +static const struct hash_testvec hmac_sha3_224_tv_template[] = {
4692 {
4693 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4694 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4695 @@ -3985,9 +4202,7 @@ static struct hash_testvec hmac_sha3_224
4696 },
4697 };
4698
4699 -#define HMAC_SHA3_256_TEST_VECTORS 4
4700 -
4701 -static struct hash_testvec hmac_sha3_256_tv_template[] = {
4702 +static const struct hash_testvec hmac_sha3_256_tv_template[] = {
4703 {
4704 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4705 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4706 @@ -4076,9 +4291,7 @@ static struct hash_testvec hmac_sha3_256
4707 },
4708 };
4709
4710 -#define HMAC_SHA3_384_TEST_VECTORS 4
4711 -
4712 -static struct hash_testvec hmac_sha3_384_tv_template[] = {
4713 +static const struct hash_testvec hmac_sha3_384_tv_template[] = {
4714 {
4715 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4716 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4717 @@ -4175,9 +4388,7 @@ static struct hash_testvec hmac_sha3_384
4718 },
4719 };
4720
4721 -#define HMAC_SHA3_512_TEST_VECTORS 4
4722 -
4723 -static struct hash_testvec hmac_sha3_512_tv_template[] = {
4724 +static const struct hash_testvec hmac_sha3_512_tv_template[] = {
4725 {
4726 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4727 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4728 @@ -4286,9 +4497,7 @@ static struct hash_testvec hmac_sha3_512
4729 * Poly1305 test vectors from RFC7539 A.3.
4730 */
4731
4732 -#define POLY1305_TEST_VECTORS 11
4733 -
4734 -static struct hash_testvec poly1305_tv_template[] = {
4735 +static const struct hash_testvec poly1305_tv_template[] = {
4736 { /* Test Vector #1 */
4737 .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
4738 "\x00\x00\x00\x00\x00\x00\x00\x00"
4739 @@ -4533,20 +4742,7 @@ static struct hash_testvec poly1305_tv_t
4740 /*
4741 * DES test vectors.
4742 */
4743 -#define DES_ENC_TEST_VECTORS 11
4744 -#define DES_DEC_TEST_VECTORS 5
4745 -#define DES_CBC_ENC_TEST_VECTORS 6
4746 -#define DES_CBC_DEC_TEST_VECTORS 5
4747 -#define DES_CTR_ENC_TEST_VECTORS 2
4748 -#define DES_CTR_DEC_TEST_VECTORS 2
4749 -#define DES3_EDE_ENC_TEST_VECTORS 4
4750 -#define DES3_EDE_DEC_TEST_VECTORS 4
4751 -#define DES3_EDE_CBC_ENC_TEST_VECTORS 2
4752 -#define DES3_EDE_CBC_DEC_TEST_VECTORS 2
4753 -#define DES3_EDE_CTR_ENC_TEST_VECTORS 2
4754 -#define DES3_EDE_CTR_DEC_TEST_VECTORS 2
4755 -
4756 -static struct cipher_testvec des_enc_tv_template[] = {
4757 +static const struct cipher_testvec des_enc_tv_template[] = {
4758 { /* From Applied Cryptography */
4759 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4760 .klen = 8,
4761 @@ -4720,7 +4916,7 @@ static struct cipher_testvec des_enc_tv_
4762 },
4763 };
4764
4765 -static struct cipher_testvec des_dec_tv_template[] = {
4766 +static const struct cipher_testvec des_dec_tv_template[] = {
4767 { /* From Applied Cryptography */
4768 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4769 .klen = 8,
4770 @@ -4830,7 +5026,7 @@ static struct cipher_testvec des_dec_tv_
4771 },
4772 };
4773
4774 -static struct cipher_testvec des_cbc_enc_tv_template[] = {
4775 +static const struct cipher_testvec des_cbc_enc_tv_template[] = {
4776 { /* From OpenSSL */
4777 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4778 .klen = 8,
4779 @@ -4956,7 +5152,7 @@ static struct cipher_testvec des_cbc_enc
4780 },
4781 };
4782
4783 -static struct cipher_testvec des_cbc_dec_tv_template[] = {
4784 +static const struct cipher_testvec des_cbc_dec_tv_template[] = {
4785 { /* FIPS Pub 81 */
4786 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4787 .klen = 8,
4788 @@ -5065,7 +5261,7 @@ static struct cipher_testvec des_cbc_dec
4789 },
4790 };
4791
4792 -static struct cipher_testvec des_ctr_enc_tv_template[] = {
4793 +static const struct cipher_testvec des_ctr_enc_tv_template[] = {
4794 { /* Generated with Crypto++ */
4795 .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4796 .klen = 8,
4797 @@ -5211,7 +5407,7 @@ static struct cipher_testvec des_ctr_enc
4798 },
4799 };
4800
4801 -static struct cipher_testvec des_ctr_dec_tv_template[] = {
4802 +static const struct cipher_testvec des_ctr_dec_tv_template[] = {
4803 { /* Generated with Crypto++ */
4804 .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4805 .klen = 8,
4806 @@ -5357,7 +5553,7 @@ static struct cipher_testvec des_ctr_dec
4807 },
4808 };
4809
4810 -static struct cipher_testvec des3_ede_enc_tv_template[] = {
4811 +static const struct cipher_testvec des3_ede_enc_tv_template[] = {
4812 { /* These are from openssl */
4813 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4814 "\x55\x55\x55\x55\x55\x55\x55\x55"
4815 @@ -5522,7 +5718,7 @@ static struct cipher_testvec des3_ede_en
4816 },
4817 };
4818
4819 -static struct cipher_testvec des3_ede_dec_tv_template[] = {
4820 +static const struct cipher_testvec des3_ede_dec_tv_template[] = {
4821 { /* These are from openssl */
4822 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4823 "\x55\x55\x55\x55\x55\x55\x55\x55"
4824 @@ -5687,7 +5883,7 @@ static struct cipher_testvec des3_ede_de
4825 },
4826 };
4827
4828 -static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4829 +static const struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4830 { /* Generated from openssl */
4831 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4832 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4833 @@ -5867,7 +6063,7 @@ static struct cipher_testvec des3_ede_cb
4834 },
4835 };
4836
4837 -static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4838 +static const struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4839 { /* Generated from openssl */
4840 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4841 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4842 @@ -6047,7 +6243,7 @@ static struct cipher_testvec des3_ede_cb
4843 },
4844 };
4845
4846 -static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4847 +static const struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4848 { /* Generated with Crypto++ */
4849 .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4850 "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4851 @@ -6325,7 +6521,7 @@ static struct cipher_testvec des3_ede_ct
4852 },
4853 };
4854
4855 -static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4856 +static const struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4857 { /* Generated with Crypto++ */
4858 .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4859 "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4860 @@ -6606,14 +6802,7 @@ static struct cipher_testvec des3_ede_ct
4861 /*
4862 * Blowfish test vectors.
4863 */
4864 -#define BF_ENC_TEST_VECTORS 7
4865 -#define BF_DEC_TEST_VECTORS 7
4866 -#define BF_CBC_ENC_TEST_VECTORS 2
4867 -#define BF_CBC_DEC_TEST_VECTORS 2
4868 -#define BF_CTR_ENC_TEST_VECTORS 2
4869 -#define BF_CTR_DEC_TEST_VECTORS 2
4870 -
4871 -static struct cipher_testvec bf_enc_tv_template[] = {
4872 +static const struct cipher_testvec bf_enc_tv_template[] = {
4873 { /* DES test vectors from OpenSSL */
4874 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
4875 .klen = 8,
4876 @@ -6805,7 +6994,7 @@ static struct cipher_testvec bf_enc_tv_t
4877 },
4878 };
4879
4880 -static struct cipher_testvec bf_dec_tv_template[] = {
4881 +static const struct cipher_testvec bf_dec_tv_template[] = {
4882 { /* DES test vectors from OpenSSL */
4883 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
4884 .klen = 8,
4885 @@ -6997,7 +7186,7 @@ static struct cipher_testvec bf_dec_tv_t
4886 },
4887 };
4888
4889 -static struct cipher_testvec bf_cbc_enc_tv_template[] = {
4890 +static const struct cipher_testvec bf_cbc_enc_tv_template[] = {
4891 { /* From OpenSSL */
4892 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4893 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4894 @@ -7154,7 +7343,7 @@ static struct cipher_testvec bf_cbc_enc_
4895 },
4896 };
4897
4898 -static struct cipher_testvec bf_cbc_dec_tv_template[] = {
4899 +static const struct cipher_testvec bf_cbc_dec_tv_template[] = {
4900 { /* From OpenSSL */
4901 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4902 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4903 @@ -7311,7 +7500,7 @@ static struct cipher_testvec bf_cbc_dec_
4904 },
4905 };
4906
4907 -static struct cipher_testvec bf_ctr_enc_tv_template[] = {
4908 +static const struct cipher_testvec bf_ctr_enc_tv_template[] = {
4909 { /* Generated with Crypto++ */
4910 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4911 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4912 @@ -7723,7 +7912,7 @@ static struct cipher_testvec bf_ctr_enc_
4913 },
4914 };
4915
4916 -static struct cipher_testvec bf_ctr_dec_tv_template[] = {
4917 +static const struct cipher_testvec bf_ctr_dec_tv_template[] = {
4918 { /* Generated with Crypto++ */
4919 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4920 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4921 @@ -8138,18 +8327,7 @@ static struct cipher_testvec bf_ctr_dec_
4922 /*
4923 * Twofish test vectors.
4924 */
4925 -#define TF_ENC_TEST_VECTORS 4
4926 -#define TF_DEC_TEST_VECTORS 4
4927 -#define TF_CBC_ENC_TEST_VECTORS 5
4928 -#define TF_CBC_DEC_TEST_VECTORS 5
4929 -#define TF_CTR_ENC_TEST_VECTORS 2
4930 -#define TF_CTR_DEC_TEST_VECTORS 2
4931 -#define TF_LRW_ENC_TEST_VECTORS 8
4932 -#define TF_LRW_DEC_TEST_VECTORS 8
4933 -#define TF_XTS_ENC_TEST_VECTORS 5
4934 -#define TF_XTS_DEC_TEST_VECTORS 5
4935 -
4936 -static struct cipher_testvec tf_enc_tv_template[] = {
4937 +static const struct cipher_testvec tf_enc_tv_template[] = {
4938 {
4939 .key = zeroed_string,
4940 .klen = 16,
4941 @@ -8317,7 +8495,7 @@ static struct cipher_testvec tf_enc_tv_t
4942 },
4943 };
4944
4945 -static struct cipher_testvec tf_dec_tv_template[] = {
4946 +static const struct cipher_testvec tf_dec_tv_template[] = {
4947 {
4948 .key = zeroed_string,
4949 .klen = 16,
4950 @@ -8485,7 +8663,7 @@ static struct cipher_testvec tf_dec_tv_t
4951 },
4952 };
4953
4954 -static struct cipher_testvec tf_cbc_enc_tv_template[] = {
4955 +static const struct cipher_testvec tf_cbc_enc_tv_template[] = {
4956 { /* Generated with Nettle */
4957 .key = zeroed_string,
4958 .klen = 16,
4959 @@ -8668,7 +8846,7 @@ static struct cipher_testvec tf_cbc_enc_
4960 },
4961 };
4962
4963 -static struct cipher_testvec tf_cbc_dec_tv_template[] = {
4964 +static const struct cipher_testvec tf_cbc_dec_tv_template[] = {
4965 { /* Reverse of the first four above */
4966 .key = zeroed_string,
4967 .klen = 16,
4968 @@ -8851,7 +9029,7 @@ static struct cipher_testvec tf_cbc_dec_
4969 },
4970 };
4971
4972 -static struct cipher_testvec tf_ctr_enc_tv_template[] = {
4973 +static const struct cipher_testvec tf_ctr_enc_tv_template[] = {
4974 { /* Generated with Crypto++ */
4975 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4976 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4977 @@ -9262,7 +9440,7 @@ static struct cipher_testvec tf_ctr_enc_
4978 },
4979 };
4980
4981 -static struct cipher_testvec tf_ctr_dec_tv_template[] = {
4982 +static const struct cipher_testvec tf_ctr_dec_tv_template[] = {
4983 { /* Generated with Crypto++ */
4984 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4985 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4986 @@ -9673,7 +9851,7 @@ static struct cipher_testvec tf_ctr_dec_
4987 },
4988 };
4989
4990 -static struct cipher_testvec tf_lrw_enc_tv_template[] = {
4991 +static const struct cipher_testvec tf_lrw_enc_tv_template[] = {
4992 /* Generated from AES-LRW test vectors */
4993 {
4994 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
4995 @@ -9925,7 +10103,7 @@ static struct cipher_testvec tf_lrw_enc_
4996 },
4997 };
4998
4999 -static struct cipher_testvec tf_lrw_dec_tv_template[] = {
5000 +static const struct cipher_testvec tf_lrw_dec_tv_template[] = {
5001 /* Generated from AES-LRW test vectors */
5002 /* same as enc vectors with input and result reversed */
5003 {
5004 @@ -10178,7 +10356,7 @@ static struct cipher_testvec tf_lrw_dec_
5005 },
5006 };
5007
5008 -static struct cipher_testvec tf_xts_enc_tv_template[] = {
5009 +static const struct cipher_testvec tf_xts_enc_tv_template[] = {
5010 /* Generated from AES-XTS test vectors */
5011 {
5012 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5013 @@ -10520,7 +10698,7 @@ static struct cipher_testvec tf_xts_enc_
5014 },
5015 };
5016
5017 -static struct cipher_testvec tf_xts_dec_tv_template[] = {
5018 +static const struct cipher_testvec tf_xts_dec_tv_template[] = {
5019 /* Generated from AES-XTS test vectors */
5020 /* same as enc vectors with input and result reversed */
5021 {
5022 @@ -10867,25 +11045,7 @@ static struct cipher_testvec tf_xts_dec_
5023 * Serpent test vectors. These are backwards because Serpent writes
5024 * octet sequences in right-to-left mode.
5025 */
5026 -#define SERPENT_ENC_TEST_VECTORS 5
5027 -#define SERPENT_DEC_TEST_VECTORS 5
5028 -
5029 -#define TNEPRES_ENC_TEST_VECTORS 4
5030 -#define TNEPRES_DEC_TEST_VECTORS 4
5031 -
5032 -#define SERPENT_CBC_ENC_TEST_VECTORS 1
5033 -#define SERPENT_CBC_DEC_TEST_VECTORS 1
5034 -
5035 -#define SERPENT_CTR_ENC_TEST_VECTORS 2
5036 -#define SERPENT_CTR_DEC_TEST_VECTORS 2
5037 -
5038 -#define SERPENT_LRW_ENC_TEST_VECTORS 8
5039 -#define SERPENT_LRW_DEC_TEST_VECTORS 8
5040 -
5041 -#define SERPENT_XTS_ENC_TEST_VECTORS 5
5042 -#define SERPENT_XTS_DEC_TEST_VECTORS 5
5043 -
5044 -static struct cipher_testvec serpent_enc_tv_template[] = {
5045 +static const struct cipher_testvec serpent_enc_tv_template[] = {
5046 {
5047 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
5048 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5049 @@ -11061,7 +11221,7 @@ static struct cipher_testvec serpent_enc
5050 },
5051 };
5052
5053 -static struct cipher_testvec tnepres_enc_tv_template[] = {
5054 +static const struct cipher_testvec tnepres_enc_tv_template[] = {
5055 { /* KeySize=128, PT=0, I=1 */
5056 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
5057 "\x00\x00\x00\x00\x00\x00\x00\x00",
5058 @@ -11111,7 +11271,7 @@ static struct cipher_testvec tnepres_enc
5059 };
5060
5061
5062 -static struct cipher_testvec serpent_dec_tv_template[] = {
5063 +static const struct cipher_testvec serpent_dec_tv_template[] = {
5064 {
5065 .input = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
5066 "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
5067 @@ -11287,7 +11447,7 @@ static struct cipher_testvec serpent_dec
5068 },
5069 };
5070
5071 -static struct cipher_testvec tnepres_dec_tv_template[] = {
5072 +static const struct cipher_testvec tnepres_dec_tv_template[] = {
5073 {
5074 .input = "\x41\xcc\x6b\x31\x59\x31\x45\x97"
5075 "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
5076 @@ -11328,7 +11488,7 @@ static struct cipher_testvec tnepres_dec
5077 },
5078 };
5079
5080 -static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5081 +static const struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5082 { /* Generated with Crypto++ */
5083 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5084 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5085 @@ -11469,7 +11629,7 @@ static struct cipher_testvec serpent_cbc
5086 },
5087 };
5088
5089 -static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5090 +static const struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5091 { /* Generated with Crypto++ */
5092 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5093 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5094 @@ -11610,7 +11770,7 @@ static struct cipher_testvec serpent_cbc
5095 },
5096 };
5097
5098 -static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5099 +static const struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5100 { /* Generated with Crypto++ */
5101 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5102 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5103 @@ -12021,7 +12181,7 @@ static struct cipher_testvec serpent_ctr
5104 },
5105 };
5106
5107 -static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5108 +static const struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5109 { /* Generated with Crypto++ */
5110 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5111 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5112 @@ -12432,7 +12592,7 @@ static struct cipher_testvec serpent_ctr
5113 },
5114 };
5115
5116 -static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5117 +static const struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5118 /* Generated from AES-LRW test vectors */
5119 {
5120 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5121 @@ -12684,7 +12844,7 @@ static struct cipher_testvec serpent_lrw
5122 },
5123 };
5124
5125 -static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5126 +static const struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5127 /* Generated from AES-LRW test vectors */
5128 /* same as enc vectors with input and result reversed */
5129 {
5130 @@ -12937,7 +13097,7 @@ static struct cipher_testvec serpent_lrw
5131 },
5132 };
5133
5134 -static struct cipher_testvec serpent_xts_enc_tv_template[] = {
5135 +static const struct cipher_testvec serpent_xts_enc_tv_template[] = {
5136 /* Generated from AES-XTS test vectors */
5137 {
5138 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5139 @@ -13279,7 +13439,7 @@ static struct cipher_testvec serpent_xts
5140 },
5141 };
5142
5143 -static struct cipher_testvec serpent_xts_dec_tv_template[] = {
5144 +static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
5145 /* Generated from AES-XTS test vectors */
5146 /* same as enc vectors with input and result reversed */
5147 {
5148 @@ -13623,18 +13783,7 @@ static struct cipher_testvec serpent_xts
5149 };
5150
5151 /* Cast6 test vectors from RFC 2612 */
5152 -#define CAST6_ENC_TEST_VECTORS 4
5153 -#define CAST6_DEC_TEST_VECTORS 4
5154 -#define CAST6_CBC_ENC_TEST_VECTORS 1
5155 -#define CAST6_CBC_DEC_TEST_VECTORS 1
5156 -#define CAST6_CTR_ENC_TEST_VECTORS 2
5157 -#define CAST6_CTR_DEC_TEST_VECTORS 2
5158 -#define CAST6_LRW_ENC_TEST_VECTORS 1
5159 -#define CAST6_LRW_DEC_TEST_VECTORS 1
5160 -#define CAST6_XTS_ENC_TEST_VECTORS 1
5161 -#define CAST6_XTS_DEC_TEST_VECTORS 1
5162 -
5163 -static struct cipher_testvec cast6_enc_tv_template[] = {
5164 +static const struct cipher_testvec cast6_enc_tv_template[] = {
5165 {
5166 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5167 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5168 @@ -13805,7 +13954,7 @@ static struct cipher_testvec cast6_enc_t
5169 },
5170 };
5171
5172 -static struct cipher_testvec cast6_dec_tv_template[] = {
5173 +static const struct cipher_testvec cast6_dec_tv_template[] = {
5174 {
5175 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5176 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5177 @@ -13976,7 +14125,7 @@ static struct cipher_testvec cast6_dec_t
5178 },
5179 };
5180
5181 -static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5182 +static const struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5183 { /* Generated from TF test vectors */
5184 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5185 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5186 @@ -14117,7 +14266,7 @@ static struct cipher_testvec cast6_cbc_e
5187 },
5188 };
5189
5190 -static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5191 +static const struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5192 { /* Generated from TF test vectors */
5193 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5194 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5195 @@ -14258,7 +14407,7 @@ static struct cipher_testvec cast6_cbc_d
5196 },
5197 };
5198
5199 -static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5200 +static const struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5201 { /* Generated from TF test vectors */
5202 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5203 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5204 @@ -14415,7 +14564,7 @@ static struct cipher_testvec cast6_ctr_e
5205 },
5206 };
5207
5208 -static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5209 +static const struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5210 { /* Generated from TF test vectors */
5211 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5212 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5213 @@ -14572,7 +14721,7 @@ static struct cipher_testvec cast6_ctr_d
5214 },
5215 };
5216
5217 -static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5218 +static const struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5219 { /* Generated from TF test vectors */
5220 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5221 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5222 @@ -14719,7 +14868,7 @@ static struct cipher_testvec cast6_lrw_e
5223 },
5224 };
5225
5226 -static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5227 +static const struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5228 { /* Generated from TF test vectors */
5229 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5230 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5231 @@ -14866,7 +15015,7 @@ static struct cipher_testvec cast6_lrw_d
5232 },
5233 };
5234
5235 -static struct cipher_testvec cast6_xts_enc_tv_template[] = {
5236 +static const struct cipher_testvec cast6_xts_enc_tv_template[] = {
5237 { /* Generated from TF test vectors */
5238 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
5239 "\x23\x53\x60\x28\x74\x71\x35\x26"
5240 @@ -15015,7 +15164,7 @@ static struct cipher_testvec cast6_xts_e
5241 },
5242 };
5243
5244 -static struct cipher_testvec cast6_xts_dec_tv_template[] = {
5245 +static const struct cipher_testvec cast6_xts_dec_tv_template[] = {
5246 { /* Generated from TF test vectors */
5247 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
5248 "\x23\x53\x60\x28\x74\x71\x35\x26"
5249 @@ -15168,39 +15317,7 @@ static struct cipher_testvec cast6_xts_d
5250 /*
5251 * AES test vectors.
5252 */
5253 -#define AES_ENC_TEST_VECTORS 4
5254 -#define AES_DEC_TEST_VECTORS 4
5255 -#define AES_CBC_ENC_TEST_VECTORS 5
5256 -#define AES_CBC_DEC_TEST_VECTORS 5
5257 -#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
5258 -#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
5259 -#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2
5260 -#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC 2
5261 -#define HMAC_SHA1_AES_CBC_ENC_TEST_VEC 7
5262 -#define HMAC_SHA256_AES_CBC_ENC_TEST_VEC 7
5263 -#define HMAC_SHA512_AES_CBC_ENC_TEST_VEC 7
5264 -#define AES_LRW_ENC_TEST_VECTORS 8
5265 -#define AES_LRW_DEC_TEST_VECTORS 8
5266 -#define AES_XTS_ENC_TEST_VECTORS 5
5267 -#define AES_XTS_DEC_TEST_VECTORS 5
5268 -#define AES_CTR_ENC_TEST_VECTORS 5
5269 -#define AES_CTR_DEC_TEST_VECTORS 5
5270 -#define AES_OFB_ENC_TEST_VECTORS 1
5271 -#define AES_OFB_DEC_TEST_VECTORS 1
5272 -#define AES_CTR_3686_ENC_TEST_VECTORS 7
5273 -#define AES_CTR_3686_DEC_TEST_VECTORS 6
5274 -#define AES_GCM_ENC_TEST_VECTORS 9
5275 -#define AES_GCM_DEC_TEST_VECTORS 8
5276 -#define AES_GCM_4106_ENC_TEST_VECTORS 23
5277 -#define AES_GCM_4106_DEC_TEST_VECTORS 23
5278 -#define AES_GCM_4543_ENC_TEST_VECTORS 1
5279 -#define AES_GCM_4543_DEC_TEST_VECTORS 2
5280 -#define AES_CCM_ENC_TEST_VECTORS 8
5281 -#define AES_CCM_DEC_TEST_VECTORS 7
5282 -#define AES_CCM_4309_ENC_TEST_VECTORS 7
5283 -#define AES_CCM_4309_DEC_TEST_VECTORS 10
5284 -
5285 -static struct cipher_testvec aes_enc_tv_template[] = {
5286 +static const struct cipher_testvec aes_enc_tv_template[] = {
5287 { /* From FIPS-197 */
5288 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
5289 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5290 @@ -15372,7 +15489,7 @@ static struct cipher_testvec aes_enc_tv_
5291 },
5292 };
5293
5294 -static struct cipher_testvec aes_dec_tv_template[] = {
5295 +static const struct cipher_testvec aes_dec_tv_template[] = {
5296 { /* From FIPS-197 */
5297 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
5298 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5299 @@ -15544,7 +15661,7 @@ static struct cipher_testvec aes_dec_tv_
5300 },
5301 };
5302
5303 -static struct cipher_testvec aes_cbc_enc_tv_template[] = {
5304 +static const struct cipher_testvec aes_cbc_enc_tv_template[] = {
5305 { /* From RFC 3602 */
5306 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5307 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5308 @@ -15766,7 +15883,7 @@ static struct cipher_testvec aes_cbc_enc
5309 },
5310 };
5311
5312 -static struct cipher_testvec aes_cbc_dec_tv_template[] = {
5313 +static const struct cipher_testvec aes_cbc_dec_tv_template[] = {
5314 { /* From RFC 3602 */
5315 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5316 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5317 @@ -15988,7 +16105,7 @@ static struct cipher_testvec aes_cbc_dec
5318 },
5319 };
5320
5321 -static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5322 +static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5323 { /* Input data from RFC 2410 Case 1 */
5324 #ifdef __LITTLE_ENDIAN
5325 .key = "\x08\x00" /* rta length */
5326 @@ -16030,7 +16147,7 @@ static struct aead_testvec hmac_md5_ecb_
5327 },
5328 };
5329
5330 -static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5331 +static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5332 {
5333 #ifdef __LITTLE_ENDIAN
5334 .key = "\x08\x00" /* rta length */
5335 @@ -16072,7 +16189,7 @@ static struct aead_testvec hmac_md5_ecb_
5336 },
5337 };
5338
5339 -static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5340 +static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5341 { /* RFC 3602 Case 1 */
5342 #ifdef __LITTLE_ENDIAN
5343 .key = "\x08\x00" /* rta length */
5344 @@ -16341,7 +16458,7 @@ static struct aead_testvec hmac_sha1_aes
5345 },
5346 };
5347
5348 -static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5349 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5350 { /* Input data from RFC 2410 Case 1 */
5351 #ifdef __LITTLE_ENDIAN
5352 .key = "\x08\x00" /* rta length */
5353 @@ -16387,7 +16504,7 @@ static struct aead_testvec hmac_sha1_ecb
5354 },
5355 };
5356
5357 -static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5358 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5359 {
5360 #ifdef __LITTLE_ENDIAN
5361 .key = "\x08\x00" /* rta length */
5362 @@ -16433,7 +16550,7 @@ static struct aead_testvec hmac_sha1_ecb
5363 },
5364 };
5365
5366 -static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5367 +static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5368 { /* RFC 3602 Case 1 */
5369 #ifdef __LITTLE_ENDIAN
5370 .key = "\x08\x00" /* rta length */
5371 @@ -16716,7 +16833,7 @@ static struct aead_testvec hmac_sha256_a
5372 },
5373 };
5374
5375 -static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5376 +static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5377 { /* RFC 3602 Case 1 */
5378 #ifdef __LITTLE_ENDIAN
5379 .key = "\x08\x00" /* rta length */
5380 @@ -17055,9 +17172,7 @@ static struct aead_testvec hmac_sha512_a
5381 },
5382 };
5383
5384 -#define HMAC_SHA1_DES_CBC_ENC_TEST_VEC 1
5385 -
5386 -static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5387 +static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5388 { /*Generated with cryptopp*/
5389 #ifdef __LITTLE_ENDIAN
5390 .key = "\x08\x00" /* rta length */
5391 @@ -17116,9 +17231,7 @@ static struct aead_testvec hmac_sha1_des
5392 },
5393 };
5394
5395 -#define HMAC_SHA224_DES_CBC_ENC_TEST_VEC 1
5396 -
5397 -static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5398 +static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5399 { /*Generated with cryptopp*/
5400 #ifdef __LITTLE_ENDIAN
5401 .key = "\x08\x00" /* rta length */
5402 @@ -17177,9 +17290,7 @@ static struct aead_testvec hmac_sha224_d
5403 },
5404 };
5405
5406 -#define HMAC_SHA256_DES_CBC_ENC_TEST_VEC 1
5407 -
5408 -static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5409 +static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5410 { /*Generated with cryptopp*/
5411 #ifdef __LITTLE_ENDIAN
5412 .key = "\x08\x00" /* rta length */
5413 @@ -17240,9 +17351,7 @@ static struct aead_testvec hmac_sha256_d
5414 },
5415 };
5416
5417 -#define HMAC_SHA384_DES_CBC_ENC_TEST_VEC 1
5418 -
5419 -static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5420 +static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5421 { /*Generated with cryptopp*/
5422 #ifdef __LITTLE_ENDIAN
5423 .key = "\x08\x00" /* rta length */
5424 @@ -17307,9 +17416,7 @@ static struct aead_testvec hmac_sha384_d
5425 },
5426 };
5427
5428 -#define HMAC_SHA512_DES_CBC_ENC_TEST_VEC 1
5429 -
5430 -static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5431 +static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5432 { /*Generated with cryptopp*/
5433 #ifdef __LITTLE_ENDIAN
5434 .key = "\x08\x00" /* rta length */
5435 @@ -17378,9 +17485,7 @@ static struct aead_testvec hmac_sha512_d
5436 },
5437 };
5438
5439 -#define HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC 1
5440 -
5441 -static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5442 +static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5443 { /*Generated with cryptopp*/
5444 #ifdef __LITTLE_ENDIAN
5445 .key = "\x08\x00" /* rta length */
5446 @@ -17441,9 +17546,7 @@ static struct aead_testvec hmac_sha1_des
5447 },
5448 };
5449
5450 -#define HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC 1
5451 -
5452 -static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5453 +static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5454 { /*Generated with cryptopp*/
5455 #ifdef __LITTLE_ENDIAN
5456 .key = "\x08\x00" /* rta length */
5457 @@ -17504,9 +17607,7 @@ static struct aead_testvec hmac_sha224_d
5458 },
5459 };
5460
5461 -#define HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC 1
5462 -
5463 -static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5464 +static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5465 { /*Generated with cryptopp*/
5466 #ifdef __LITTLE_ENDIAN
5467 .key = "\x08\x00" /* rta length */
5468 @@ -17569,9 +17670,7 @@ static struct aead_testvec hmac_sha256_d
5469 },
5470 };
5471
5472 -#define HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC 1
5473 -
5474 -static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5475 +static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5476 { /*Generated with cryptopp*/
5477 #ifdef __LITTLE_ENDIAN
5478 .key = "\x08\x00" /* rta length */
5479 @@ -17638,9 +17737,7 @@ static struct aead_testvec hmac_sha384_d
5480 },
5481 };
5482
5483 -#define HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC 1
5484 -
5485 -static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5486 +static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5487 { /*Generated with cryptopp*/
5488 #ifdef __LITTLE_ENDIAN
5489 .key = "\x08\x00" /* rta length */
5490 @@ -17711,7 +17808,7 @@ static struct aead_testvec hmac_sha512_d
5491 },
5492 };
5493
5494 -static struct cipher_testvec aes_lrw_enc_tv_template[] = {
5495 +static const struct cipher_testvec aes_lrw_enc_tv_template[] = {
5496 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5497 { /* LRW-32-AES 1 */
5498 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5499 @@ -17964,7 +18061,7 @@ static struct cipher_testvec aes_lrw_enc
5500 }
5501 };
5502
5503 -static struct cipher_testvec aes_lrw_dec_tv_template[] = {
5504 +static const struct cipher_testvec aes_lrw_dec_tv_template[] = {
5505 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5506 /* same as enc vectors with input and result reversed */
5507 { /* LRW-32-AES 1 */
5508 @@ -18218,7 +18315,7 @@ static struct cipher_testvec aes_lrw_dec
5509 }
5510 };
5511
5512 -static struct cipher_testvec aes_xts_enc_tv_template[] = {
5513 +static const struct cipher_testvec aes_xts_enc_tv_template[] = {
5514 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5515 { /* XTS-AES 1 */
5516 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5517 @@ -18561,7 +18658,7 @@ static struct cipher_testvec aes_xts_enc
5518 }
5519 };
5520
5521 -static struct cipher_testvec aes_xts_dec_tv_template[] = {
5522 +static const struct cipher_testvec aes_xts_dec_tv_template[] = {
5523 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5524 { /* XTS-AES 1 */
5525 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5526 @@ -18905,7 +19002,7 @@ static struct cipher_testvec aes_xts_dec
5527 };
5528
5529
5530 -static struct cipher_testvec aes_ctr_enc_tv_template[] = {
5531 +static const struct cipher_testvec aes_ctr_enc_tv_template[] = {
5532 { /* From NIST Special Publication 800-38A, Appendix F.5 */
5533 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5534 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5535 @@ -19260,7 +19357,7 @@ static struct cipher_testvec aes_ctr_enc
5536 },
5537 };
5538
5539 -static struct cipher_testvec aes_ctr_dec_tv_template[] = {
5540 +static const struct cipher_testvec aes_ctr_dec_tv_template[] = {
5541 { /* From NIST Special Publication 800-38A, Appendix F.5 */
5542 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5543 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5544 @@ -19615,7 +19712,7 @@ static struct cipher_testvec aes_ctr_dec
5545 },
5546 };
5547
5548 -static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5549 +static const struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5550 { /* From RFC 3686 */
5551 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5552 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5553 @@ -20747,7 +20844,7 @@ static struct cipher_testvec aes_ctr_rfc
5554 },
5555 };
5556
5557 -static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5558 +static const struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5559 { /* From RFC 3686 */
5560 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5561 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5562 @@ -20838,7 +20935,7 @@ static struct cipher_testvec aes_ctr_rfc
5563 },
5564 };
5565
5566 -static struct cipher_testvec aes_ofb_enc_tv_template[] = {
5567 +static const struct cipher_testvec aes_ofb_enc_tv_template[] = {
5568 /* From NIST Special Publication 800-38A, Appendix F.5 */
5569 {
5570 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5571 @@ -20867,7 +20964,7 @@ static struct cipher_testvec aes_ofb_enc
5572 }
5573 };
5574
5575 -static struct cipher_testvec aes_ofb_dec_tv_template[] = {
5576 +static const struct cipher_testvec aes_ofb_dec_tv_template[] = {
5577 /* From NIST Special Publication 800-38A, Appendix F.5 */
5578 {
5579 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5580 @@ -20896,7 +20993,7 @@ static struct cipher_testvec aes_ofb_dec
5581 }
5582 };
5583
5584 -static struct aead_testvec aes_gcm_enc_tv_template[] = {
5585 +static const struct aead_testvec aes_gcm_enc_tv_template[] = {
5586 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5587 .key = zeroed_string,
5588 .klen = 16,
5589 @@ -21056,7 +21153,7 @@ static struct aead_testvec aes_gcm_enc_t
5590 }
5591 };
5592
5593 -static struct aead_testvec aes_gcm_dec_tv_template[] = {
5594 +static const struct aead_testvec aes_gcm_dec_tv_template[] = {
5595 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5596 .key = zeroed_string,
5597 .klen = 32,
5598 @@ -21258,7 +21355,7 @@ static struct aead_testvec aes_gcm_dec_t
5599 }
5600 };
5601
5602 -static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5603 +static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5604 { /* Generated using Crypto++ */
5605 .key = zeroed_string,
5606 .klen = 20,
5607 @@ -21871,7 +21968,7 @@ static struct aead_testvec aes_gcm_rfc41
5608 }
5609 };
5610
5611 -static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5612 +static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5613 { /* Generated using Crypto++ */
5614 .key = zeroed_string,
5615 .klen = 20,
5616 @@ -22485,7 +22582,7 @@ static struct aead_testvec aes_gcm_rfc41
5617 }
5618 };
5619
5620 -static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5621 +static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5622 { /* From draft-mcgrew-gcm-test-01 */
5623 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5624 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5625 @@ -22516,7 +22613,7 @@ static struct aead_testvec aes_gcm_rfc45
5626 }
5627 };
5628
5629 -static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5630 +static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5631 { /* From draft-mcgrew-gcm-test-01 */
5632 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5633 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5634 @@ -22575,7 +22672,7 @@ static struct aead_testvec aes_gcm_rfc45
5635 },
5636 };
5637
5638 -static struct aead_testvec aes_ccm_enc_tv_template[] = {
5639 +static const struct aead_testvec aes_ccm_enc_tv_template[] = {
5640 { /* From RFC 3610 */
5641 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5642 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5643 @@ -22859,7 +22956,7 @@ static struct aead_testvec aes_ccm_enc_t
5644 }
5645 };
5646
5647 -static struct aead_testvec aes_ccm_dec_tv_template[] = {
5648 +static const struct aead_testvec aes_ccm_dec_tv_template[] = {
5649 { /* From RFC 3610 */
5650 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5651 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5652 @@ -23191,7 +23288,7 @@ static struct aead_testvec aes_ccm_dec_t
5653 * These vectors are copied/generated from the ones for rfc4106 with
5654 * the key truncated by one byte..
5655 */
5656 -static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5657 +static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5658 { /* Generated using Crypto++ */
5659 .key = zeroed_string,
5660 .klen = 19,
5661 @@ -23804,7 +23901,7 @@ static struct aead_testvec aes_ccm_rfc43
5662 }
5663 };
5664
5665 -static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
5666 +static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
5667 { /* Generated using Crypto++ */
5668 .key = zeroed_string,
5669 .klen = 19,
5670 @@ -24420,9 +24517,7 @@ static struct aead_testvec aes_ccm_rfc43
5671 /*
5672 * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5.
5673 */
5674 -#define RFC7539_ENC_TEST_VECTORS 2
5675 -#define RFC7539_DEC_TEST_VECTORS 2
5676 -static struct aead_testvec rfc7539_enc_tv_template[] = {
5677 +static const struct aead_testvec rfc7539_enc_tv_template[] = {
5678 {
5679 .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
5680 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5681 @@ -24554,7 +24649,7 @@ static struct aead_testvec rfc7539_enc_t
5682 },
5683 };
5684
5685 -static struct aead_testvec rfc7539_dec_tv_template[] = {
5686 +static const struct aead_testvec rfc7539_dec_tv_template[] = {
5687 {
5688 .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
5689 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5690 @@ -24689,9 +24784,7 @@ static struct aead_testvec rfc7539_dec_t
5691 /*
5692 * draft-irtf-cfrg-chacha20-poly1305
5693 */
5694 -#define RFC7539ESP_DEC_TEST_VECTORS 1
5695 -#define RFC7539ESP_ENC_TEST_VECTORS 1
5696 -static struct aead_testvec rfc7539esp_enc_tv_template[] = {
5697 +static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
5698 {
5699 .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5700 "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5701 @@ -24779,7 +24872,7 @@ static struct aead_testvec rfc7539esp_en
5702 },
5703 };
5704
5705 -static struct aead_testvec rfc7539esp_dec_tv_template[] = {
5706 +static const struct aead_testvec rfc7539esp_dec_tv_template[] = {
5707 {
5708 .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5709 "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5710 @@ -24875,7 +24968,7 @@ static struct aead_testvec rfc7539esp_de
5711 * semiblock of the ciphertext from the test vector. For decryption, iv is
5712 * the first semiblock of the ciphertext.
5713 */
5714 -static struct cipher_testvec aes_kw_enc_tv_template[] = {
5715 +static const struct cipher_testvec aes_kw_enc_tv_template[] = {
5716 {
5717 .key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
5718 "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
5719 @@ -24890,7 +24983,7 @@ static struct cipher_testvec aes_kw_enc_
5720 },
5721 };
5722
5723 -static struct cipher_testvec aes_kw_dec_tv_template[] = {
5724 +static const struct cipher_testvec aes_kw_dec_tv_template[] = {
5725 {
5726 .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
5727 "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
5728 @@ -24913,9 +25006,7 @@ static struct cipher_testvec aes_kw_dec_
5729 * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
5730 * Only AES-128 is supported at this time.
5731 */
5732 -#define ANSI_CPRNG_AES_TEST_VECTORS 6
5733 -
5734 -static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5735 +static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5736 {
5737 .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
5738 "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
5739 @@ -25011,7 +25102,7 @@ static struct cprng_testvec ansi_cprng_a
5740 * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5741 * w/o personalization string, w/ and w/o additional input string).
5742 */
5743 -static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5744 +static const struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5745 {
5746 .entropy = (unsigned char *)
5747 "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
5748 @@ -25169,7 +25260,7 @@ static struct drbg_testvec drbg_pr_sha25
5749 },
5750 };
5751
5752 -static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5753 +static const struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5754 {
5755 .entropy = (unsigned char *)
5756 "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
5757 @@ -25327,7 +25418,7 @@ static struct drbg_testvec drbg_pr_hmac_
5758 },
5759 };
5760
5761 -static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5762 +static const struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5763 {
5764 .entropy = (unsigned char *)
5765 "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
5766 @@ -25451,7 +25542,7 @@ static struct drbg_testvec drbg_pr_ctr_a
5767 * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5768 * w/o personalization string, w/ and w/o additional input string).
5769 */
5770 -static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5771 +static const struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5772 {
5773 .entropy = (unsigned char *)
5774 "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
5775 @@ -25573,7 +25664,7 @@ static struct drbg_testvec drbg_nopr_sha
5776 },
5777 };
5778
5779 -static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5780 +static const struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5781 {
5782 .entropy = (unsigned char *)
5783 "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
5784 @@ -25695,7 +25786,7 @@ static struct drbg_testvec drbg_nopr_hma
5785 },
5786 };
5787
5788 -static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5789 +static const struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5790 {
5791 .entropy = (unsigned char *)
5792 "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
5793 @@ -25719,7 +25810,7 @@ static struct drbg_testvec drbg_nopr_ctr
5794 },
5795 };
5796
5797 -static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5798 +static const struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5799 {
5800 .entropy = (unsigned char *)
5801 "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
5802 @@ -25743,7 +25834,7 @@ static struct drbg_testvec drbg_nopr_ctr
5803 },
5804 };
5805
5806 -static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5807 +static const struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5808 {
5809 .entropy = (unsigned char *)
5810 "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
5811 @@ -25832,14 +25923,7 @@ static struct drbg_testvec drbg_nopr_ctr
5812 };
5813
5814 /* Cast5 test vectors from RFC 2144 */
5815 -#define CAST5_ENC_TEST_VECTORS 4
5816 -#define CAST5_DEC_TEST_VECTORS 4
5817 -#define CAST5_CBC_ENC_TEST_VECTORS 1
5818 -#define CAST5_CBC_DEC_TEST_VECTORS 1
5819 -#define CAST5_CTR_ENC_TEST_VECTORS 2
5820 -#define CAST5_CTR_DEC_TEST_VECTORS 2
5821 -
5822 -static struct cipher_testvec cast5_enc_tv_template[] = {
5823 +static const struct cipher_testvec cast5_enc_tv_template[] = {
5824 {
5825 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5826 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5827 @@ -26000,7 +26084,7 @@ static struct cipher_testvec cast5_enc_t
5828 },
5829 };
5830
5831 -static struct cipher_testvec cast5_dec_tv_template[] = {
5832 +static const struct cipher_testvec cast5_dec_tv_template[] = {
5833 {
5834 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5835 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5836 @@ -26161,7 +26245,7 @@ static struct cipher_testvec cast5_dec_t
5837 },
5838 };
5839
5840 -static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5841 +static const struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5842 { /* Generated from TF test vectors */
5843 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5844 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5845 @@ -26299,7 +26383,7 @@ static struct cipher_testvec cast5_cbc_e
5846 },
5847 };
5848
5849 -static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5850 +static const struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5851 { /* Generated from TF test vectors */
5852 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5853 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5854 @@ -26437,7 +26521,7 @@ static struct cipher_testvec cast5_cbc_d
5855 },
5856 };
5857
5858 -static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5859 +static const struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5860 { /* Generated from TF test vectors */
5861 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5862 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5863 @@ -26588,7 +26672,7 @@ static struct cipher_testvec cast5_ctr_e
5864 },
5865 };
5866
5867 -static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5868 +static const struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5869 { /* Generated from TF test vectors */
5870 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5871 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5872 @@ -26742,10 +26826,7 @@ static struct cipher_testvec cast5_ctr_d
5873 /*
5874 * ARC4 test vectors from OpenSSL
5875 */
5876 -#define ARC4_ENC_TEST_VECTORS 7
5877 -#define ARC4_DEC_TEST_VECTORS 7
5878 -
5879 -static struct cipher_testvec arc4_enc_tv_template[] = {
5880 +static const struct cipher_testvec arc4_enc_tv_template[] = {
5881 {
5882 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5883 .klen = 8,
5884 @@ -26811,7 +26892,7 @@ static struct cipher_testvec arc4_enc_tv
5885 },
5886 };
5887
5888 -static struct cipher_testvec arc4_dec_tv_template[] = {
5889 +static const struct cipher_testvec arc4_dec_tv_template[] = {
5890 {
5891 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5892 .klen = 8,
5893 @@ -26880,10 +26961,7 @@ static struct cipher_testvec arc4_dec_tv
5894 /*
5895 * TEA test vectors
5896 */
5897 -#define TEA_ENC_TEST_VECTORS 4
5898 -#define TEA_DEC_TEST_VECTORS 4
5899 -
5900 -static struct cipher_testvec tea_enc_tv_template[] = {
5901 +static const struct cipher_testvec tea_enc_tv_template[] = {
5902 {
5903 .key = zeroed_string,
5904 .klen = 16,
5905 @@ -26926,7 +27004,7 @@ static struct cipher_testvec tea_enc_tv_
5906 }
5907 };
5908
5909 -static struct cipher_testvec tea_dec_tv_template[] = {
5910 +static const struct cipher_testvec tea_dec_tv_template[] = {
5911 {
5912 .key = zeroed_string,
5913 .klen = 16,
5914 @@ -26972,10 +27050,7 @@ static struct cipher_testvec tea_dec_tv_
5915 /*
5916 * XTEA test vectors
5917 */
5918 -#define XTEA_ENC_TEST_VECTORS 4
5919 -#define XTEA_DEC_TEST_VECTORS 4
5920 -
5921 -static struct cipher_testvec xtea_enc_tv_template[] = {
5922 +static const struct cipher_testvec xtea_enc_tv_template[] = {
5923 {
5924 .key = zeroed_string,
5925 .klen = 16,
5926 @@ -27018,7 +27093,7 @@ static struct cipher_testvec xtea_enc_tv
5927 }
5928 };
5929
5930 -static struct cipher_testvec xtea_dec_tv_template[] = {
5931 +static const struct cipher_testvec xtea_dec_tv_template[] = {
5932 {
5933 .key = zeroed_string,
5934 .klen = 16,
5935 @@ -27064,10 +27139,7 @@ static struct cipher_testvec xtea_dec_tv
5936 /*
5937 * KHAZAD test vectors.
5938 */
5939 -#define KHAZAD_ENC_TEST_VECTORS 5
5940 -#define KHAZAD_DEC_TEST_VECTORS 5
5941 -
5942 -static struct cipher_testvec khazad_enc_tv_template[] = {
5943 +static const struct cipher_testvec khazad_enc_tv_template[] = {
5944 {
5945 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
5946 "\x00\x00\x00\x00\x00\x00\x00\x00",
5947 @@ -27113,7 +27185,7 @@ static struct cipher_testvec khazad_enc_
5948 },
5949 };
5950
5951 -static struct cipher_testvec khazad_dec_tv_template[] = {
5952 +static const struct cipher_testvec khazad_dec_tv_template[] = {
5953 {
5954 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
5955 "\x00\x00\x00\x00\x00\x00\x00\x00",
5956 @@ -27163,12 +27235,7 @@ static struct cipher_testvec khazad_dec_
5957 * Anubis test vectors.
5958 */
5959
5960 -#define ANUBIS_ENC_TEST_VECTORS 5
5961 -#define ANUBIS_DEC_TEST_VECTORS 5
5962 -#define ANUBIS_CBC_ENC_TEST_VECTORS 2
5963 -#define ANUBIS_CBC_DEC_TEST_VECTORS 2
5964 -
5965 -static struct cipher_testvec anubis_enc_tv_template[] = {
5966 +static const struct cipher_testvec anubis_enc_tv_template[] = {
5967 {
5968 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5969 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5970 @@ -27231,7 +27298,7 @@ static struct cipher_testvec anubis_enc_
5971 },
5972 };
5973
5974 -static struct cipher_testvec anubis_dec_tv_template[] = {
5975 +static const struct cipher_testvec anubis_dec_tv_template[] = {
5976 {
5977 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5978 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5979 @@ -27294,7 +27361,7 @@ static struct cipher_testvec anubis_dec_
5980 },
5981 };
5982
5983 -static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5984 +static const struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5985 {
5986 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5987 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5988 @@ -27329,7 +27396,7 @@ static struct cipher_testvec anubis_cbc_
5989 },
5990 };
5991
5992 -static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5993 +static const struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5994 {
5995 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5996 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5997 @@ -27367,10 +27434,7 @@ static struct cipher_testvec anubis_cbc_
5998 /*
5999 * XETA test vectors
6000 */
6001 -#define XETA_ENC_TEST_VECTORS 4
6002 -#define XETA_DEC_TEST_VECTORS 4
6003 -
6004 -static struct cipher_testvec xeta_enc_tv_template[] = {
6005 +static const struct cipher_testvec xeta_enc_tv_template[] = {
6006 {
6007 .key = zeroed_string,
6008 .klen = 16,
6009 @@ -27413,7 +27477,7 @@ static struct cipher_testvec xeta_enc_tv
6010 }
6011 };
6012
6013 -static struct cipher_testvec xeta_dec_tv_template[] = {
6014 +static const struct cipher_testvec xeta_dec_tv_template[] = {
6015 {
6016 .key = zeroed_string,
6017 .klen = 16,
6018 @@ -27459,10 +27523,7 @@ static struct cipher_testvec xeta_dec_tv
6019 /*
6020 * FCrypt test vectors
6021 */
6022 -#define FCRYPT_ENC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
6023 -#define FCRYPT_DEC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
6024 -
6025 -static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6026 +static const struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6027 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6028 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6029 .klen = 8,
6030 @@ -27523,7 +27584,7 @@ static struct cipher_testvec fcrypt_pcbc
6031 }
6032 };
6033
6034 -static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6035 +static const struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6036 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6037 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6038 .klen = 8,
6039 @@ -27587,18 +27648,7 @@ static struct cipher_testvec fcrypt_pcbc
6040 /*
6041 * CAMELLIA test vectors.
6042 */
6043 -#define CAMELLIA_ENC_TEST_VECTORS 4
6044 -#define CAMELLIA_DEC_TEST_VECTORS 4
6045 -#define CAMELLIA_CBC_ENC_TEST_VECTORS 3
6046 -#define CAMELLIA_CBC_DEC_TEST_VECTORS 3
6047 -#define CAMELLIA_CTR_ENC_TEST_VECTORS 2
6048 -#define CAMELLIA_CTR_DEC_TEST_VECTORS 2
6049 -#define CAMELLIA_LRW_ENC_TEST_VECTORS 8
6050 -#define CAMELLIA_LRW_DEC_TEST_VECTORS 8
6051 -#define CAMELLIA_XTS_ENC_TEST_VECTORS 5
6052 -#define CAMELLIA_XTS_DEC_TEST_VECTORS 5
6053 -
6054 -static struct cipher_testvec camellia_enc_tv_template[] = {
6055 +static const struct cipher_testvec camellia_enc_tv_template[] = {
6056 {
6057 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6058 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6059 @@ -27898,7 +27948,7 @@ static struct cipher_testvec camellia_en
6060 },
6061 };
6062
6063 -static struct cipher_testvec camellia_dec_tv_template[] = {
6064 +static const struct cipher_testvec camellia_dec_tv_template[] = {
6065 {
6066 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6067 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6068 @@ -28198,7 +28248,7 @@ static struct cipher_testvec camellia_de
6069 },
6070 };
6071
6072 -static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6073 +static const struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6074 {
6075 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6076 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6077 @@ -28494,7 +28544,7 @@ static struct cipher_testvec camellia_cb
6078 },
6079 };
6080
6081 -static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6082 +static const struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6083 {
6084 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6085 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6086 @@ -28790,7 +28840,7 @@ static struct cipher_testvec camellia_cb
6087 },
6088 };
6089
6090 -static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6091 +static const struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6092 { /* Generated with Crypto++ */
6093 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6094 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6095 @@ -29457,7 +29507,7 @@ static struct cipher_testvec camellia_ct
6096 },
6097 };
6098
6099 -static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6100 +static const struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6101 { /* Generated with Crypto++ */
6102 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6103 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6104 @@ -30124,7 +30174,7 @@ static struct cipher_testvec camellia_ct
6105 },
6106 };
6107
6108 -static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6109 +static const struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6110 /* Generated from AES-LRW test vectors */
6111 {
6112 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
6113 @@ -30376,7 +30426,7 @@ static struct cipher_testvec camellia_lr
6114 },
6115 };
6116
6117 -static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6118 +static const struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6119 /* Generated from AES-LRW test vectors */
6120 /* same as enc vectors with input and result reversed */
6121 {
6122 @@ -30629,7 +30679,7 @@ static struct cipher_testvec camellia_lr
6123 },
6124 };
6125
6126 -static struct cipher_testvec camellia_xts_enc_tv_template[] = {
6127 +static const struct cipher_testvec camellia_xts_enc_tv_template[] = {
6128 /* Generated from AES-XTS test vectors */
6129 {
6130 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6131 @@ -30971,7 +31021,7 @@ static struct cipher_testvec camellia_xt
6132 },
6133 };
6134
6135 -static struct cipher_testvec camellia_xts_dec_tv_template[] = {
6136 +static const struct cipher_testvec camellia_xts_dec_tv_template[] = {
6137 /* Generated from AES-XTS test vectors */
6138 /* same as enc vectors with input and result reversed */
6139 {
6140 @@ -31317,10 +31367,7 @@ static struct cipher_testvec camellia_xt
6141 /*
6142 * SEED test vectors
6143 */
6144 -#define SEED_ENC_TEST_VECTORS 4
6145 -#define SEED_DEC_TEST_VECTORS 4
6146 -
6147 -static struct cipher_testvec seed_enc_tv_template[] = {
6148 +static const struct cipher_testvec seed_enc_tv_template[] = {
6149 {
6150 .key = zeroed_string,
6151 .klen = 16,
6152 @@ -31362,7 +31409,7 @@ static struct cipher_testvec seed_enc_tv
6153 }
6154 };
6155
6156 -static struct cipher_testvec seed_dec_tv_template[] = {
6157 +static const struct cipher_testvec seed_dec_tv_template[] = {
6158 {
6159 .key = zeroed_string,
6160 .klen = 16,
6161 @@ -31404,8 +31451,7 @@ static struct cipher_testvec seed_dec_tv
6162 }
6163 };
6164
6165 -#define SALSA20_STREAM_ENC_TEST_VECTORS 5
6166 -static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6167 +static const struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6168 /*
6169 * Testvectors from verified.test-vectors submitted to ECRYPT.
6170 * They are truncated to size 39, 64, 111, 129 to test a variety
6171 @@ -32574,8 +32620,7 @@ static struct cipher_testvec salsa20_str
6172 },
6173 };
6174
6175 -#define CHACHA20_ENC_TEST_VECTORS 4
6176 -static struct cipher_testvec chacha20_enc_tv_template[] = {
6177 +static const struct cipher_testvec chacha20_enc_tv_template[] = {
6178 { /* RFC7539 A.2. Test Vector #1 */
6179 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6180 "\x00\x00\x00\x00\x00\x00\x00\x00"
6181 @@ -33086,9 +33131,7 @@ static struct cipher_testvec chacha20_en
6182 /*
6183 * CTS (Cipher Text Stealing) mode tests
6184 */
6185 -#define CTS_MODE_ENC_TEST_VECTORS 6
6186 -#define CTS_MODE_DEC_TEST_VECTORS 6
6187 -static struct cipher_testvec cts_mode_enc_tv_template[] = {
6188 +static const struct cipher_testvec cts_mode_enc_tv_template[] = {
6189 { /* from rfc3962 */
6190 .klen = 16,
6191 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6192 @@ -33190,7 +33233,7 @@ static struct cipher_testvec cts_mode_en
6193 }
6194 };
6195
6196 -static struct cipher_testvec cts_mode_dec_tv_template[] = {
6197 +static const struct cipher_testvec cts_mode_dec_tv_template[] = {
6198 { /* from rfc3962 */
6199 .klen = 16,
6200 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6201 @@ -33308,10 +33351,7 @@ struct comp_testvec {
6202 * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
6203 */
6204
6205 -#define DEFLATE_COMP_TEST_VECTORS 2
6206 -#define DEFLATE_DECOMP_TEST_VECTORS 2
6207 -
6208 -static struct comp_testvec deflate_comp_tv_template[] = {
6209 +static const struct comp_testvec deflate_comp_tv_template[] = {
6210 {
6211 .inlen = 70,
6212 .outlen = 38,
6213 @@ -33347,7 +33387,7 @@ static struct comp_testvec deflate_comp_
6214 },
6215 };
6216
6217 -static struct comp_testvec deflate_decomp_tv_template[] = {
6218 +static const struct comp_testvec deflate_decomp_tv_template[] = {
6219 {
6220 .inlen = 122,
6221 .outlen = 191,
6222 @@ -33386,10 +33426,7 @@ static struct comp_testvec deflate_decom
6223 /*
6224 * LZO test vectors (null-terminated strings).
6225 */
6226 -#define LZO_COMP_TEST_VECTORS 2
6227 -#define LZO_DECOMP_TEST_VECTORS 2
6228 -
6229 -static struct comp_testvec lzo_comp_tv_template[] = {
6230 +static const struct comp_testvec lzo_comp_tv_template[] = {
6231 {
6232 .inlen = 70,
6233 .outlen = 57,
6234 @@ -33429,7 +33466,7 @@ static struct comp_testvec lzo_comp_tv_t
6235 },
6236 };
6237
6238 -static struct comp_testvec lzo_decomp_tv_template[] = {
6239 +static const struct comp_testvec lzo_decomp_tv_template[] = {
6240 {
6241 .inlen = 133,
6242 .outlen = 159,
6243 @@ -33472,7 +33509,7 @@ static struct comp_testvec lzo_decomp_tv
6244 */
6245 #define MICHAEL_MIC_TEST_VECTORS 6
6246
6247 -static struct hash_testvec michael_mic_tv_template[] = {
6248 +static const struct hash_testvec michael_mic_tv_template[] = {
6249 {
6250 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6251 .ksize = 8,
6252 @@ -33520,9 +33557,7 @@ static struct hash_testvec michael_mic_t
6253 /*
6254 * CRC32 test vectors
6255 */
6256 -#define CRC32_TEST_VECTORS 14
6257 -
6258 -static struct hash_testvec crc32_tv_template[] = {
6259 +static const struct hash_testvec crc32_tv_template[] = {
6260 {
6261 .key = "\x87\xa9\xcb\xed",
6262 .ksize = 4,
6263 @@ -33954,9 +33989,7 @@ static struct hash_testvec crc32_tv_temp
6264 /*
6265 * CRC32C test vectors
6266 */
6267 -#define CRC32C_TEST_VECTORS 15
6268 -
6269 -static struct hash_testvec crc32c_tv_template[] = {
6270 +static const struct hash_testvec crc32c_tv_template[] = {
6271 {
6272 .psize = 0,
6273 .digest = "\x00\x00\x00\x00",
6274 @@ -34392,9 +34425,7 @@ static struct hash_testvec crc32c_tv_tem
6275 /*
6276 * Blackfin CRC test vectors
6277 */
6278 -#define BFIN_CRC_TEST_VECTORS 6
6279 -
6280 -static struct hash_testvec bfin_crc_tv_template[] = {
6281 +static const struct hash_testvec bfin_crc_tv_template[] = {
6282 {
6283 .psize = 0,
6284 .digest = "\x00\x00\x00\x00",
6285 @@ -34479,9 +34510,6 @@ static struct hash_testvec bfin_crc_tv_t
6286
6287 };
6288
6289 -#define LZ4_COMP_TEST_VECTORS 1
6290 -#define LZ4_DECOMP_TEST_VECTORS 1
6291 -
6292 static struct comp_testvec lz4_comp_tv_template[] = {
6293 {
6294 .inlen = 70,
6295 @@ -34512,9 +34540,6 @@ static struct comp_testvec lz4_decomp_tv
6296 },
6297 };
6298
6299 -#define LZ4HC_COMP_TEST_VECTORS 1
6300 -#define LZ4HC_DECOMP_TEST_VECTORS 1
6301 -
6302 static struct comp_testvec lz4hc_comp_tv_template[] = {
6303 {
6304 .inlen = 70,
6305 --- /dev/null
6306 +++ b/crypto/tls.c
6307 @@ -0,0 +1,607 @@
6308 +/*
6309 + * Copyright 2013 Freescale Semiconductor, Inc.
6310 + * Copyright 2017 NXP Semiconductor, Inc.
6311 + *
6312 + * This program is free software; you can redistribute it and/or modify it
6313 + * under the terms of the GNU General Public License as published by the Free
6314 + * Software Foundation; either version 2 of the License, or (at your option)
6315 + * any later version.
6316 + *
6317 + */
6318 +
6319 +#include <crypto/internal/aead.h>
6320 +#include <crypto/internal/hash.h>
6321 +#include <crypto/internal/skcipher.h>
6322 +#include <crypto/authenc.h>
6323 +#include <crypto/null.h>
6324 +#include <crypto/scatterwalk.h>
6325 +#include <linux/err.h>
6326 +#include <linux/init.h>
6327 +#include <linux/module.h>
6328 +#include <linux/rtnetlink.h>
6329 +
6330 +struct tls_instance_ctx {
6331 + struct crypto_ahash_spawn auth;
6332 + struct crypto_skcipher_spawn enc;
6333 +};
6334 +
6335 +struct crypto_tls_ctx {
6336 + unsigned int reqoff;
6337 + struct crypto_ahash *auth;
6338 + struct crypto_skcipher *enc;
6339 + struct crypto_skcipher *null;
6340 +};
6341 +
6342 +struct tls_request_ctx {
6343 + /*
6344 + * cryptlen holds the payload length in the case of encryption or
6345 + * payload_len + icv_len + padding_len in case of decryption
6346 + */
6347 + unsigned int cryptlen;
6348 + /* working space for partial results */
6349 + struct scatterlist tmp[2];
6350 + struct scatterlist cipher[2];
6351 + struct scatterlist dst[2];
6352 + char tail[];
6353 +};
6354 +
6355 +struct async_op {
6356 + struct completion completion;
6357 + int err;
6358 +};
6359 +
6360 +static void tls_async_op_done(struct crypto_async_request *req, int err)
6361 +{
6362 + struct async_op *areq = req->data;
6363 +
6364 + if (err == -EINPROGRESS)
6365 + return;
6366 +
6367 + areq->err = err;
6368 + complete(&areq->completion);
6369 +}
6370 +
6371 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
6372 + unsigned int keylen)
6373 +{
6374 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6375 + struct crypto_ahash *auth = ctx->auth;
6376 + struct crypto_skcipher *enc = ctx->enc;
6377 + struct crypto_authenc_keys keys;
6378 + int err = -EINVAL;
6379 +
6380 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
6381 + goto badkey;
6382 +
6383 + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
6384 + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
6385 + CRYPTO_TFM_REQ_MASK);
6386 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
6387 + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
6388 + CRYPTO_TFM_RES_MASK);
6389 +
6390 + if (err)
6391 + goto out;
6392 +
6393 + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
6394 + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
6395 + CRYPTO_TFM_REQ_MASK);
6396 + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
6397 + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
6398 + CRYPTO_TFM_RES_MASK);
6399 +
6400 +out:
6401 + return err;
6402 +
6403 +badkey:
6404 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
6405 + goto out;
6406 +}
6407 +
6408 +/**
6409 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
6410 + * @hash: (output) buffer to save the digest into
6411 + * @src: (input) scatterlist with the assoc and payload data
6412 + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
6413 + * @req: (input) aead request
6414 + **/
6415 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
6416 + unsigned int srclen, struct aead_request *req)
6417 +{
6418 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6419 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6420 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6421 + struct async_op ahash_op;
6422 + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
6423 + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
6424 + int err = -EBADMSG;
6425 +
6426 + /* Bail out if the request assoc len is 0 */
6427 + if (!req->assoclen)
6428 + return err;
6429 +
6430 + init_completion(&ahash_op.completion);
6431 +
6432 + /* the hash transform to be executed comes from the original request */
6433 + ahash_request_set_tfm(ahreq, ctx->auth);
6434 + /* prepare the hash request with input data and result pointer */
6435 + ahash_request_set_crypt(ahreq, src, hash, srclen);
6436 + /* set the notifier for when the async hash function returns */
6437 + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
6438 + tls_async_op_done, &ahash_op);
6439 +
6440 + /* Calculate the digest on the given data. The result is put in hash */
6441 + err = crypto_ahash_digest(ahreq);
6442 + if (err == -EINPROGRESS) {
6443 + err = wait_for_completion_interruptible(&ahash_op.completion);
6444 + if (!err)
6445 + err = ahash_op.err;
6446 + }
6447 +
6448 + return err;
6449 +}
6450 +
6451 +/**
6452 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
6453 + * @hash: (output) buffer to save the digest and padding into
6454 + * @phashlen: (output) the size of digest + padding
6455 + * @req: (input) aead request
6456 + **/
6457 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
6458 + struct aead_request *req)
6459 +{
6460 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6461 + unsigned int hash_size = crypto_aead_authsize(tls);
6462 + unsigned int block_size = crypto_aead_blocksize(tls);
6463 + unsigned int srclen = req->cryptlen + hash_size;
6464 + unsigned int icvlen = req->cryptlen + req->assoclen;
6465 + unsigned int padlen;
6466 + int err;
6467 +
6468 + err = crypto_tls_genicv(hash, req->src, icvlen, req);
6469 + if (err)
6470 + goto out;
6471 +
6472 + /* add padding after digest */
6473 + padlen = block_size - (srclen % block_size);
6474 + memset(hash + hash_size, padlen - 1, padlen);
6475 +
6476 + *phashlen = hash_size + padlen;
6477 +out:
6478 + return err;
6479 +}
6480 +
6481 +static int crypto_tls_copy_data(struct aead_request *req,
6482 + struct scatterlist *src,
6483 + struct scatterlist *dst,
6484 + unsigned int len)
6485 +{
6486 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6487 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6488 + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
6489 +
6490 + skcipher_request_set_tfm(skreq, ctx->null);
6491 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6492 + NULL, NULL);
6493 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
6494 +
6495 + return crypto_skcipher_encrypt(skreq);
6496 +}
6497 +
6498 +static int crypto_tls_encrypt(struct aead_request *req)
6499 +{
6500 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6501 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6502 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6503 + struct skcipher_request *skreq;
6504 + struct scatterlist *cipher = treq_ctx->cipher;
6505 + struct scatterlist *tmp = treq_ctx->tmp;
6506 + struct scatterlist *sg, *src, *dst;
6507 + unsigned int cryptlen, phashlen;
6508 + u8 *hash = treq_ctx->tail;
6509 + int err;
6510 +
6511 + /*
6512 + * The hash result is saved at the beginning of the tls request ctx
6513 + * and is aligned as required by the hash transform. Enough space was
6514 + * allocated in crypto_tls_init_tfm to accommodate the difference. The
6515 + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
6516 + * the result is not overwritten by the second (cipher) request.
6517 + */
6518 + hash = (u8 *)ALIGN((unsigned long)hash +
6519 + crypto_ahash_alignmask(ctx->auth),
6520 + crypto_ahash_alignmask(ctx->auth) + 1);
6521 +
6522 + /*
6523 + * STEP 1: create ICV together with necessary padding
6524 + */
6525 + err = crypto_tls_gen_padicv(hash, &phashlen, req);
6526 + if (err)
6527 + return err;
6528 +
6529 + /*
6530 + * STEP 2: Hash and padding are combined with the payload
6531 + * depending on the form in which it arrives. Scatter tables must have at least
6532 + * one page of data before chaining with another table and can't have
6533 + * an empty data page. The following code addresses these requirements.
6534 + *
6535 + * If the payload is empty, only the hash is encrypted, otherwise the
6536 + * payload scatterlist is merged with the hash. A special merging case
6537 + * is when the payload has only one page of data. In that case the
6538 + * payload page is moved to another scatterlist and prepared there for
6539 + * encryption.
6540 + */
6541 + if (req->cryptlen) {
6542 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6543 +
6544 + sg_init_table(cipher, 2);
6545 + sg_set_buf(cipher + 1, hash, phashlen);
6546 +
6547 + if (sg_is_last(src)) {
6548 + sg_set_page(cipher, sg_page(src), req->cryptlen,
6549 + src->offset);
6550 + src = cipher;
6551 + } else {
6552 + unsigned int rem_len = req->cryptlen;
6553 +
6554 + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
6555 + rem_len -= min(rem_len, sg->length);
6556 +
6557 + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
6558 + sg_chain(sg, 1, cipher);
6559 + }
6560 + } else {
6561 + sg_init_one(cipher, hash, phashlen);
6562 + src = cipher;
6563 + }
6564 +
6565 + /*
6566 + * If src != dst, copy the associated data from source to destination.
6567 + * In both cases, fast-forward past the associated data in the dest.
6568 + */
6569 + if (req->src != req->dst) {
6570 + err = crypto_tls_copy_data(req, req->src, req->dst,
6571 + req->assoclen);
6572 + if (err)
6573 + return err;
6574 + }
6575 + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
6576 +
6577 + /*
6578 + * STEP 3: encrypt the frame and return the result
6579 + */
6580 + cryptlen = req->cryptlen + phashlen;
6581 +
6582 + /*
6583 + * The hash and the cipher are applied at different times and their
6584 + * requests can use the same memory space without interference
6585 + */
6586 + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6587 + skcipher_request_set_tfm(skreq, ctx->enc);
6588 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6589 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6590 + req->base.complete, req->base.data);
6591 + /*
6592 + * Apply the cipher transform. The result will be in req->dst when the
6593 + * asynchronous call terminates.
6594 + */
6595 + err = crypto_skcipher_encrypt(skreq);
6596 +
6597 + return err;
6598 +}
6599 +
6600 +static int crypto_tls_decrypt(struct aead_request *req)
6601 +{
6602 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6603 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6604 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6605 + unsigned int cryptlen = req->cryptlen;
6606 + unsigned int hash_size = crypto_aead_authsize(tls);
6607 + unsigned int block_size = crypto_aead_blocksize(tls);
6608 + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6609 + struct scatterlist *tmp = treq_ctx->tmp;
6610 + struct scatterlist *src, *dst;
6611 +
6612 + u8 padding[255]; /* padding can be 0-255 bytes */
6613 + u8 pad_size;
6614 + u16 *len_field;
6615 + u8 *ihash, *hash = treq_ctx->tail;
6616 +
6617 + int paderr = 0;
6618 + int err = -EINVAL;
6619 + int i;
6620 + struct async_op ciph_op;
6621 +
6622 + /*
6623 + * Rule out bad packets. The input packet length must be at least one
6624 + * byte more than the hash_size
6625 + */
6626 + if (cryptlen <= hash_size || cryptlen % block_size)
6627 + goto out;
6628 +
6629 + /*
6630 + * Step 1 - Decrypt the source. Fast-forward past the associated data
6631 + * to the encrypted data. The result will be overwritten in place so
6632 + * that the decrypted data will be adjacent to the associated data. The
6633 + * last step (computing the hash) will have its input data already
6634 + * prepared and ready to be accessed at req->src.
6635 + */
6636 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6637 + dst = src;
6638 +
6639 + init_completion(&ciph_op.completion);
6640 + skcipher_request_set_tfm(skreq, ctx->enc);
6641 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6642 + tls_async_op_done, &ciph_op);
6643 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6644 + err = crypto_skcipher_decrypt(skreq);
6645 + if (err == -EINPROGRESS) {
6646 + err = wait_for_completion_interruptible(&ciph_op.completion);
6647 + if (!err)
6648 + err = ciph_op.err;
6649 + }
6650 + if (err)
6651 + goto out;
6652 +
6653 + /*
6654 + * Step 2 - Verify padding
6655 + * Retrieve the last byte of the payload; this is the padding size.
6656 + */
6657 + cryptlen -= 1;
6658 + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
6659 +
6660 + /* RFC recommendation for invalid padding size. */
6661 + if (cryptlen < pad_size + hash_size) {
6662 + pad_size = 0;
6663 + paderr = -EBADMSG;
6664 + }
6665 + cryptlen -= pad_size;
6666 + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
6667 +
6668 + /* Every padding byte must be equal to pad_size. We verify them all. */
6669 + for (i = 0; i < pad_size; i++)
6670 + if (padding[i] != pad_size)
6671 + paderr = -EBADMSG;
6672 +
6673 + /*
6674 + * Step 3 - Verify hash
6675 + * Align the digest result as required by the hash transform. Enough
6676 + * space was allocated in crypto_tls_init_tfm
6677 + */
6678 + hash = (u8 *)ALIGN((unsigned long)hash +
6679 + crypto_ahash_alignmask(ctx->auth),
6680 + crypto_ahash_alignmask(ctx->auth) + 1);
6681 + /*
6682 + * The two bytes at the end of the associated data form the length field.
6683 + * It must be updated with the length of the cleartext message before
6684 + * the hash is calculated.
6685 + */
6686 + len_field = sg_virt(req->src) + req->assoclen - 2;
6687 + cryptlen -= hash_size;
6688 + *len_field = htons(cryptlen);
6689 +
6690 + /* This is the hash from the decrypted packet. Save it for later */
6691 + ihash = hash + hash_size;
6692 + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
6693 +
6694 + /* Now compute and compare our ICV with the one from the packet */
6695 + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
6696 + if (!err)
6697 + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
6698 +
6699 + if (req->src != req->dst) {
6700 + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
6701 + req->assoclen);
6702 + if (err)
6703 + goto out;
6704 + }
6705 +
6706 + /* return the first found error */
6707 + if (paderr)
6708 + err = paderr;
6709 +
6710 +out:
6711 + aead_request_complete(req, err);
6712 + return err;
6713 +}
6714 +
6715 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
6716 +{
6717 + struct aead_instance *inst = aead_alg_instance(tfm);
6718 + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
6719 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6720 + struct crypto_ahash *auth;
6721 + struct crypto_skcipher *enc;
6722 + struct crypto_skcipher *null;
6723 + int err;
6724 +
6725 + auth = crypto_spawn_ahash(&ictx->auth);
6726 + if (IS_ERR(auth))
6727 + return PTR_ERR(auth);
6728 +
6729 + enc = crypto_spawn_skcipher(&ictx->enc);
6730 + err = PTR_ERR(enc);
6731 + if (IS_ERR(enc))
6732 + goto err_free_ahash;
6733 +
6734 + null = crypto_get_default_null_skcipher2();
6735 + err = PTR_ERR(null);
6736 + if (IS_ERR(null))
6737 + goto err_free_skcipher;
6738 +
6739 + ctx->auth = auth;
6740 + ctx->enc = enc;
6741 + ctx->null = null;
6742 +
6743 + /*
6744 + * Allow enough space for two digests. The two digests will be compared
6745 + * during the decryption phase. One will come from the decrypted packet
6746 + * and the other will be calculated. For encryption, one digest is
6747 + * padded (up to a cipher blocksize) and chained with the payload
6748 + */
6749 + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
6750 + crypto_ahash_alignmask(auth),
6751 + crypto_ahash_alignmask(auth) + 1) +
6752 + max(crypto_ahash_digestsize(auth),
6753 + crypto_skcipher_blocksize(enc));
6754 +
6755 + crypto_aead_set_reqsize(tfm,
6756 + sizeof(struct tls_request_ctx) +
6757 + ctx->reqoff +
6758 + max_t(unsigned int,
6759 + crypto_ahash_reqsize(auth) +
6760 + sizeof(struct ahash_request),
6761 + crypto_skcipher_reqsize(enc) +
6762 + sizeof(struct skcipher_request)));
6763 +
6764 + return 0;
6765 +
6766 +err_free_skcipher:
6767 + crypto_free_skcipher(enc);
6768 +err_free_ahash:
6769 + crypto_free_ahash(auth);
6770 + return err;
6771 +}
6772 +
6773 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
6774 +{
6775 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6776 +
6777 + crypto_free_ahash(ctx->auth);
6778 + crypto_free_skcipher(ctx->enc);
6779 + crypto_put_default_null_skcipher2();
6780 +}
6781 +
6782 +static void crypto_tls_free(struct aead_instance *inst)
6783 +{
6784 + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
6785 +
6786 + crypto_drop_skcipher(&ctx->enc);
6787 + crypto_drop_ahash(&ctx->auth);
6788 + kfree(inst);
6789 +}
6790 +
6791 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
6792 +{
6793 + struct crypto_attr_type *algt;
6794 + struct aead_instance *inst;
6795 + struct hash_alg_common *auth;
6796 + struct crypto_alg *auth_base;
6797 + struct skcipher_alg *enc;
6798 + struct tls_instance_ctx *ctx;
6799 + const char *enc_name;
6800 + int err;
6801 +
6802 + algt = crypto_get_attr_type(tb);
6803 + if (IS_ERR(algt))
6804 + return PTR_ERR(algt);
6805 +
6806 + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
6807 + return -EINVAL;
6808 +
6809 + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
6810 + CRYPTO_ALG_TYPE_AHASH_MASK |
6811 + crypto_requires_sync(algt->type, algt->mask));
6812 + if (IS_ERR(auth))
6813 + return PTR_ERR(auth);
6814 +
6815 + auth_base = &auth->base;
6816 +
6817 + enc_name = crypto_attr_alg_name(tb[2]);
6818 + err = PTR_ERR(enc_name);
6819 + if (IS_ERR(enc_name))
6820 + goto out_put_auth;
6821 +
6822 + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
6823 + err = -ENOMEM;
6824 + if (!inst)
6825 + goto out_put_auth;
6826 +
6827 + ctx = aead_instance_ctx(inst);
6828 +
6829 + err = crypto_init_ahash_spawn(&ctx->auth, auth,
6830 + aead_crypto_instance(inst));
6831 + if (err)
6832 + goto err_free_inst;
6833 +
6834 + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
6835 + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
6836 + crypto_requires_sync(algt->type,
6837 + algt->mask));
6838 + if (err)
6839 + goto err_drop_auth;
6840 +
6841 + enc = crypto_spawn_skcipher_alg(&ctx->enc);
6842 +
6843 + err = -ENAMETOOLONG;
6844 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
6845 + "tls10(%s,%s)", auth_base->cra_name,
6846 + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
6847 + goto err_drop_enc;
6848 +
6849 + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
6850 + "tls10(%s,%s)", auth_base->cra_driver_name,
6851 + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
6852 + goto err_drop_enc;
6853 +
6854 + inst->alg.base.cra_flags = (auth_base->cra_flags |
6855 + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
6856 + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
6857 + auth_base->cra_priority;
6858 + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
6859 + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
6860 + enc->base.cra_alignmask;
6861 + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
6862 +
6863 + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
6864 + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
6865 + inst->alg.maxauthsize = auth->digestsize;
6866 +
6867 + inst->alg.init = crypto_tls_init_tfm;
6868 + inst->alg.exit = crypto_tls_exit_tfm;
6869 +
6870 + inst->alg.setkey = crypto_tls_setkey;
6871 + inst->alg.encrypt = crypto_tls_encrypt;
6872 + inst->alg.decrypt = crypto_tls_decrypt;
6873 +
6874 + inst->free = crypto_tls_free;
6875 +
6876 + err = aead_register_instance(tmpl, inst);
6877 + if (err)
6878 + goto err_drop_enc;
6879 +
6880 +out:
6881 + crypto_mod_put(auth_base);
6882 + return err;
6883 +
6884 +err_drop_enc:
6885 + crypto_drop_skcipher(&ctx->enc);
6886 +err_drop_auth:
6887 + crypto_drop_ahash(&ctx->auth);
6888 +err_free_inst:
6889 + kfree(inst);
6890 +out_put_auth:
6891 + goto out;
6892 +}
6893 +
6894 +static struct crypto_template crypto_tls_tmpl = {
6895 + .name = "tls10",
6896 + .create = crypto_tls_create,
6897 + .module = THIS_MODULE,
6898 +};
6899 +
6900 +static int __init crypto_tls_module_init(void)
6901 +{
6902 + return crypto_register_template(&crypto_tls_tmpl);
6903 +}
6904 +
6905 +static void __exit crypto_tls_module_exit(void)
6906 +{
6907 + crypto_unregister_template(&crypto_tls_tmpl);
6908 +}
6909 +
6910 +module_init(crypto_tls_module_init);
6911 +module_exit(crypto_tls_module_exit);
6912 +
6913 +MODULE_LICENSE("GPL");
6914 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
6915 --- a/drivers/crypto/caam/Kconfig
6916 +++ b/drivers/crypto/caam/Kconfig
6917 @@ -1,6 +1,11 @@
6918 +config CRYPTO_DEV_FSL_CAAM_COMMON
6919 + tristate
6920 +
6921 config CRYPTO_DEV_FSL_CAAM
6922 - tristate "Freescale CAAM-Multicore driver backend"
6923 + tristate "Freescale CAAM-Multicore platform driver backend"
6924 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
6925 + select CRYPTO_DEV_FSL_CAAM_COMMON
6926 + select SOC_BUS
6927 help
6928 Enables the driver module for Freescale's Cryptographic Accelerator
6929 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
6930 @@ -11,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
6931 To compile this driver as a module, choose M here: the module
6932 will be called caam.
6933
6934 +if CRYPTO_DEV_FSL_CAAM
6935 +
6936 +config CRYPTO_DEV_FSL_CAAM_DEBUG
6937 + bool "Enable debug output in CAAM driver"
6938 + help
6939 + Selecting this will enable printing of various debug
6940 + information in the CAAM driver.
6941 +
6942 config CRYPTO_DEV_FSL_CAAM_JR
6943 tristate "Freescale CAAM Job Ring driver backend"
6944 - depends on CRYPTO_DEV_FSL_CAAM
6945 default y
6946 help
6947 Enables the driver module for Job Rings which are part of
6948 @@ -24,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
6949 To compile this driver as a module, choose M here: the module
6950 will be called caam_jr.
6951
6952 +if CRYPTO_DEV_FSL_CAAM_JR
6953 +
6954 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6955 int "Job Ring size"
6956 - depends on CRYPTO_DEV_FSL_CAAM_JR
6957 range 2 9
6958 default "9"
6959 help
6960 @@ -44,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6961
6962 config CRYPTO_DEV_FSL_CAAM_INTC
6963 bool "Job Ring interrupt coalescing"
6964 - depends on CRYPTO_DEV_FSL_CAAM_JR
6965 help
6966 Enable the Job Ring's interrupt coalescing feature.
6967
6968 @@ -74,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
6969
6970 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6971 tristate "Register algorithm implementations with the Crypto API"
6972 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6973 default y
6974 select CRYPTO_AEAD
6975 select CRYPTO_AUTHENC
6976 @@ -87,9 +98,25 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6977 To compile this as a module, choose M here: the module
6978 will be called caamalg.
6979
6980 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
6981 + tristate "Queue Interface as Crypto API backend"
6982 + depends on FSL_SDK_DPA && NET
6983 + default y
6984 + select CRYPTO_AUTHENC
6985 + select CRYPTO_BLKCIPHER
6986 + help
6987 + Selecting this will use CAAM Queue Interface (QI) for sending
6988 + and receiving crypto jobs to/from CAAM. This gives better performance
6989 + than the job ring interface when the number of cores is greater than
6990 + the number of job rings assigned to the kernel. The number of portals
6991 + assigned to the kernel should also be greater than the number of
6992 + job rings.
6993 +
6994 + To compile this as a module, choose M here: the module
6995 + will be called caamalg_qi.
6996 +
6997 config CRYPTO_DEV_FSL_CAAM_AHASH_API
6998 tristate "Register hash algorithm implementations with Crypto API"
6999 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7000 default y
7001 select CRYPTO_HASH
7002 help
7003 @@ -101,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
7004
7005 config CRYPTO_DEV_FSL_CAAM_PKC_API
7006 tristate "Register public key cryptography implementations with Crypto API"
7007 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7008 default y
7009 select CRYPTO_RSA
7010 help
7011 @@ -113,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
7012
7013 config CRYPTO_DEV_FSL_CAAM_RNG_API
7014 tristate "Register caam device for hwrng API"
7015 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7016 default y
7017 select CRYPTO_RNG
7018 select HW_RANDOM
7019 @@ -124,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
7020 To compile this as a module, choose M here: the module
7021 will be called caamrng.
7022
7023 -config CRYPTO_DEV_FSL_CAAM_IMX
7024 - def_bool SOC_IMX6 || SOC_IMX7D
7025 - depends on CRYPTO_DEV_FSL_CAAM
7026 +endif # CRYPTO_DEV_FSL_CAAM_JR
7027
7028 -config CRYPTO_DEV_FSL_CAAM_DEBUG
7029 - bool "Enable debug output in CAAM driver"
7030 - depends on CRYPTO_DEV_FSL_CAAM
7031 - help
7032 - Selecting this will enable printing of various debug
7033 - information in the CAAM driver.
7034 +endif # CRYPTO_DEV_FSL_CAAM
7035 +
7036 +config CRYPTO_DEV_FSL_DPAA2_CAAM
7037 + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
7038 + depends on FSL_MC_DPIO
7039 + select CRYPTO_DEV_FSL_CAAM_COMMON
7040 + select CRYPTO_BLKCIPHER
7041 + select CRYPTO_AUTHENC
7042 + select CRYPTO_AEAD
7043 + select CRYPTO_HASH
7044 + ---help---
7045 + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
7046 + It handles DPSECI DPAA2 objects that sit on the Management Complex
7047 + (MC) fsl-mc bus.
7048 +
7049 + To compile this as a module, choose M here: the module
7050 + will be called dpaa2_caam.
7051 +
7052 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
7053 + def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
7054 + CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
7055 + CRYPTO_DEV_FSL_DPAA2_CAAM)
7056 +
7057 +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
7058 + def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
7059 + CRYPTO_DEV_FSL_DPAA2_CAAM)
7060 --- a/drivers/crypto/caam/Makefile
7061 +++ b/drivers/crypto/caam/Makefile
7062 @@ -5,13 +5,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
7063 ccflags-y := -DDEBUG
7064 endif
7065
7066 +ccflags-y += -DVERSION=\"\"
7067 +
7068 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
7069 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
7070 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
7071 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
7072 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
7073 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
7074 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
7075 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
7076 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
7077 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
7078
7079 caam-objs := ctrl.o
7080 -caam_jr-objs := jr.o key_gen.o error.o
7081 +caam_jr-objs := jr.o key_gen.o
7082 caam_pkc-y := caampkc.o pkc_desc.o
7083 +ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
7084 + ccflags-y += -DCONFIG_CAAM_QI
7085 + caam-objs += qi.o
7086 +endif
7087 +
7088 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
7089 +
7090 +dpaa2_caam-y := caamalg_qi2.o dpseci.o
7091 --- a/drivers/crypto/caam/caamalg.c
7092 +++ b/drivers/crypto/caam/caamalg.c
7093 @@ -2,6 +2,7 @@
7094 * caam - Freescale FSL CAAM support for crypto API
7095 *
7096 * Copyright 2008-2011 Freescale Semiconductor, Inc.
7097 + * Copyright 2016 NXP
7098 *
7099 * Based on talitos crypto API driver.
7100 *
7101 @@ -53,6 +54,7 @@
7102 #include "error.h"
7103 #include "sg_sw_sec4.h"
7104 #include "key_gen.h"
7105 +#include "caamalg_desc.h"
7106
7107 /*
7108 * crypto alg
7109 @@ -62,8 +64,6 @@
7110 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
7111 CTR_RFC3686_NONCE_SIZE + \
7112 SHA512_DIGEST_SIZE * 2)
7113 -/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
7114 -#define CAAM_MAX_IV_LENGTH 16
7115
7116 #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
7117 #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
7118 @@ -71,37 +71,6 @@
7119 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
7120 CAAM_CMD_SZ * 5)
7121
7122 -/* length of descriptors text */
7123 -#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
7124 -#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
7125 -#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
7126 -#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
7127 -
7128 -/* Note: Nonce is counted in enckeylen */
7129 -#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
7130 -
7131 -#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
7132 -#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
7133 -#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
7134 -
7135 -#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
7136 -#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
7137 -#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
7138 -
7139 -#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
7140 -#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7141 -#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7142 -
7143 -#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
7144 -#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
7145 -#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
7146 -
7147 -#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
7148 -#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
7149 - 20 * CAAM_CMD_SZ)
7150 -#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
7151 - 15 * CAAM_CMD_SZ)
7152 -
7153 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
7154 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
7155
7156 @@ -112,47 +81,11 @@
7157 #define debug(format, arg...)
7158 #endif
7159
7160 -#ifdef DEBUG
7161 -#include <linux/highmem.h>
7162 -
7163 -static void dbg_dump_sg(const char *level, const char *prefix_str,
7164 - int prefix_type, int rowsize, int groupsize,
7165 - struct scatterlist *sg, size_t tlen, bool ascii,
7166 - bool may_sleep)
7167 -{
7168 - struct scatterlist *it;
7169 - void *it_page;
7170 - size_t len;
7171 - void *buf;
7172 -
7173 - for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
7174 - /*
7175 - * make sure the scatterlist's page
7176 - * has a valid virtual memory mapping
7177 - */
7178 - it_page = kmap_atomic(sg_page(it));
7179 - if (unlikely(!it_page)) {
7180 - printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
7181 - return;
7182 - }
7183 -
7184 - buf = it_page + it->offset;
7185 - len = min_t(size_t, tlen, it->length);
7186 - print_hex_dump(level, prefix_str, prefix_type, rowsize,
7187 - groupsize, buf, len, ascii);
7188 - tlen -= len;
7189 -
7190 - kunmap_atomic(it_page);
7191 - }
7192 -}
7193 -#endif
7194 -
7195 static struct list_head alg_list;
7196
7197 struct caam_alg_entry {
7198 int class1_alg_type;
7199 int class2_alg_type;
7200 - int alg_op;
7201 bool rfc3686;
7202 bool geniv;
7203 };
7204 @@ -163,302 +96,71 @@ struct caam_aead_alg {
7205 bool registered;
7206 };
7207
7208 -/* Set DK bit in class 1 operation if shared */
7209 -static inline void append_dec_op1(u32 *desc, u32 type)
7210 -{
7211 - u32 *jump_cmd, *uncond_jump_cmd;
7212 -
7213 - /* DK bit is valid only for AES */
7214 - if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
7215 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7216 - OP_ALG_DECRYPT);
7217 - return;
7218 - }
7219 -
7220 - jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
7221 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7222 - OP_ALG_DECRYPT);
7223 - uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7224 - set_jump_tgt_here(desc, jump_cmd);
7225 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7226 - OP_ALG_DECRYPT | OP_ALG_AAI_DK);
7227 - set_jump_tgt_here(desc, uncond_jump_cmd);
7228 -}
7229 -
7230 -/*
7231 - * For aead functions, read payload and write payload,
7232 - * both of which are specified in req->src and req->dst
7233 - */
7234 -static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
7235 -{
7236 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7237 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
7238 - KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
7239 -}
7240 -
7241 -/*
7242 - * For ablkcipher encrypt and decrypt, read from req->src and
7243 - * write to req->dst
7244 - */
7245 -static inline void ablkcipher_append_src_dst(u32 *desc)
7246 -{
7247 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7248 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7249 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
7250 - KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7251 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7252 -}
7253 -
7254 /*
7255 * per-session context
7256 */
7257 struct caam_ctx {
7258 - struct device *jrdev;
7259 u32 sh_desc_enc[DESC_MAX_USED_LEN];
7260 u32 sh_desc_dec[DESC_MAX_USED_LEN];
7261 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
7262 + u8 key[CAAM_MAX_KEY_SIZE];
7263 dma_addr_t sh_desc_enc_dma;
7264 dma_addr_t sh_desc_dec_dma;
7265 dma_addr_t sh_desc_givenc_dma;
7266 - u32 class1_alg_type;
7267 - u32 class2_alg_type;
7268 - u32 alg_op;
7269 - u8 key[CAAM_MAX_KEY_SIZE];
7270 dma_addr_t key_dma;
7271 - unsigned int enckeylen;
7272 - unsigned int split_key_len;
7273 - unsigned int split_key_pad_len;
7274 + enum dma_data_direction dir;
7275 + struct device *jrdev;
7276 + struct alginfo adata;
7277 + struct alginfo cdata;
7278 unsigned int authsize;
7279 };
7280
7281 -static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
7282 - int keys_fit_inline, bool is_rfc3686)
7283 -{
7284 - u32 *nonce;
7285 - unsigned int enckeylen = ctx->enckeylen;
7286 -
7287 - /*
7288 - * RFC3686 specific:
7289 - * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
7290 - * | enckeylen = encryption key size + nonce size
7291 - */
7292 - if (is_rfc3686)
7293 - enckeylen -= CTR_RFC3686_NONCE_SIZE;
7294 -
7295 - if (keys_fit_inline) {
7296 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7297 - ctx->split_key_len, CLASS_2 |
7298 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7299 - append_key_as_imm(desc, (void *)ctx->key +
7300 - ctx->split_key_pad_len, enckeylen,
7301 - enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7302 - } else {
7303 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7304 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7305 - append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
7306 - enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7307 - }
7308 -
7309 - /* Load Counter into CONTEXT1 reg */
7310 - if (is_rfc3686) {
7311 - nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
7312 - enckeylen);
7313 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
7314 - LDST_CLASS_IND_CCB |
7315 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
7316 - append_move(desc,
7317 - MOVE_SRC_OUTFIFO |
7318 - MOVE_DEST_CLASS1CTX |
7319 - (16 << MOVE_OFFSET_SHIFT) |
7320 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
7321 - }
7322 -}
7323 -
7324 -static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
7325 - int keys_fit_inline, bool is_rfc3686)
7326 -{
7327 - u32 *key_jump_cmd;
7328 -
7329 - /* Note: Context registers are saved. */
7330 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
7331 -
7332 - /* Skip if already shared */
7333 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7334 - JUMP_COND_SHRD);
7335 -
7336 - append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7337 -
7338 - set_jump_tgt_here(desc, key_jump_cmd);
7339 -}
7340 -
7341 static int aead_null_set_sh_desc(struct crypto_aead *aead)
7342 {
7343 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7344 struct device *jrdev = ctx->jrdev;
7345 - bool keys_fit_inline = false;
7346 - u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
7347 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
7348 u32 *desc;
7349 + int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
7350 + ctx->adata.keylen_pad;
7351
7352 /*
7353 * Job Descriptor and Shared Descriptors
7354 * must all fit into the 64-word Descriptor h/w Buffer
7355 */
7356 - if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
7357 - ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7358 - keys_fit_inline = true;
7359 + if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
7360 + ctx->adata.key_inline = true;
7361 + ctx->adata.key_virt = ctx->key;
7362 + } else {
7363 + ctx->adata.key_inline = false;
7364 + ctx->adata.key_dma = ctx->key_dma;
7365 + }
7366
7367 /* aead_encrypt shared descriptor */
7368 desc = ctx->sh_desc_enc;
7369 -
7370 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7371 -
7372 - /* Skip if already shared */
7373 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7374 - JUMP_COND_SHRD);
7375 - if (keys_fit_inline)
7376 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7377 - ctx->split_key_len, CLASS_2 |
7378 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7379 - else
7380 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7381 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7382 - set_jump_tgt_here(desc, key_jump_cmd);
7383 -
7384 - /* assoclen + cryptlen = seqinlen */
7385 - append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
7386 -
7387 - /* Prepare to read and write cryptlen + assoclen bytes */
7388 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7389 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7390 -
7391 - /*
7392 - * MOVE_LEN opcode is not available in all SEC HW revisions,
7393 - * thus need to do some magic, i.e. self-patch the descriptor
7394 - * buffer.
7395 - */
7396 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7397 - MOVE_DEST_MATH3 |
7398 - (0x6 << MOVE_LEN_SHIFT));
7399 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
7400 - MOVE_DEST_DESCBUF |
7401 - MOVE_WAITCOMP |
7402 - (0x8 << MOVE_LEN_SHIFT));
7403 -
7404 - /* Class 2 operation */
7405 - append_operation(desc, ctx->class2_alg_type |
7406 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7407 -
7408 - /* Read and write cryptlen bytes */
7409 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7410 -
7411 - set_move_tgt_here(desc, read_move_cmd);
7412 - set_move_tgt_here(desc, write_move_cmd);
7413 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7414 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7415 - MOVE_AUX_LS);
7416 -
7417 - /* Write ICV */
7418 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7419 - LDST_SRCDST_BYTE_CONTEXT);
7420 -
7421 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7422 - desc_bytes(desc),
7423 - DMA_TO_DEVICE);
7424 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7425 - dev_err(jrdev, "unable to map shared descriptor\n");
7426 - return -ENOMEM;
7427 - }
7428 -#ifdef DEBUG
7429 - print_hex_dump(KERN_ERR,
7430 - "aead null enc shdesc@"__stringify(__LINE__)": ",
7431 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7432 - desc_bytes(desc), 1);
7433 -#endif
7434 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
7435 + ctrlpriv->era);
7436 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7437 + desc_bytes(desc), ctx->dir);
7438
7439 /*
7440 * Job Descriptor and Shared Descriptors
7441 * must all fit into the 64-word Descriptor h/w Buffer
7442 */
7443 - keys_fit_inline = false;
7444 - if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
7445 - ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7446 - keys_fit_inline = true;
7447 -
7448 - desc = ctx->sh_desc_dec;
7449 + if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
7450 + ctx->adata.key_inline = true;
7451 + ctx->adata.key_virt = ctx->key;
7452 + } else {
7453 + ctx->adata.key_inline = false;
7454 + ctx->adata.key_dma = ctx->key_dma;
7455 + }
7456
7457 /* aead_decrypt shared descriptor */
7458 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7459 -
7460 - /* Skip if already shared */
7461 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7462 - JUMP_COND_SHRD);
7463 - if (keys_fit_inline)
7464 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7465 - ctx->split_key_len, CLASS_2 |
7466 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7467 - else
7468 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7469 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7470 - set_jump_tgt_here(desc, key_jump_cmd);
7471 -
7472 - /* Class 2 operation */
7473 - append_operation(desc, ctx->class2_alg_type |
7474 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7475 -
7476 - /* assoclen + cryptlen = seqoutlen */
7477 - append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7478 -
7479 - /* Prepare to read and write cryptlen + assoclen bytes */
7480 - append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
7481 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
7482 -
7483 - /*
7484 - * MOVE_LEN opcode is not available in all SEC HW revisions,
7485 - * thus need to do some magic, i.e. self-patch the descriptor
7486 - * buffer.
7487 - */
7488 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7489 - MOVE_DEST_MATH2 |
7490 - (0x6 << MOVE_LEN_SHIFT));
7491 - write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
7492 - MOVE_DEST_DESCBUF |
7493 - MOVE_WAITCOMP |
7494 - (0x8 << MOVE_LEN_SHIFT));
7495 -
7496 - /* Read and write cryptlen bytes */
7497 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7498 -
7499 - /*
7500 - * Insert a NOP here, since we need at least 4 instructions between
7501 - * code patching the descriptor buffer and the location being patched.
7502 - */
7503 - jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7504 - set_jump_tgt_here(desc, jump_cmd);
7505 -
7506 - set_move_tgt_here(desc, read_move_cmd);
7507 - set_move_tgt_here(desc, write_move_cmd);
7508 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7509 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7510 - MOVE_AUX_LS);
7511 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7512 -
7513 - /* Load ICV */
7514 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7515 - FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7516 -
7517 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7518 - desc_bytes(desc),
7519 - DMA_TO_DEVICE);
7520 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7521 - dev_err(jrdev, "unable to map shared descriptor\n");
7522 - return -ENOMEM;
7523 - }
7524 -#ifdef DEBUG
7525 - print_hex_dump(KERN_ERR,
7526 - "aead null dec shdesc@"__stringify(__LINE__)": ",
7527 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7528 - desc_bytes(desc), 1);
7529 -#endif
7530 + desc = ctx->sh_desc_dec;
7531 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
7532 + ctrlpriv->era);
7533 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7534 + desc_bytes(desc), ctx->dir);
7535
7536 return 0;
7537 }
7538 @@ -470,11 +172,12 @@ static int aead_set_sh_desc(struct crypt
7539 unsigned int ivsize = crypto_aead_ivsize(aead);
7540 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7541 struct device *jrdev = ctx->jrdev;
7542 - bool keys_fit_inline;
7543 - u32 geniv, moveiv;
7544 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
7545 u32 ctx1_iv_off = 0;
7546 - u32 *desc;
7547 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
7548 + u32 *desc, *nonce = NULL;
7549 + u32 inl_mask;
7550 + unsigned int data_len[2];
7551 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
7552 OP_ALG_AAI_CTR_MOD128);
7553 const bool is_rfc3686 = alg->caam.rfc3686;
7554
7555 @@ -482,7 +185,7 @@ static int aead_set_sh_desc(struct crypt
7556 return 0;
7557
7558 /* NULL encryption / decryption */
7559 - if (!ctx->enckeylen)
7560 + if (!ctx->cdata.keylen)
7561 return aead_null_set_sh_desc(aead);
7562
7563 /*
7564 @@ -497,8 +200,14 @@ static int aead_set_sh_desc(struct crypt
7565 * RFC3686 specific:
7566 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
7567 */
7568 - if (is_rfc3686)
7569 + if (is_rfc3686) {
7570 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
7571 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
7572 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
7573 + }
7574 +
7575 + data_len[0] = ctx->adata.keylen_pad;
7576 + data_len[1] = ctx->cdata.keylen;
7577
7578 if (alg->caam.geniv)
7579 goto skip_enc;
7580 @@ -507,146 +216,64 @@ static int aead_set_sh_desc(struct crypt
7581 * Job Descriptor and Shared Descriptors
7582 * must all fit into the 64-word Descriptor h/w Buffer
7583 */
7584 - keys_fit_inline = false;
7585 - if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7586 - ctx->split_key_pad_len + ctx->enckeylen +
7587 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7588 - CAAM_DESC_BYTES_MAX)
7589 - keys_fit_inline = true;
7590 -
7591 - /* aead_encrypt shared descriptor */
7592 - desc = ctx->sh_desc_enc;
7593 -
7594 - /* Note: Context registers are saved. */
7595 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7596 -
7597 - /* Class 2 operation */
7598 - append_operation(desc, ctx->class2_alg_type |
7599 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7600 -
7601 - /* Read and write assoclen bytes */
7602 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7603 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7604 + if (desc_inline_query(DESC_AEAD_ENC_LEN +
7605 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7606 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7607 + ARRAY_SIZE(data_len)) < 0)
7608 + return -EINVAL;
7609
7610 - /* Skip assoc data */
7611 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7612 + if (inl_mask & 1)
7613 + ctx->adata.key_virt = ctx->key;
7614 + else
7615 + ctx->adata.key_dma = ctx->key_dma;
7616
7617 - /* read assoc before reading payload */
7618 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7619 - FIFOLDST_VLF);
7620 + if (inl_mask & 2)
7621 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7622 + else
7623 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7624
7625 - /* Load Counter into CONTEXT1 reg */
7626 - if (is_rfc3686)
7627 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7628 - LDST_SRCDST_BYTE_CONTEXT |
7629 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7630 - LDST_OFFSET_SHIFT));
7631 -
7632 - /* Class 1 operation */
7633 - append_operation(desc, ctx->class1_alg_type |
7634 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7635 -
7636 - /* Read and write cryptlen bytes */
7637 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7638 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7639 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
7640 -
7641 - /* Write ICV */
7642 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7643 - LDST_SRCDST_BYTE_CONTEXT);
7644 + ctx->adata.key_inline = !!(inl_mask & 1);
7645 + ctx->cdata.key_inline = !!(inl_mask & 2);
7646
7647 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7648 - desc_bytes(desc),
7649 - DMA_TO_DEVICE);
7650 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7651 - dev_err(jrdev, "unable to map shared descriptor\n");
7652 - return -ENOMEM;
7653 - }
7654 -#ifdef DEBUG
7655 - print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
7656 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7657 - desc_bytes(desc), 1);
7658 -#endif
7659 + /* aead_encrypt shared descriptor */
7660 + desc = ctx->sh_desc_enc;
7661 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
7662 + ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
7663 + false, ctrlpriv->era);
7664 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7665 + desc_bytes(desc), ctx->dir);
7666
7667 skip_enc:
7668 /*
7669 * Job Descriptor and Shared Descriptors
7670 * must all fit into the 64-word Descriptor h/w Buffer
7671 */
7672 - keys_fit_inline = false;
7673 - if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7674 - ctx->split_key_pad_len + ctx->enckeylen +
7675 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7676 - CAAM_DESC_BYTES_MAX)
7677 - keys_fit_inline = true;
7678 -
7679 - /* aead_decrypt shared descriptor */
7680 - desc = ctx->sh_desc_dec;
7681 -
7682 - /* Note: Context registers are saved. */
7683 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7684 -
7685 - /* Class 2 operation */
7686 - append_operation(desc, ctx->class2_alg_type |
7687 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7688 + if (desc_inline_query(DESC_AEAD_DEC_LEN +
7689 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7690 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7691 + ARRAY_SIZE(data_len)) < 0)
7692 + return -EINVAL;
7693
7694 - /* Read and write assoclen bytes */
7695 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7696 - if (alg->caam.geniv)
7697 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
7698 + if (inl_mask & 1)
7699 + ctx->adata.key_virt = ctx->key;
7700 else
7701 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7702 -
7703 - /* Skip assoc data */
7704 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7705 -
7706 - /* read assoc before reading payload */
7707 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7708 - KEY_VLF);
7709 -
7710 - if (alg->caam.geniv) {
7711 - append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
7712 - LDST_SRCDST_BYTE_CONTEXT |
7713 - (ctx1_iv_off << LDST_OFFSET_SHIFT));
7714 - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
7715 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
7716 - }
7717 -
7718 - /* Load Counter into CONTEXT1 reg */
7719 - if (is_rfc3686)
7720 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7721 - LDST_SRCDST_BYTE_CONTEXT |
7722 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7723 - LDST_OFFSET_SHIFT));
7724 + ctx->adata.key_dma = ctx->key_dma;
7725
7726 - /* Choose operation */
7727 - if (ctr_mode)
7728 - append_operation(desc, ctx->class1_alg_type |
7729 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
7730 + if (inl_mask & 2)
7731 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7732 else
7733 - append_dec_op1(desc, ctx->class1_alg_type);
7734 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7735
7736 - /* Read and write cryptlen bytes */
7737 - append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7738 - append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7739 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
7740 -
7741 - /* Load ICV */
7742 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7743 - FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7744 + ctx->adata.key_inline = !!(inl_mask & 1);
7745 + ctx->cdata.key_inline = !!(inl_mask & 2);
7746
7747 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7748 - desc_bytes(desc),
7749 - DMA_TO_DEVICE);
7750 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7751 - dev_err(jrdev, "unable to map shared descriptor\n");
7752 - return -ENOMEM;
7753 - }
7754 -#ifdef DEBUG
7755 - print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
7756 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7757 - desc_bytes(desc), 1);
7758 -#endif
7759 + /* aead_decrypt shared descriptor */
7760 + desc = ctx->sh_desc_dec;
7761 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
7762 + ctx->authsize, alg->caam.geniv, is_rfc3686,
7763 + nonce, ctx1_iv_off, false, ctrlpriv->era);
7764 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7765 + desc_bytes(desc), ctx->dir);
7766
7767 if (!alg->caam.geniv)
7768 goto skip_givenc;
7769 @@ -655,107 +282,32 @@ skip_enc:
7770 * Job Descriptor and Shared Descriptors
7771 * must all fit into the 64-word Descriptor h/w Buffer
7772 */
7773 - keys_fit_inline = false;
7774 - if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7775 - ctx->split_key_pad_len + ctx->enckeylen +
7776 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7777 - CAAM_DESC_BYTES_MAX)
7778 - keys_fit_inline = true;
7779 -
7780 - /* aead_givencrypt shared descriptor */
7781 - desc = ctx->sh_desc_enc;
7782 -
7783 - /* Note: Context registers are saved. */
7784 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7785 + if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
7786 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7787 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7788 + ARRAY_SIZE(data_len)) < 0)
7789 + return -EINVAL;
7790
7791 - if (is_rfc3686)
7792 - goto copy_iv;
7793 + if (inl_mask & 1)
7794 + ctx->adata.key_virt = ctx->key;
7795 + else
7796 + ctx->adata.key_dma = ctx->key_dma;
7797
7798 - /* Generate IV */
7799 - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
7800 - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
7801 - NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7802 - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
7803 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7804 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7805 - append_move(desc, MOVE_WAITCOMP |
7806 - MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
7807 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7808 - (ivsize << MOVE_LEN_SHIFT));
7809 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7810 -
7811 -copy_iv:
7812 - /* Copy IV to class 1 context */
7813 - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
7814 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7815 - (ivsize << MOVE_LEN_SHIFT));
7816 -
7817 - /* Return to encryption */
7818 - append_operation(desc, ctx->class2_alg_type |
7819 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7820 -
7821 - /* Read and write assoclen bytes */
7822 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7823 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7824 -
7825 - /* ivsize + cryptlen = seqoutlen - authsize */
7826 - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
7827 -
7828 - /* Skip assoc data */
7829 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7830 -
7831 - /* read assoc before reading payload */
7832 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7833 - KEY_VLF);
7834 -
7835 - /* Copy iv from outfifo to class 2 fifo */
7836 - moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
7837 - NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7838 - append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
7839 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7840 - append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
7841 - LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
7842 + if (inl_mask & 2)
7843 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7844 + else
7845 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7846
7847 - /* Load Counter into CONTEXT1 reg */
7848 - if (is_rfc3686)
7849 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7850 - LDST_SRCDST_BYTE_CONTEXT |
7851 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7852 - LDST_OFFSET_SHIFT));
7853 -
7854 - /* Class 1 operation */
7855 - append_operation(desc, ctx->class1_alg_type |
7856 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7857 -
7858 - /* Will write ivsize + cryptlen */
7859 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7860 -
7861 - /* Not need to reload iv */
7862 - append_seq_fifo_load(desc, ivsize,
7863 - FIFOLD_CLASS_SKIP);
7864 -
7865 - /* Will read cryptlen */
7866 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7867 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
7868 - FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
7869 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7870 -
7871 - /* Write ICV */
7872 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7873 - LDST_SRCDST_BYTE_CONTEXT);
7874 + ctx->adata.key_inline = !!(inl_mask & 1);
7875 + ctx->cdata.key_inline = !!(inl_mask & 2);
7876
7877 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7878 - desc_bytes(desc),
7879 - DMA_TO_DEVICE);
7880 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7881 - dev_err(jrdev, "unable to map shared descriptor\n");
7882 - return -ENOMEM;
7883 - }
7884 -#ifdef DEBUG
7885 - print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
7886 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7887 - desc_bytes(desc), 1);
7888 -#endif
7889 + /* aead_givencrypt shared descriptor */
7890 + desc = ctx->sh_desc_enc;
7891 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
7892 + ctx->authsize, is_rfc3686, nonce,
7893 + ctx1_iv_off, false, ctrlpriv->era);
7894 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7895 + desc_bytes(desc), ctx->dir);
7896
7897 skip_givenc:
7898 return 0;
7899 @@ -776,12 +328,12 @@ static int gcm_set_sh_desc(struct crypto
7900 {
7901 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7902 struct device *jrdev = ctx->jrdev;
7903 - bool keys_fit_inline = false;
7904 - u32 *key_jump_cmd, *zero_payload_jump_cmd,
7905 - *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
7906 + unsigned int ivsize = crypto_aead_ivsize(aead);
7907 u32 *desc;
7908 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
7909 + ctx->cdata.keylen;
7910
7911 - if (!ctx->enckeylen || !ctx->authsize)
7912 + if (!ctx->cdata.keylen || !ctx->authsize)
7913 return 0;
7914
7915 /*
7916 @@ -789,175 +341,35 @@ static int gcm_set_sh_desc(struct crypto
7917 * Job Descriptor and Shared Descriptor
7918 * must fit into the 64-word Descriptor h/w Buffer
7919 */
7920 - if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
7921 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
7922 - keys_fit_inline = true;
7923 + if (rem_bytes >= DESC_GCM_ENC_LEN) {
7924 + ctx->cdata.key_inline = true;
7925 + ctx->cdata.key_virt = ctx->key;
7926 + } else {
7927 + ctx->cdata.key_inline = false;
7928 + ctx->cdata.key_dma = ctx->key_dma;
7929 + }
7930
7931 desc = ctx->sh_desc_enc;
7932 -
7933 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7934 -
7935 - /* skip key loading if they are loaded due to sharing */
7936 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7937 - JUMP_COND_SHRD | JUMP_COND_SELF);
7938 - if (keys_fit_inline)
7939 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
7940 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7941 - else
7942 - append_key(desc, ctx->key_dma, ctx->enckeylen,
7943 - CLASS_1 | KEY_DEST_CLASS_REG);
7944 - set_jump_tgt_here(desc, key_jump_cmd);
7945 -
7946 - /* class 1 operation */
7947 - append_operation(desc, ctx->class1_alg_type |
7948 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7949 -
7950 - /* if assoclen + cryptlen is ZERO, skip to ICV write */
7951 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7952 - zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
7953 - JUMP_COND_MATH_Z);
7954 -
7955 - /* if assoclen is ZERO, skip reading the assoc data */
7956 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7957 - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
7958 - JUMP_COND_MATH_Z);
7959 -
7960 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7961 -
7962 - /* skip assoc data */
7963 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7964 -
7965 - /* cryptlen = seqinlen - assoclen */
7966 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
7967 -
7968 - /* if cryptlen is ZERO jump to zero-payload commands */
7969 - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
7970 - JUMP_COND_MATH_Z);
7971 -
7972 - /* read assoc data */
7973 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7974 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
7975 - set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
7976 -
7977 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7978 -
7979 - /* write encrypted data */
7980 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
7981 -
7982 - /* read payload data */
7983 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7984 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7985 -
7986 - /* jump the zero-payload commands */
7987 - append_jump(desc, JUMP_TEST_ALL | 2);
7988 -
7989 - /* zero-payload commands */
7990 - set_jump_tgt_here(desc, zero_payload_jump_cmd);
7991 -
7992 - /* read assoc data */
7993 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7994 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
7995 -
7996 - /* There is no input data */
7997 - set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
7998 -
7999 - /* write ICV */
8000 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8001 - LDST_SRCDST_BYTE_CONTEXT);
8002 -
8003 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8004 - desc_bytes(desc),
8005 - DMA_TO_DEVICE);
8006 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8007 - dev_err(jrdev, "unable to map shared descriptor\n");
8008 - return -ENOMEM;
8009 - }
8010 -#ifdef DEBUG
8011 - print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
8012 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8013 - desc_bytes(desc), 1);
8014 -#endif
8015 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8016 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8017 + desc_bytes(desc), ctx->dir);
8018
8019 /*
8020 * Job Descriptor and Shared Descriptors
8021 * must all fit into the 64-word Descriptor h/w Buffer
8022 */
8023 - keys_fit_inline = false;
8024 - if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8025 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8026 - keys_fit_inline = true;
8027 + if (rem_bytes >= DESC_GCM_DEC_LEN) {
8028 + ctx->cdata.key_inline = true;
8029 + ctx->cdata.key_virt = ctx->key;
8030 + } else {
8031 + ctx->cdata.key_inline = false;
8032 + ctx->cdata.key_dma = ctx->key_dma;
8033 + }
8034
8035 desc = ctx->sh_desc_dec;
8036 -
8037 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8038 -
8039 - /* skip key loading if they are loaded due to sharing */
8040 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8041 - JUMP_TEST_ALL | JUMP_COND_SHRD |
8042 - JUMP_COND_SELF);
8043 - if (keys_fit_inline)
8044 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8045 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8046 - else
8047 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8048 - CLASS_1 | KEY_DEST_CLASS_REG);
8049 - set_jump_tgt_here(desc, key_jump_cmd);
8050 -
8051 - /* class 1 operation */
8052 - append_operation(desc, ctx->class1_alg_type |
8053 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8054 -
8055 - /* if assoclen is ZERO, skip reading the assoc data */
8056 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
8057 - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
8058 - JUMP_COND_MATH_Z);
8059 -
8060 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8061 -
8062 - /* skip assoc data */
8063 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8064 -
8065 - /* read assoc data */
8066 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8067 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8068 -
8069 - set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
8070 -
8071 - /* cryptlen = seqoutlen - assoclen */
8072 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8073 -
8074 - /* jump to zero-payload command if cryptlen is zero */
8075 - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
8076 - JUMP_COND_MATH_Z);
8077 -
8078 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8079 -
8080 - /* store encrypted data */
8081 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8082 -
8083 - /* read payload data */
8084 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8085 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8086 -
8087 - /* zero-payload command */
8088 - set_jump_tgt_here(desc, zero_payload_jump_cmd);
8089 -
8090 - /* read ICV */
8091 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8092 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8093 -
8094 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8095 - desc_bytes(desc),
8096 - DMA_TO_DEVICE);
8097 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8098 - dev_err(jrdev, "unable to map shared descriptor\n");
8099 - return -ENOMEM;
8100 - }
8101 -#ifdef DEBUG
8102 - print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
8103 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8104 - desc_bytes(desc), 1);
8105 -#endif
8106 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8107 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8108 + desc_bytes(desc), ctx->dir);
8109
8110 return 0;
8111 }
8112 @@ -976,11 +388,12 @@ static int rfc4106_set_sh_desc(struct cr
8113 {
8114 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8115 struct device *jrdev = ctx->jrdev;
8116 - bool keys_fit_inline = false;
8117 - u32 *key_jump_cmd;
8118 + unsigned int ivsize = crypto_aead_ivsize(aead);
8119 u32 *desc;
8120 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8121 + ctx->cdata.keylen;
8122
8123 - if (!ctx->enckeylen || !ctx->authsize)
8124 + if (!ctx->cdata.keylen || !ctx->authsize)
8125 return 0;
8126
8127 /*
8128 @@ -988,148 +401,37 @@ static int rfc4106_set_sh_desc(struct cr
8129 * Job Descriptor and Shared Descriptor
8130 * must fit into the 64-word Descriptor h/w Buffer
8131 */
8132 - if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8133 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8134 - keys_fit_inline = true;
8135 + if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
8136 + ctx->cdata.key_inline = true;
8137 + ctx->cdata.key_virt = ctx->key;
8138 + } else {
8139 + ctx->cdata.key_inline = false;
8140 + ctx->cdata.key_dma = ctx->key_dma;
8141 + }
8142
8143 desc = ctx->sh_desc_enc;
8144 -
8145 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8146 -
8147 - /* Skip key loading if it is loaded due to sharing */
8148 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8149 - JUMP_COND_SHRD);
8150 - if (keys_fit_inline)
8151 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8152 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8153 - else
8154 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8155 - CLASS_1 | KEY_DEST_CLASS_REG);
8156 - set_jump_tgt_here(desc, key_jump_cmd);
8157 -
8158 - /* Class 1 operation */
8159 - append_operation(desc, ctx->class1_alg_type |
8160 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8161 -
8162 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8163 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8164 -
8165 - /* Read assoc data */
8166 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8167 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8168 -
8169 - /* Skip IV */
8170 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8171 -
8172 - /* Will read cryptlen bytes */
8173 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8174 -
8175 - /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8176 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8177 -
8178 - /* Skip assoc data */
8179 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8180 -
8181 - /* cryptlen = seqoutlen - assoclen */
8182 - append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
8183 -
8184 - /* Write encrypted data */
8185 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8186 -
8187 - /* Read payload data */
8188 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8189 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
8190 -
8191 - /* Write ICV */
8192 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8193 - LDST_SRCDST_BYTE_CONTEXT);
8194 -
8195 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8196 - desc_bytes(desc),
8197 - DMA_TO_DEVICE);
8198 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8199 - dev_err(jrdev, "unable to map shared descriptor\n");
8200 - return -ENOMEM;
8201 - }
8202 -#ifdef DEBUG
8203 - print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
8204 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8205 - desc_bytes(desc), 1);
8206 -#endif
8207 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8208 + false);
8209 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8210 + desc_bytes(desc), ctx->dir);
8211
8212 /*
8213 * Job Descriptor and Shared Descriptors
8214 * must all fit into the 64-word Descriptor h/w Buffer
8215 */
8216 - keys_fit_inline = false;
8217 - if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
8218 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8219 - keys_fit_inline = true;
8220 + if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
8221 + ctx->cdata.key_inline = true;
8222 + ctx->cdata.key_virt = ctx->key;
8223 + } else {
8224 + ctx->cdata.key_inline = false;
8225 + ctx->cdata.key_dma = ctx->key_dma;
8226 + }
8227
8228 desc = ctx->sh_desc_dec;
8229 -
8230 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8231 -
8232 - /* Skip key loading if it is loaded due to sharing */
8233 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8234 - JUMP_TEST_ALL | JUMP_COND_SHRD);
8235 - if (keys_fit_inline)
8236 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8237 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8238 - else
8239 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8240 - CLASS_1 | KEY_DEST_CLASS_REG);
8241 - set_jump_tgt_here(desc, key_jump_cmd);
8242 -
8243 - /* Class 1 operation */
8244 - append_operation(desc, ctx->class1_alg_type |
8245 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8246 -
8247 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8248 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8249 -
8250 - /* Read assoc data */
8251 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8252 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8253 -
8254 - /* Skip IV */
8255 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8256 -
8257 - /* Will read cryptlen bytes */
8258 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
8259 -
8260 - /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8261 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8262 -
8263 - /* Skip assoc data */
8264 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8265 -
8266 - /* Will write cryptlen bytes */
8267 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8268 -
8269 - /* Store payload data */
8270 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8271 -
8272 - /* Read encrypted data */
8273 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8274 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8275 -
8276 - /* Read ICV */
8277 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8278 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8279 -
8280 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8281 - desc_bytes(desc),
8282 - DMA_TO_DEVICE);
8283 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8284 - dev_err(jrdev, "unable to map shared descriptor\n");
8285 - return -ENOMEM;
8286 - }
8287 -#ifdef DEBUG
8288 - print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
8289 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8290 - desc_bytes(desc), 1);
8291 -#endif
8292 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8293 + false);
8294 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8295 + desc_bytes(desc), ctx->dir);
8296
8297 return 0;
8298 }
8299 @@ -1149,12 +451,12 @@ static int rfc4543_set_sh_desc(struct cr
8300 {
8301 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8302 struct device *jrdev = ctx->jrdev;
8303 - bool keys_fit_inline = false;
8304 - u32 *key_jump_cmd;
8305 - u32 *read_move_cmd, *write_move_cmd;
8306 + unsigned int ivsize = crypto_aead_ivsize(aead);
8307 u32 *desc;
8308 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8309 + ctx->cdata.keylen;
8310
8311 - if (!ctx->enckeylen || !ctx->authsize)
8312 + if (!ctx->cdata.keylen || !ctx->authsize)
8313 return 0;
8314
8315 /*
8316 @@ -1162,151 +464,37 @@ static int rfc4543_set_sh_desc(struct cr
8317 * Job Descriptor and Shared Descriptor
8318 * must fit into the 64-word Descriptor h/w Buffer
8319 */
8320 - if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8321 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8322 - keys_fit_inline = true;
8323 + if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
8324 + ctx->cdata.key_inline = true;
8325 + ctx->cdata.key_virt = ctx->key;
8326 + } else {
8327 + ctx->cdata.key_inline = false;
8328 + ctx->cdata.key_dma = ctx->key_dma;
8329 + }
8330
8331 desc = ctx->sh_desc_enc;
8332 -
8333 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8334 -
8335 - /* Skip key loading if it is loaded due to sharing */
8336 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8337 - JUMP_COND_SHRD);
8338 - if (keys_fit_inline)
8339 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8340 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8341 - else
8342 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8343 - CLASS_1 | KEY_DEST_CLASS_REG);
8344 - set_jump_tgt_here(desc, key_jump_cmd);
8345 -
8346 - /* Class 1 operation */
8347 - append_operation(desc, ctx->class1_alg_type |
8348 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8349 -
8350 - /* assoclen + cryptlen = seqinlen */
8351 - append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
8352 -
8353 - /*
8354 - * MOVE_LEN opcode is not available in all SEC HW revisions,
8355 - * thus need to do some magic, i.e. self-patch the descriptor
8356 - * buffer.
8357 - */
8358 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8359 - (0x6 << MOVE_LEN_SHIFT));
8360 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8361 - (0x8 << MOVE_LEN_SHIFT));
8362 -
8363 - /* Will read assoclen + cryptlen bytes */
8364 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8365 -
8366 - /* Will write assoclen + cryptlen bytes */
8367 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8368 -
8369 - /* Read and write assoclen + cryptlen bytes */
8370 - aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
8371 -
8372 - set_move_tgt_here(desc, read_move_cmd);
8373 - set_move_tgt_here(desc, write_move_cmd);
8374 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8375 - /* Move payload data to OFIFO */
8376 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8377 -
8378 - /* Write ICV */
8379 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8380 - LDST_SRCDST_BYTE_CONTEXT);
8381 -
8382 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8383 - desc_bytes(desc),
8384 - DMA_TO_DEVICE);
8385 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8386 - dev_err(jrdev, "unable to map shared descriptor\n");
8387 - return -ENOMEM;
8388 - }
8389 -#ifdef DEBUG
8390 - print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
8391 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8392 - desc_bytes(desc), 1);
8393 -#endif
8394 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8395 + false);
8396 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8397 + desc_bytes(desc), ctx->dir);
8398
8399 /*
8400 * Job Descriptor and Shared Descriptors
8401 * must all fit into the 64-word Descriptor h/w Buffer
8402 */
8403 - keys_fit_inline = false;
8404 - if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8405 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8406 - keys_fit_inline = true;
8407 + if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
8408 + ctx->cdata.key_inline = true;
8409 + ctx->cdata.key_virt = ctx->key;
8410 + } else {
8411 + ctx->cdata.key_inline = false;
8412 + ctx->cdata.key_dma = ctx->key_dma;
8413 + }
8414
8415 desc = ctx->sh_desc_dec;
8416 -
8417 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8418 -
8419 - /* Skip key loading if it is loaded due to sharing */
8420 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8421 - JUMP_TEST_ALL | JUMP_COND_SHRD);
8422 - if (keys_fit_inline)
8423 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8424 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8425 - else
8426 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8427 - CLASS_1 | KEY_DEST_CLASS_REG);
8428 - set_jump_tgt_here(desc, key_jump_cmd);
8429 -
8430 - /* Class 1 operation */
8431 - append_operation(desc, ctx->class1_alg_type |
8432 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8433 -
8434 - /* assoclen + cryptlen = seqoutlen */
8435 - append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8436 -
8437 - /*
8438 - * MOVE_LEN opcode is not available in all SEC HW revisions,
8439 - * thus need to do some magic, i.e. self-patch the descriptor
8440 - * buffer.
8441 - */
8442 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8443 - (0x6 << MOVE_LEN_SHIFT));
8444 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8445 - (0x8 << MOVE_LEN_SHIFT));
8446 -
8447 - /* Will read assoclen + cryptlen bytes */
8448 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8449 -
8450 - /* Will write assoclen + cryptlen bytes */
8451 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8452 -
8453 - /* Store payload data */
8454 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8455 -
8456 - /* In-snoop assoclen + cryptlen data */
8457 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
8458 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
8459 -
8460 - set_move_tgt_here(desc, read_move_cmd);
8461 - set_move_tgt_here(desc, write_move_cmd);
8462 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8463 - /* Move payload data to OFIFO */
8464 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8465 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8466 -
8467 - /* Read ICV */
8468 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8469 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8470 -
8471 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8472 - desc_bytes(desc),
8473 - DMA_TO_DEVICE);
8474 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8475 - dev_err(jrdev, "unable to map shared descriptor\n");
8476 - return -ENOMEM;
8477 - }
8478 -#ifdef DEBUG
8479 - print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
8480 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8481 - desc_bytes(desc), 1);
8482 -#endif
8483 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8484 + false);
8485 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8486 + desc_bytes(desc), ctx->dir);
8487
8488 return 0;
8489 }
8490 @@ -1322,74 +510,67 @@ static int rfc4543_setauthsize(struct cr
8491 return 0;
8492 }
8493
8494 -static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
8495 - u32 authkeylen)
8496 -{
8497 - return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
8498 - ctx->split_key_pad_len, key_in, authkeylen,
8499 - ctx->alg_op);
8500 -}
8501 -
8502 static int aead_setkey(struct crypto_aead *aead,
8503 const u8 *key, unsigned int keylen)
8504 {
8505 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
8506 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
8507 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8508 struct device *jrdev = ctx->jrdev;
8509 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
8510 struct crypto_authenc_keys keys;
8511 int ret = 0;
8512
8513 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
8514 goto badkey;
8515
8516 - /* Pick class 2 key length from algorithm submask */
8517 - ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
8518 - OP_ALG_ALGSEL_SHIFT] * 2;
8519 - ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
8520 -
8521 - if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
8522 - goto badkey;
8523 -
8524 #ifdef DEBUG
8525 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
8526 keys.authkeylen + keys.enckeylen, keys.enckeylen,
8527 keys.authkeylen);
8528 - printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
8529 - ctx->split_key_len, ctx->split_key_pad_len);
8530 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8531 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8532 #endif
8533
8534 - ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
8535 + /*
8536 + * If DKP is supported, use it in the shared descriptor to generate
8537 + * the split key.
8538 + */
8539 + if (ctrlpriv->era >= 6) {
8540 + ctx->adata.keylen = keys.authkeylen;
8541 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
8542 + OP_ALG_ALGSEL_MASK);
8543 +
8544 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
8545 + goto badkey;
8546 +
8547 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
8548 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
8549 + keys.enckeylen);
8550 + dma_sync_single_for_device(jrdev, ctx->key_dma,
8551 + ctx->adata.keylen_pad +
8552 + keys.enckeylen, ctx->dir);
8553 + goto skip_split_key;
8554 + }
8555 +
8556 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
8557 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
8558 + keys.enckeylen);
8559 if (ret) {
8560 goto badkey;
8561 }
8562
8563 /* postpend encryption key to auth split key */
8564 - memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
8565 -
8566 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
8567 - keys.enckeylen, DMA_TO_DEVICE);
8568 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8569 - dev_err(jrdev, "unable to map key i/o memory\n");
8570 - return -ENOMEM;
8571 - }
8572 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
8573 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
8574 + keys.enckeylen, ctx->dir);
8575 #ifdef DEBUG
8576 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
8577 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
8578 - ctx->split_key_pad_len + keys.enckeylen, 1);
8579 + ctx->adata.keylen_pad + keys.enckeylen, 1);
8580 #endif
8581
8582 - ctx->enckeylen = keys.enckeylen;
8583 -
8584 - ret = aead_set_sh_desc(aead);
8585 - if (ret) {
8586 - dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
8587 - keys.enckeylen, DMA_TO_DEVICE);
8588 - }
8589 -
8590 - return ret;
8591 +skip_split_key:
8592 + ctx->cdata.keylen = keys.enckeylen;
8593 + return aead_set_sh_desc(aead);
8594 badkey:
8595 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
8596 return -EINVAL;
8597 @@ -1400,7 +581,6 @@ static int gcm_setkey(struct crypto_aead
8598 {
8599 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8600 struct device *jrdev = ctx->jrdev;
8601 - int ret = 0;
8602
8603 #ifdef DEBUG
8604 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8605 @@ -1408,21 +588,10 @@ static int gcm_setkey(struct crypto_aead
8606 #endif
8607
8608 memcpy(ctx->key, key, keylen);
8609 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8610 - DMA_TO_DEVICE);
8611 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8612 - dev_err(jrdev, "unable to map key i/o memory\n");
8613 - return -ENOMEM;
8614 - }
8615 - ctx->enckeylen = keylen;
8616 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
8617 + ctx->cdata.keylen = keylen;
8618
8619 - ret = gcm_set_sh_desc(aead);
8620 - if (ret) {
8621 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8622 - DMA_TO_DEVICE);
8623 - }
8624 -
8625 - return ret;
8626 + return gcm_set_sh_desc(aead);
8627 }
8628
8629 static int rfc4106_setkey(struct crypto_aead *aead,
8630 @@ -1430,7 +599,6 @@ static int rfc4106_setkey(struct crypto_
8631 {
8632 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8633 struct device *jrdev = ctx->jrdev;
8634 - int ret = 0;
8635
8636 if (keylen < 4)
8637 return -EINVAL;
8638 @@ -1446,22 +614,10 @@ static int rfc4106_setkey(struct crypto_
8639 * The last four bytes of the key material are used as the salt value
8640 * in the nonce. Update the AES key length.
8641 */
8642 - ctx->enckeylen = keylen - 4;
8643 -
8644 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8645 - DMA_TO_DEVICE);
8646 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8647 - dev_err(jrdev, "unable to map key i/o memory\n");
8648 - return -ENOMEM;
8649 - }
8650 -
8651 - ret = rfc4106_set_sh_desc(aead);
8652 - if (ret) {
8653 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8654 - DMA_TO_DEVICE);
8655 - }
8656 -
8657 - return ret;
8658 + ctx->cdata.keylen = keylen - 4;
8659 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8660 + ctx->dir);
8661 + return rfc4106_set_sh_desc(aead);
8662 }
8663
8664 static int rfc4543_setkey(struct crypto_aead *aead,
8665 @@ -1469,7 +625,6 @@ static int rfc4543_setkey(struct crypto_
8666 {
8667 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8668 struct device *jrdev = ctx->jrdev;
8669 - int ret = 0;
8670
8671 if (keylen < 4)
8672 return -EINVAL;
8673 @@ -1485,43 +640,28 @@ static int rfc4543_setkey(struct crypto_
8674 * The last four bytes of the key material are used as the salt value
8675 * in the nonce. Update the AES key length.
8676 */
8677 - ctx->enckeylen = keylen - 4;
8678 -
8679 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8680 - DMA_TO_DEVICE);
8681 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8682 - dev_err(jrdev, "unable to map key i/o memory\n");
8683 - return -ENOMEM;
8684 - }
8685 -
8686 - ret = rfc4543_set_sh_desc(aead);
8687 - if (ret) {
8688 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8689 - DMA_TO_DEVICE);
8690 - }
8691 -
8692 - return ret;
8693 + ctx->cdata.keylen = keylen - 4;
8694 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8695 + ctx->dir);
8696 + return rfc4543_set_sh_desc(aead);
8697 }
8698
8699 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8700 const u8 *key, unsigned int keylen)
8701 {
8702 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8703 - struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
8704 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
8705 const char *alg_name = crypto_tfm_alg_name(tfm);
8706 struct device *jrdev = ctx->jrdev;
8707 - int ret = 0;
8708 - u32 *key_jump_cmd;
8709 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
8710 u32 *desc;
8711 - u8 *nonce;
8712 - u32 geniv;
8713 u32 ctx1_iv_off = 0;
8714 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
8715 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
8716 OP_ALG_AAI_CTR_MOD128);
8717 const bool is_rfc3686 = (ctr_mode &&
8718 (strstr(alg_name, "rfc3686") != NULL));
8719
8720 + memcpy(ctx->key, key, keylen);
8721 #ifdef DEBUG
8722 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8723 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8724 @@ -1544,215 +684,33 @@ static int ablkcipher_setkey(struct cryp
8725 keylen -= CTR_RFC3686_NONCE_SIZE;
8726 }
8727
8728 - memcpy(ctx->key, key, keylen);
8729 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8730 - DMA_TO_DEVICE);
8731 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8732 - dev_err(jrdev, "unable to map key i/o memory\n");
8733 - return -ENOMEM;
8734 - }
8735 - ctx->enckeylen = keylen;
8736 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8737 + ctx->cdata.keylen = keylen;
8738 + ctx->cdata.key_virt = ctx->key;
8739 + ctx->cdata.key_inline = true;
8740
8741 /* ablkcipher_encrypt shared descriptor */
8742 desc = ctx->sh_desc_enc;
8743 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8744 - /* Skip if already shared */
8745 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8746 - JUMP_COND_SHRD);
8747 -
8748 - /* Load class1 key only */
8749 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8750 - ctx->enckeylen, CLASS_1 |
8751 - KEY_DEST_CLASS_REG);
8752 -
8753 - /* Load nonce into CONTEXT1 reg */
8754 - if (is_rfc3686) {
8755 - nonce = (u8 *)key + keylen;
8756 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8757 - LDST_CLASS_IND_CCB |
8758 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8759 - append_move(desc, MOVE_WAITCOMP |
8760 - MOVE_SRC_OUTFIFO |
8761 - MOVE_DEST_CLASS1CTX |
8762 - (16 << MOVE_OFFSET_SHIFT) |
8763 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8764 - }
8765 -
8766 - set_jump_tgt_here(desc, key_jump_cmd);
8767 -
8768 - /* Load iv */
8769 - append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8770 - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8771 -
8772 - /* Load counter into CONTEXT1 reg */
8773 - if (is_rfc3686)
8774 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8775 - LDST_SRCDST_BYTE_CONTEXT |
8776 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8777 - LDST_OFFSET_SHIFT));
8778 -
8779 - /* Load operation */
8780 - append_operation(desc, ctx->class1_alg_type |
8781 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8782 -
8783 - /* Perform operation */
8784 - ablkcipher_append_src_dst(desc);
8785 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
8786 + ctx1_iv_off);
8787 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8788 + desc_bytes(desc), ctx->dir);
8789
8790 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8791 - desc_bytes(desc),
8792 - DMA_TO_DEVICE);
8793 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8794 - dev_err(jrdev, "unable to map shared descriptor\n");
8795 - return -ENOMEM;
8796 - }
8797 -#ifdef DEBUG
8798 - print_hex_dump(KERN_ERR,
8799 - "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
8800 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8801 - desc_bytes(desc), 1);
8802 -#endif
8803 /* ablkcipher_decrypt shared descriptor */
8804 desc = ctx->sh_desc_dec;
8805 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
8806 + ctx1_iv_off);
8807 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8808 + desc_bytes(desc), ctx->dir);
8809
8810 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8811 - /* Skip if already shared */
8812 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8813 - JUMP_COND_SHRD);
8814 -
8815 - /* Load class1 key only */
8816 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8817 - ctx->enckeylen, CLASS_1 |
8818 - KEY_DEST_CLASS_REG);
8819 -
8820 - /* Load nonce into CONTEXT1 reg */
8821 - if (is_rfc3686) {
8822 - nonce = (u8 *)key + keylen;
8823 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8824 - LDST_CLASS_IND_CCB |
8825 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8826 - append_move(desc, MOVE_WAITCOMP |
8827 - MOVE_SRC_OUTFIFO |
8828 - MOVE_DEST_CLASS1CTX |
8829 - (16 << MOVE_OFFSET_SHIFT) |
8830 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8831 - }
8832 -
8833 - set_jump_tgt_here(desc, key_jump_cmd);
8834 -
8835 - /* load IV */
8836 - append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8837 - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8838 -
8839 - /* Load counter into CONTEXT1 reg */
8840 - if (is_rfc3686)
8841 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8842 - LDST_SRCDST_BYTE_CONTEXT |
8843 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8844 - LDST_OFFSET_SHIFT));
8845 -
8846 - /* Choose operation */
8847 - if (ctr_mode)
8848 - append_operation(desc, ctx->class1_alg_type |
8849 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
8850 - else
8851 - append_dec_op1(desc, ctx->class1_alg_type);
8852 -
8853 - /* Perform operation */
8854 - ablkcipher_append_src_dst(desc);
8855 -
8856 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8857 - desc_bytes(desc),
8858 - DMA_TO_DEVICE);
8859 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8860 - dev_err(jrdev, "unable to map shared descriptor\n");
8861 - return -ENOMEM;
8862 - }
8863 -
8864 -#ifdef DEBUG
8865 - print_hex_dump(KERN_ERR,
8866 - "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
8867 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8868 - desc_bytes(desc), 1);
8869 -#endif
8870 /* ablkcipher_givencrypt shared descriptor */
8871 desc = ctx->sh_desc_givenc;
8872 + cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
8873 + ctx1_iv_off);
8874 + dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
8875 + desc_bytes(desc), ctx->dir);
8876
8877 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8878 - /* Skip if already shared */
8879 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8880 - JUMP_COND_SHRD);
8881 -
8882 - /* Load class1 key only */
8883 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8884 - ctx->enckeylen, CLASS_1 |
8885 - KEY_DEST_CLASS_REG);
8886 -
8887 - /* Load Nonce into CONTEXT1 reg */
8888 - if (is_rfc3686) {
8889 - nonce = (u8 *)key + keylen;
8890 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8891 - LDST_CLASS_IND_CCB |
8892 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8893 - append_move(desc, MOVE_WAITCOMP |
8894 - MOVE_SRC_OUTFIFO |
8895 - MOVE_DEST_CLASS1CTX |
8896 - (16 << MOVE_OFFSET_SHIFT) |
8897 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8898 - }
8899 - set_jump_tgt_here(desc, key_jump_cmd);
8900 -
8901 - /* Generate IV */
8902 - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
8903 - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
8904 - NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
8905 - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
8906 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
8907 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8908 - append_move(desc, MOVE_WAITCOMP |
8909 - MOVE_SRC_INFIFO |
8910 - MOVE_DEST_CLASS1CTX |
8911 - (crt->ivsize << MOVE_LEN_SHIFT) |
8912 - (ctx1_iv_off << MOVE_OFFSET_SHIFT));
8913 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8914 -
8915 - /* Copy generated IV to memory */
8916 - append_seq_store(desc, crt->ivsize,
8917 - LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
8918 - (ctx1_iv_off << LDST_OFFSET_SHIFT));
8919 -
8920 - /* Load Counter into CONTEXT1 reg */
8921 - if (is_rfc3686)
8922 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8923 - LDST_SRCDST_BYTE_CONTEXT |
8924 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8925 - LDST_OFFSET_SHIFT));
8926 -
8927 - if (ctx1_iv_off)
8928 - append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
8929 - (1 << JUMP_OFFSET_SHIFT));
8930 -
8931 - /* Load operation */
8932 - append_operation(desc, ctx->class1_alg_type |
8933 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8934 -
8935 - /* Perform operation */
8936 - ablkcipher_append_src_dst(desc);
8937 -
8938 - ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
8939 - desc_bytes(desc),
8940 - DMA_TO_DEVICE);
8941 - if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
8942 - dev_err(jrdev, "unable to map shared descriptor\n");
8943 - return -ENOMEM;
8944 - }
8945 -#ifdef DEBUG
8946 - print_hex_dump(KERN_ERR,
8947 - "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
8948 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8949 - desc_bytes(desc), 1);
8950 -#endif
8951 -
8952 - return ret;
8953 + return 0;
8954 }
8955
8956 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
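/*
 * Note on the setkey conversions above: the per-call dma_map_single() /
 * dma_unmap_single() pairs can go away because caam_init_common(), further
 * down in this patch, maps ctx->key and the shared descriptors once at tfm
 * init.  A key update then only copies the new material and pushes it to
 * the device.  Minimal sketch of that pattern, assuming a context already
 * mapped at ctx->key_dma with direction ctx->dir; example_setkey() is a
 * stand-in name, not a function in this driver.
 */
static int example_setkey(struct device *jrdev, struct caam_ctx *ctx,
			  const u8 *key, unsigned int keylen)
{
	memcpy(ctx->key, key, keylen);		/* refresh the CPU copy */
	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
	ctx->cdata.keylen = keylen;		/* picked up by *_set_sh_desc() */
	return 0;
}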
8957 @@ -1760,8 +718,7 @@ static int xts_ablkcipher_setkey(struct
8958 {
8959 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8960 struct device *jrdev = ctx->jrdev;
8961 - u32 *key_jump_cmd, *desc;
8962 - __be64 sector_size = cpu_to_be64(512);
8963 + u32 *desc;
8964
8965 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
8966 crypto_ablkcipher_set_flags(ablkcipher,
8967 @@ -1771,126 +728,38 @@ static int xts_ablkcipher_setkey(struct
8968 }
8969
8970 memcpy(ctx->key, key, keylen);
8971 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
8972 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8973 - dev_err(jrdev, "unable to map key i/o memory\n");
8974 - return -ENOMEM;
8975 - }
8976 - ctx->enckeylen = keylen;
8977 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8978 + ctx->cdata.keylen = keylen;
8979 + ctx->cdata.key_virt = ctx->key;
8980 + ctx->cdata.key_inline = true;
8981
8982 /* xts_ablkcipher_encrypt shared descriptor */
8983 desc = ctx->sh_desc_enc;
8984 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8985 - /* Skip if already shared */
8986 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8987 - JUMP_COND_SHRD);
8988 -
8989 - /* Load class1 keys only */
8990 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8991 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8992 -
8993 - /* Load sector size with index 40 bytes (0x28) */
8994 - append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
8995 - LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
8996 - append_data(desc, (void *)&sector_size, 8);
8997 -
8998 - set_jump_tgt_here(desc, key_jump_cmd);
8999 -
9000 - /*
9001 - * create sequence for loading the sector index
9002 - * Upper 8B of IV - will be used as sector index
9003 - * Lower 8B of IV - will be discarded
9004 - */
9005 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
9006 - LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
9007 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
9008 -
9009 - /* Load operation */
9010 - append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
9011 - OP_ALG_ENCRYPT);
9012 -
9013 - /* Perform operation */
9014 - ablkcipher_append_src_dst(desc);
9015 -
9016 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
9017 - DMA_TO_DEVICE);
9018 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
9019 - dev_err(jrdev, "unable to map shared descriptor\n");
9020 - return -ENOMEM;
9021 - }
9022 -#ifdef DEBUG
9023 - print_hex_dump(KERN_ERR,
9024 - "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
9025 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
9026 -#endif
9027 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
9028 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
9029 + desc_bytes(desc), ctx->dir);
9030
9031 /* xts_ablkcipher_decrypt shared descriptor */
9032 desc = ctx->sh_desc_dec;
9033 -
9034 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
9035 - /* Skip if already shared */
9036 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
9037 - JUMP_COND_SHRD);
9038 -
9039 - /* Load class1 key only */
9040 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
9041 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
9042 -
9043 - /* Load sector size with index 40 bytes (0x28) */
9044 - append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
9045 - LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
9046 - append_data(desc, (void *)&sector_size, 8);
9047 -
9048 - set_jump_tgt_here(desc, key_jump_cmd);
9049 -
9050 - /*
9051 - * create sequence for loading the sector index
9052 - * Upper 8B of IV - will be used as sector index
9053 - * Lower 8B of IV - will be discarded
9054 - */
9055 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
9056 - LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
9057 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
9058 -
9059 - /* Load operation */
9060 - append_dec_op1(desc, ctx->class1_alg_type);
9061 -
9062 - /* Perform operation */
9063 - ablkcipher_append_src_dst(desc);
9064 -
9065 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
9066 - DMA_TO_DEVICE);
9067 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
9068 - dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
9069 - desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
9070 - dev_err(jrdev, "unable to map shared descriptor\n");
9071 - return -ENOMEM;
9072 - }
9073 -#ifdef DEBUG
9074 - print_hex_dump(KERN_ERR,
9075 - "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
9076 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
9077 -#endif
9078 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
9079 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
9080 + desc_bytes(desc), ctx->dir);
9081
9082 return 0;
9083 }
9084
9085 /*
9086 * aead_edesc - s/w-extended aead descriptor
9087 - * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
9088 - * @src_nents: number of segments in input scatterlist
9089 - * @dst_nents: number of segments in output scatterlist
9090 - * @iv_dma: dma address of iv for checking continuity and link table
9091 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9092 + * @src_nents: number of segments in input s/w scatterlist
9093 + * @dst_nents: number of segments in output s/w scatterlist
9094 * @sec4_sg_bytes: length of dma mapped sec4_sg space
9095 * @sec4_sg_dma: bus physical mapped address of h/w link table
9096 + * @sec4_sg: pointer to h/w link table
9097 * @hw_desc: the h/w job descriptor followed by any referenced link tables
9098 */
9099 struct aead_edesc {
9100 - int assoc_nents;
9101 int src_nents;
9102 int dst_nents;
9103 - dma_addr_t iv_dma;
9104 int sec4_sg_bytes;
9105 dma_addr_t sec4_sg_dma;
9106 struct sec4_sg_entry *sec4_sg;
9107 @@ -1899,12 +768,12 @@ struct aead_edesc {
9108
9109 /*
9110 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
9111 - * @src_nents: number of segments in input scatterlist
9112 - * @dst_nents: number of segments in output scatterlist
9113 + * @src_nents: number of segments in input s/w scatterlist
9114 + * @dst_nents: number of segments in output s/w scatterlist
9115 * @iv_dma: dma address of iv for checking continuity and link table
9116 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9117 * @sec4_sg_bytes: length of dma mapped sec4_sg space
9118 * @sec4_sg_dma: bus physical mapped address of h/w link table
9119 + * @sec4_sg: pointer to h/w link table
9120 * @hw_desc: the h/w job descriptor followed by any referenced link tables
9121 */
9122 struct ablkcipher_edesc {
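/*
 * Both s/w-extended descriptors documented above come out of a single
 * kzalloc(): the fixed struct, the h/w job descriptor and the sec4 link
 * table share one contiguous buffer.  Sketch of how the allocation paths
 * below derive the link-table pointer, with desc_bytes and sec4_sg_bytes
 * as computed by the caller (a sketch, not a verbatim copy of either path):
 */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);
	/* the link table starts right after the struct and the job descriptor */
	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desc_bytes;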
9123 @@ -1924,10 +793,11 @@ static void caam_unmap(struct device *de
9124 int sec4_sg_bytes)
9125 {
9126 if (dst != src) {
9127 - dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
9128 - dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
9129 + if (src_nents)
9130 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
9131 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
9132 } else {
9133 - dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
9134 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
9135 }
9136
9137 if (iv_dma)
9138 @@ -2021,8 +891,7 @@ static void ablkcipher_encrypt_done(stru
9139 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9140 #endif
9141
9142 - edesc = (struct ablkcipher_edesc *)((char *)desc -
9143 - offsetof(struct ablkcipher_edesc, hw_desc));
9144 + edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9145
9146 if (err)
9147 caam_jr_strstatus(jrdev, err);
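/*
 * The completion callbacks above switch from open-coded pointer arithmetic
 * to container_of(): given &edesc->hw_desc[0], both forms recover the
 * enclosing descriptor.  Self-contained sketch of the equivalence in plain
 * C; struct edesc_example and container_of_example() are illustrative
 * stand-ins for the driver's types and for the kernel macro.
 */
#include <assert.h>
#include <stddef.h>

struct edesc_example {
	int src_nents;
	unsigned int hw_desc[16];
};

#define container_of_example(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct edesc_example e;
	unsigned int *desc = &e.hw_desc[0];

	/* recovers &e, exactly like (char *)desc - offsetof(..., hw_desc) */
	assert(container_of_example(desc, struct edesc_example, hw_desc[0]) == &e);
	return 0;
}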
9148 @@ -2031,10 +900,10 @@ static void ablkcipher_encrypt_done(stru
9149 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
9150 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9151 edesc->src_nents > 1 ? 100 : ivsize, 1);
9152 - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
9153 - DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9154 - edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9155 #endif
9156 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
9157 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9158 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9159
9160 ablkcipher_unmap(jrdev, edesc, req);
9161
9162 @@ -2062,8 +931,7 @@ static void ablkcipher_decrypt_done(stru
9163 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9164 #endif
9165
9166 - edesc = (struct ablkcipher_edesc *)((char *)desc -
9167 - offsetof(struct ablkcipher_edesc, hw_desc));
9168 + edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9169 if (err)
9170 caam_jr_strstatus(jrdev, err);
9171
9172 @@ -2071,10 +939,10 @@ static void ablkcipher_decrypt_done(stru
9173 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
9174 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9175 ivsize, 1);
9176 - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
9177 - DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9178 - edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9179 #endif
9180 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
9181 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9182 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9183
9184 ablkcipher_unmap(jrdev, edesc, req);
9185
9186 @@ -2114,7 +982,7 @@ static void init_aead_job(struct aead_re
9187 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9188
9189 if (all_contig) {
9190 - src_dma = sg_dma_address(req->src);
9191 + src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
9192 in_options = 0;
9193 } else {
9194 src_dma = edesc->sec4_sg_dma;
9195 @@ -2129,7 +997,7 @@ static void init_aead_job(struct aead_re
9196 out_options = in_options;
9197
9198 if (unlikely(req->src != req->dst)) {
9199 - if (!edesc->dst_nents) {
9200 + if (edesc->dst_nents == 1) {
9201 dst_dma = sg_dma_address(req->dst);
9202 } else {
9203 dst_dma = edesc->sec4_sg_dma +
9204 @@ -2147,9 +1015,6 @@ static void init_aead_job(struct aead_re
9205 append_seq_out_ptr(desc, dst_dma,
9206 req->assoclen + req->cryptlen - authsize,
9207 out_options);
9208 -
9209 - /* REG3 = assoclen */
9210 - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
9211 }
9212
9213 static void init_gcm_job(struct aead_request *req,
9214 @@ -2164,6 +1029,7 @@ static void init_gcm_job(struct aead_req
9215 unsigned int last;
9216
9217 init_aead_job(req, edesc, all_contig, encrypt);
9218 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
9219
9220 /* BUG This should not be specific to generic GCM. */
9221 last = 0;
9222 @@ -2175,7 +1041,7 @@ static void init_gcm_job(struct aead_req
9223 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
9224 /* Append Salt */
9225 if (!generic_gcm)
9226 - append_data(desc, ctx->key + ctx->enckeylen, 4);
9227 + append_data(desc, ctx->key + ctx->cdata.keylen, 4);
9228 /* Append IV */
9229 append_data(desc, req->iv, ivsize);
9230 /* End of blank commands */
9231 @@ -2190,7 +1056,8 @@ static void init_authenc_job(struct aead
9232 struct caam_aead_alg, aead);
9233 unsigned int ivsize = crypto_aead_ivsize(aead);
9234 struct caam_ctx *ctx = crypto_aead_ctx(aead);
9235 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
9236 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
9237 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
9238 OP_ALG_AAI_CTR_MOD128);
9239 const bool is_rfc3686 = alg->caam.rfc3686;
9240 u32 *desc = edesc->hw_desc;
9241 @@ -2213,6 +1080,15 @@ static void init_authenc_job(struct aead
9242
9243 init_aead_job(req, edesc, all_contig, encrypt);
9244
9245 + /*
9246 + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
9247 + * having DPOVRD as destination.
9248 + */
9249 + if (ctrlpriv->era < 3)
9250 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
9251 + else
9252 + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
9253 +
9254 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
9255 append_load_as_imm(desc, req->iv, ivsize,
9256 LDST_CLASS_1_CCB |
9257 @@ -2236,16 +1112,15 @@ static void init_ablkcipher_job(u32 *sh_
9258 int len, sec4_sg_index = 0;
9259
9260 #ifdef DEBUG
9261 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9262 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9263 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
9264 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9265 ivsize, 1);
9266 - printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
9267 - dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
9268 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9269 - edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9270 + pr_err("asked=%d, nbytes%d\n",
9271 + (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
9272 #endif
9273 + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
9274 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9275 + edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9276
9277 len = desc_len(sh_desc);
9278 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9279 @@ -2261,7 +1136,7 @@ static void init_ablkcipher_job(u32 *sh_
9280 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
9281
9282 if (likely(req->src == req->dst)) {
9283 - if (!edesc->src_nents && iv_contig) {
9284 + if (edesc->src_nents == 1 && iv_contig) {
9285 dst_dma = sg_dma_address(req->src);
9286 } else {
9287 dst_dma = edesc->sec4_sg_dma +
9288 @@ -2269,7 +1144,7 @@ static void init_ablkcipher_job(u32 *sh_
9289 out_options = LDST_SGF;
9290 }
9291 } else {
9292 - if (!edesc->dst_nents) {
9293 + if (edesc->dst_nents == 1) {
9294 dst_dma = sg_dma_address(req->dst);
9295 } else {
9296 dst_dma = edesc->sec4_sg_dma +
9297 @@ -2296,20 +1171,18 @@ static void init_ablkcipher_giv_job(u32
9298 int len, sec4_sg_index = 0;
9299
9300 #ifdef DEBUG
9301 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9302 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9303 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
9304 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9305 ivsize, 1);
9306 - dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
9307 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9308 - edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9309 #endif
9310 + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
9311 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9312 + edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9313
9314 len = desc_len(sh_desc);
9315 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9316
9317 - if (!edesc->src_nents) {
9318 + if (edesc->src_nents == 1) {
9319 src_dma = sg_dma_address(req->src);
9320 in_options = 0;
9321 } else {
9322 @@ -2340,87 +1213,100 @@ static struct aead_edesc *aead_edesc_all
9323 struct crypto_aead *aead = crypto_aead_reqtfm(req);
9324 struct caam_ctx *ctx = crypto_aead_ctx(aead);
9325 struct device *jrdev = ctx->jrdev;
9326 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9327 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
9328 - int src_nents, dst_nents = 0;
9329 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9330 + GFP_KERNEL : GFP_ATOMIC;
9331 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9332 struct aead_edesc *edesc;
9333 - int sgc;
9334 - bool all_contig = true;
9335 - int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
9336 + int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
9337 unsigned int authsize = ctx->authsize;
9338
9339 if (unlikely(req->dst != req->src)) {
9340 - src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
9341 - dst_nents = sg_count(req->dst,
9342 - req->assoclen + req->cryptlen +
9343 - (encrypt ? authsize : (-authsize)));
9344 - } else {
9345 - src_nents = sg_count(req->src,
9346 - req->assoclen + req->cryptlen +
9347 - (encrypt ? authsize : 0));
9348 - }
9349 -
9350 - /* Check if data are contiguous. */
9351 - all_contig = !src_nents;
9352 - if (!all_contig) {
9353 - src_nents = src_nents ? : 1;
9354 - sec4_sg_len = src_nents;
9355 - }
9356 -
9357 - sec4_sg_len += dst_nents;
9358 -
9359 - sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9360 + src_nents = sg_nents_for_len(req->src, req->assoclen +
9361 + req->cryptlen);
9362 + if (unlikely(src_nents < 0)) {
9363 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9364 + req->assoclen + req->cryptlen);
9365 + return ERR_PTR(src_nents);
9366 + }
9367
9368 - /* allocate space for base edesc and hw desc commands, link tables */
9369 - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9370 - GFP_DMA | flags);
9371 - if (!edesc) {
9372 - dev_err(jrdev, "could not allocate extended descriptor\n");
9373 - return ERR_PTR(-ENOMEM);
9374 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
9375 + req->cryptlen +
9376 + (encrypt ? authsize :
9377 + (-authsize)));
9378 + if (unlikely(dst_nents < 0)) {
9379 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9380 + req->assoclen + req->cryptlen +
9381 + (encrypt ? authsize : (-authsize)));
9382 + return ERR_PTR(dst_nents);
9383 + }
9384 + } else {
9385 + src_nents = sg_nents_for_len(req->src, req->assoclen +
9386 + req->cryptlen +
9387 + (encrypt ? authsize : 0));
9388 + if (unlikely(src_nents < 0)) {
9389 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9390 + req->assoclen + req->cryptlen +
9391 + (encrypt ? authsize : 0));
9392 + return ERR_PTR(src_nents);
9393 + }
9394 }
9395
9396 if (likely(req->src == req->dst)) {
9397 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9398 - DMA_BIDIRECTIONAL);
9399 - if (unlikely(!sgc)) {
9400 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9401 + DMA_BIDIRECTIONAL);
9402 + if (unlikely(!mapped_src_nents)) {
9403 dev_err(jrdev, "unable to map source\n");
9404 - kfree(edesc);
9405 return ERR_PTR(-ENOMEM);
9406 }
9407 } else {
9408 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9409 - DMA_TO_DEVICE);
9410 - if (unlikely(!sgc)) {
9411 - dev_err(jrdev, "unable to map source\n");
9412 - kfree(edesc);
9413 - return ERR_PTR(-ENOMEM);
9414 + /* Cover also the case of null (zero length) input data */
9415 + if (src_nents) {
9416 + mapped_src_nents = dma_map_sg(jrdev, req->src,
9417 + src_nents, DMA_TO_DEVICE);
9418 + if (unlikely(!mapped_src_nents)) {
9419 + dev_err(jrdev, "unable to map source\n");
9420 + return ERR_PTR(-ENOMEM);
9421 + }
9422 + } else {
9423 + mapped_src_nents = 0;
9424 }
9425
9426 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9427 - DMA_FROM_DEVICE);
9428 - if (unlikely(!sgc)) {
9429 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9430 + DMA_FROM_DEVICE);
9431 + if (unlikely(!mapped_dst_nents)) {
9432 dev_err(jrdev, "unable to map destination\n");
9433 - dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
9434 - DMA_TO_DEVICE);
9435 - kfree(edesc);
9436 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9437 return ERR_PTR(-ENOMEM);
9438 }
9439 }
9440
9441 + sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
9442 + sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9443 + sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9444 +
9445 + /* allocate space for base edesc and hw desc commands, link tables */
9446 + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9447 + GFP_DMA | flags);
9448 + if (!edesc) {
9449 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9450 + 0, 0, 0);
9451 + return ERR_PTR(-ENOMEM);
9452 + }
9453 +
9454 edesc->src_nents = src_nents;
9455 edesc->dst_nents = dst_nents;
9456 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
9457 desc_bytes;
9458 - *all_contig_ptr = all_contig;
9459 + *all_contig_ptr = !(mapped_src_nents > 1);
9460
9461 sec4_sg_index = 0;
9462 - if (!all_contig) {
9463 - sg_to_sec4_sg_last(req->src, src_nents,
9464 - edesc->sec4_sg + sec4_sg_index, 0);
9465 - sec4_sg_index += src_nents;
9466 + if (mapped_src_nents > 1) {
9467 + sg_to_sec4_sg_last(req->src, mapped_src_nents,
9468 + edesc->sec4_sg + sec4_sg_index, 0);
9469 + sec4_sg_index += mapped_src_nents;
9470 }
9471 - if (dst_nents) {
9472 - sg_to_sec4_sg_last(req->dst, dst_nents,
9473 + if (mapped_dst_nents > 1) {
9474 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9475 edesc->sec4_sg + sec4_sg_index, 0);
9476 }
9477
9478 @@ -2573,13 +1459,9 @@ static int aead_decrypt(struct aead_requ
9479 u32 *desc;
9480 int ret = 0;
9481
9482 -#ifdef DEBUG
9483 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9484 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9485 - dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
9486 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9487 - req->assoclen + req->cryptlen, 1, may_sleep);
9488 -#endif
9489 + caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
9490 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9491 + req->assoclen + req->cryptlen, 1);
9492
9493 /* allocate extended descriptor */
9494 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
9495 @@ -2619,51 +1501,80 @@ static struct ablkcipher_edesc *ablkciph
9496 struct device *jrdev = ctx->jrdev;
9497 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9498 GFP_KERNEL : GFP_ATOMIC;
9499 - int src_nents, dst_nents = 0, sec4_sg_bytes;
9500 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9501 struct ablkcipher_edesc *edesc;
9502 dma_addr_t iv_dma = 0;
9503 - bool iv_contig = false;
9504 - int sgc;
9505 + bool in_contig;
9506 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9507 - int sec4_sg_index;
9508 + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9509
9510 - src_nents = sg_count(req->src, req->nbytes);
9511 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9512 + if (unlikely(src_nents < 0)) {
9513 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9514 + req->nbytes);
9515 + return ERR_PTR(src_nents);
9516 + }
9517
9518 - if (req->dst != req->src)
9519 - dst_nents = sg_count(req->dst, req->nbytes);
9520 + if (req->dst != req->src) {
9521 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9522 + if (unlikely(dst_nents < 0)) {
9523 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9524 + req->nbytes);
9525 + return ERR_PTR(dst_nents);
9526 + }
9527 + }
9528
9529 if (likely(req->src == req->dst)) {
9530 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9531 - DMA_BIDIRECTIONAL);
9532 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9533 + DMA_BIDIRECTIONAL);
9534 + if (unlikely(!mapped_src_nents)) {
9535 + dev_err(jrdev, "unable to map source\n");
9536 + return ERR_PTR(-ENOMEM);
9537 + }
9538 } else {
9539 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9540 - DMA_TO_DEVICE);
9541 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9542 - DMA_FROM_DEVICE);
9543 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9544 + DMA_TO_DEVICE);
9545 + if (unlikely(!mapped_src_nents)) {
9546 + dev_err(jrdev, "unable to map source\n");
9547 + return ERR_PTR(-ENOMEM);
9548 + }
9549 +
9550 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9551 + DMA_FROM_DEVICE);
9552 + if (unlikely(!mapped_dst_nents)) {
9553 + dev_err(jrdev, "unable to map destination\n");
9554 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9555 + return ERR_PTR(-ENOMEM);
9556 + }
9557 }
9558
9559 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
9560 if (dma_mapping_error(jrdev, iv_dma)) {
9561 dev_err(jrdev, "unable to map IV\n");
9562 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9563 + 0, 0, 0);
9564 return ERR_PTR(-ENOMEM);
9565 }
9566
9567 - /*
9568 - * Check if iv can be contiguous with source and destination.
9569 - * If so, include it. If not, create scatterlist.
9570 - */
9571 - if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
9572 - iv_contig = true;
9573 - else
9574 - src_nents = src_nents ? : 1;
9575 - sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9576 - sizeof(struct sec4_sg_entry);
9577 + if (mapped_src_nents == 1 &&
9578 + iv_dma + ivsize == sg_dma_address(req->src)) {
9579 + in_contig = true;
9580 + sec4_sg_ents = 0;
9581 + } else {
9582 + in_contig = false;
9583 + sec4_sg_ents = 1 + mapped_src_nents;
9584 + }
9585 + dst_sg_idx = sec4_sg_ents;
9586 + sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9587 + sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9588
9589 /* allocate space for base edesc and hw desc commands, link tables */
9590 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9591 GFP_DMA | flags);
9592 if (!edesc) {
9593 dev_err(jrdev, "could not allocate extended descriptor\n");
9594 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9595 + iv_dma, ivsize, 0, 0);
9596 return ERR_PTR(-ENOMEM);
9597 }
9598
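/*
 * ablkcipher_edesc_alloc() above only builds a sec4 link table when the IV
 * cannot sit physically in front of a single mapped source segment.  Sketch
 * of that layout decision, assuming iv_dma, ivsize and the mapped segment
 * counts computed earlier in the function:
 */
	if (mapped_src_nents == 1 &&
	    iv_dma + ivsize == sg_dma_address(req->src)) {
		in_contig = true;		/* IV + data already contiguous */
		sec4_sg_ents = 0;		/* no link table needed */
	} else {
		in_contig = false;
		sec4_sg_ents = 1 + mapped_src_nents; /* entry 0: IV, then src */
	}
	dst_sg_idx = sec4_sg_ents;		/* destination entries follow */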
9599 @@ -2673,23 +1584,24 @@ static struct ablkcipher_edesc *ablkciph
9600 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9601 desc_bytes;
9602
9603 - sec4_sg_index = 0;
9604 - if (!iv_contig) {
9605 + if (!in_contig) {
9606 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
9607 - sg_to_sec4_sg_last(req->src, src_nents,
9608 + sg_to_sec4_sg_last(req->src, mapped_src_nents,
9609 edesc->sec4_sg + 1, 0);
9610 - sec4_sg_index += 1 + src_nents;
9611 }
9612
9613 - if (dst_nents) {
9614 - sg_to_sec4_sg_last(req->dst, dst_nents,
9615 - edesc->sec4_sg + sec4_sg_index, 0);
9616 + if (mapped_dst_nents > 1) {
9617 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9618 + edesc->sec4_sg + dst_sg_idx, 0);
9619 }
9620
9621 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9622 sec4_sg_bytes, DMA_TO_DEVICE);
9623 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9624 dev_err(jrdev, "unable to map S/G table\n");
9625 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9626 + iv_dma, ivsize, 0, 0);
9627 + kfree(edesc);
9628 return ERR_PTR(-ENOMEM);
9629 }
9630
9631 @@ -2701,7 +1613,7 @@ static struct ablkcipher_edesc *ablkciph
9632 sec4_sg_bytes, 1);
9633 #endif
9634
9635 - *iv_contig_out = iv_contig;
9636 + *iv_contig_out = in_contig;
9637 return edesc;
9638 }
9639
9640 @@ -2792,30 +1704,54 @@ static struct ablkcipher_edesc *ablkciph
9641 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9642 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9643 struct device *jrdev = ctx->jrdev;
9644 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9645 - CRYPTO_TFM_REQ_MAY_SLEEP)) ?
9646 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9647 GFP_KERNEL : GFP_ATOMIC;
9648 - int src_nents, dst_nents = 0, sec4_sg_bytes;
9649 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
9650 struct ablkcipher_edesc *edesc;
9651 dma_addr_t iv_dma = 0;
9652 - bool iv_contig = false;
9653 - int sgc;
9654 + bool out_contig;
9655 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9656 - int sec4_sg_index;
9657 + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9658
9659 - src_nents = sg_count(req->src, req->nbytes);
9660 -
9661 - if (unlikely(req->dst != req->src))
9662 - dst_nents = sg_count(req->dst, req->nbytes);
9663 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9664 + if (unlikely(src_nents < 0)) {
9665 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9666 + req->nbytes);
9667 + return ERR_PTR(src_nents);
9668 + }
9669
9670 if (likely(req->src == req->dst)) {
9671 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9672 - DMA_BIDIRECTIONAL);
9673 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9674 + DMA_BIDIRECTIONAL);
9675 + if (unlikely(!mapped_src_nents)) {
9676 + dev_err(jrdev, "unable to map source\n");
9677 + return ERR_PTR(-ENOMEM);
9678 + }
9679 +
9680 + dst_nents = src_nents;
9681 + mapped_dst_nents = src_nents;
9682 } else {
9683 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9684 - DMA_TO_DEVICE);
9685 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9686 - DMA_FROM_DEVICE);
9687 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9688 + DMA_TO_DEVICE);
9689 + if (unlikely(!mapped_src_nents)) {
9690 + dev_err(jrdev, "unable to map source\n");
9691 + return ERR_PTR(-ENOMEM);
9692 + }
9693 +
9694 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9695 + if (unlikely(dst_nents < 0)) {
9696 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9697 + req->nbytes);
9698 + return ERR_PTR(dst_nents);
9699 + }
9700 +
9701 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9702 + DMA_FROM_DEVICE);
9703 + if (unlikely(!mapped_dst_nents)) {
9704 + dev_err(jrdev, "unable to map destination\n");
9705 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9706 + return ERR_PTR(-ENOMEM);
9707 + }
9708 }
9709
9710 /*
9711 @@ -2825,21 +1761,29 @@ static struct ablkcipher_edesc *ablkciph
9712 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
9713 if (dma_mapping_error(jrdev, iv_dma)) {
9714 dev_err(jrdev, "unable to map IV\n");
9715 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9716 + 0, 0, 0);
9717 return ERR_PTR(-ENOMEM);
9718 }
9719
9720 - if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
9721 - iv_contig = true;
9722 - else
9723 - dst_nents = dst_nents ? : 1;
9724 - sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9725 - sizeof(struct sec4_sg_entry);
9726 + sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
9727 + dst_sg_idx = sec4_sg_ents;
9728 + if (mapped_dst_nents == 1 &&
9729 + iv_dma + ivsize == sg_dma_address(req->dst)) {
9730 + out_contig = true;
9731 + } else {
9732 + out_contig = false;
9733 + sec4_sg_ents += 1 + mapped_dst_nents;
9734 + }
9735
9736 /* allocate space for base edesc and hw desc commands, link tables */
9737 + sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9738 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9739 GFP_DMA | flags);
9740 if (!edesc) {
9741 dev_err(jrdev, "could not allocate extended descriptor\n");
9742 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9743 + iv_dma, ivsize, 0, 0);
9744 return ERR_PTR(-ENOMEM);
9745 }
9746
9747 @@ -2849,24 +1793,24 @@ static struct ablkcipher_edesc *ablkciph
9748 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9749 desc_bytes;
9750
9751 - sec4_sg_index = 0;
9752 - if (src_nents) {
9753 - sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
9754 - sec4_sg_index += src_nents;
9755 - }
9756 + if (mapped_src_nents > 1)
9757 + sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
9758 + 0);
9759
9760 - if (!iv_contig) {
9761 - dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
9762 + if (!out_contig) {
9763 + dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
9764 iv_dma, ivsize, 0);
9765 - sec4_sg_index += 1;
9766 - sg_to_sec4_sg_last(req->dst, dst_nents,
9767 - edesc->sec4_sg + sec4_sg_index, 0);
9768 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9769 + edesc->sec4_sg + dst_sg_idx + 1, 0);
9770 }
9771
9772 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9773 sec4_sg_bytes, DMA_TO_DEVICE);
9774 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9775 dev_err(jrdev, "unable to map S/G table\n");
9776 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9777 + iv_dma, ivsize, 0, 0);
9778 + kfree(edesc);
9779 return ERR_PTR(-ENOMEM);
9780 }
9781 edesc->iv_dma = iv_dma;
9782 @@ -2878,7 +1822,7 @@ static struct ablkcipher_edesc *ablkciph
9783 sec4_sg_bytes, 1);
9784 #endif
9785
9786 - *iv_contig_out = iv_contig;
9787 + *iv_contig_out = out_contig;
9788 return edesc;
9789 }
9790
9791 @@ -2889,7 +1833,7 @@ static int ablkcipher_givencrypt(struct
9792 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9793 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9794 struct device *jrdev = ctx->jrdev;
9795 - bool iv_contig;
9796 + bool iv_contig = false;
9797 u32 *desc;
9798 int ret = 0;
9799
9800 @@ -2933,7 +1877,6 @@ struct caam_alg_template {
9801 } template_u;
9802 u32 class1_alg_type;
9803 u32 class2_alg_type;
9804 - u32 alg_op;
9805 };
9806
9807 static struct caam_alg_template driver_algs[] = {
9808 @@ -3118,7 +2061,6 @@ static struct caam_aead_alg driver_aeads
9809 .caam = {
9810 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9811 OP_ALG_AAI_HMAC_PRECOMP,
9812 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9813 },
9814 },
9815 {
9816 @@ -3140,7 +2082,6 @@ static struct caam_aead_alg driver_aeads
9817 .caam = {
9818 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9819 OP_ALG_AAI_HMAC_PRECOMP,
9820 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9821 },
9822 },
9823 {
9824 @@ -3162,7 +2103,6 @@ static struct caam_aead_alg driver_aeads
9825 .caam = {
9826 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9827 OP_ALG_AAI_HMAC_PRECOMP,
9828 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9829 },
9830 },
9831 {
9832 @@ -3184,7 +2124,6 @@ static struct caam_aead_alg driver_aeads
9833 .caam = {
9834 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9835 OP_ALG_AAI_HMAC_PRECOMP,
9836 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9837 },
9838 },
9839 {
9840 @@ -3206,7 +2145,6 @@ static struct caam_aead_alg driver_aeads
9841 .caam = {
9842 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9843 OP_ALG_AAI_HMAC_PRECOMP,
9844 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9845 },
9846 },
9847 {
9848 @@ -3228,7 +2166,6 @@ static struct caam_aead_alg driver_aeads
9849 .caam = {
9850 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9851 OP_ALG_AAI_HMAC_PRECOMP,
9852 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9853 },
9854 },
9855 {
9856 @@ -3250,7 +2187,6 @@ static struct caam_aead_alg driver_aeads
9857 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9858 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9859 OP_ALG_AAI_HMAC_PRECOMP,
9860 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9861 },
9862 },
9863 {
9864 @@ -3273,7 +2209,6 @@ static struct caam_aead_alg driver_aeads
9865 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9866 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9867 OP_ALG_AAI_HMAC_PRECOMP,
9868 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9869 .geniv = true,
9870 },
9871 },
9872 @@ -3296,7 +2231,6 @@ static struct caam_aead_alg driver_aeads
9873 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9874 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9875 OP_ALG_AAI_HMAC_PRECOMP,
9876 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9877 },
9878 },
9879 {
9880 @@ -3319,7 +2253,6 @@ static struct caam_aead_alg driver_aeads
9881 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9882 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9883 OP_ALG_AAI_HMAC_PRECOMP,
9884 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9885 .geniv = true,
9886 },
9887 },
9888 @@ -3342,7 +2275,6 @@ static struct caam_aead_alg driver_aeads
9889 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9890 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9891 OP_ALG_AAI_HMAC_PRECOMP,
9892 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9893 },
9894 },
9895 {
9896 @@ -3365,7 +2297,6 @@ static struct caam_aead_alg driver_aeads
9897 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9898 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9899 OP_ALG_AAI_HMAC_PRECOMP,
9900 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9901 .geniv = true,
9902 },
9903 },
9904 @@ -3388,7 +2319,6 @@ static struct caam_aead_alg driver_aeads
9905 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9906 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9907 OP_ALG_AAI_HMAC_PRECOMP,
9908 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9909 },
9910 },
9911 {
9912 @@ -3411,7 +2341,6 @@ static struct caam_aead_alg driver_aeads
9913 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9914 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9915 OP_ALG_AAI_HMAC_PRECOMP,
9916 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9917 .geniv = true,
9918 },
9919 },
9920 @@ -3434,7 +2363,6 @@ static struct caam_aead_alg driver_aeads
9921 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9922 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9923 OP_ALG_AAI_HMAC_PRECOMP,
9924 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9925 },
9926 },
9927 {
9928 @@ -3457,7 +2385,6 @@ static struct caam_aead_alg driver_aeads
9929 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9930 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9931 OP_ALG_AAI_HMAC_PRECOMP,
9932 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9933 .geniv = true,
9934 },
9935 },
9936 @@ -3480,7 +2407,6 @@ static struct caam_aead_alg driver_aeads
9937 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9938 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9939 OP_ALG_AAI_HMAC_PRECOMP,
9940 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9941 },
9942 },
9943 {
9944 @@ -3503,7 +2429,6 @@ static struct caam_aead_alg driver_aeads
9945 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9946 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9947 OP_ALG_AAI_HMAC_PRECOMP,
9948 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9949 .geniv = true,
9950 },
9951 },
9952 @@ -3526,7 +2451,6 @@ static struct caam_aead_alg driver_aeads
9953 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9954 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9955 OP_ALG_AAI_HMAC_PRECOMP,
9956 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9957 }
9958 },
9959 {
9960 @@ -3549,7 +2473,6 @@ static struct caam_aead_alg driver_aeads
9961 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9962 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9963 OP_ALG_AAI_HMAC_PRECOMP,
9964 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9965 .geniv = true,
9966 }
9967 },
9968 @@ -3573,7 +2496,6 @@ static struct caam_aead_alg driver_aeads
9969 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9970 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9971 OP_ALG_AAI_HMAC_PRECOMP,
9972 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9973 },
9974 },
9975 {
9976 @@ -3597,7 +2519,6 @@ static struct caam_aead_alg driver_aeads
9977 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9978 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9979 OP_ALG_AAI_HMAC_PRECOMP,
9980 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9981 .geniv = true,
9982 },
9983 },
9984 @@ -3621,7 +2542,6 @@ static struct caam_aead_alg driver_aeads
9985 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9986 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9987 OP_ALG_AAI_HMAC_PRECOMP,
9988 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9989 },
9990 },
9991 {
9992 @@ -3645,7 +2565,6 @@ static struct caam_aead_alg driver_aeads
9993 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9994 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9995 OP_ALG_AAI_HMAC_PRECOMP,
9996 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9997 .geniv = true,
9998 },
9999 },
10000 @@ -3669,7 +2588,6 @@ static struct caam_aead_alg driver_aeads
10001 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10002 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10003 OP_ALG_AAI_HMAC_PRECOMP,
10004 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10005 },
10006 },
10007 {
10008 @@ -3693,7 +2611,6 @@ static struct caam_aead_alg driver_aeads
10009 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10010 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10011 OP_ALG_AAI_HMAC_PRECOMP,
10012 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10013 .geniv = true,
10014 },
10015 },
10016 @@ -3717,7 +2634,6 @@ static struct caam_aead_alg driver_aeads
10017 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10018 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10019 OP_ALG_AAI_HMAC_PRECOMP,
10020 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10021 },
10022 },
10023 {
10024 @@ -3741,7 +2657,6 @@ static struct caam_aead_alg driver_aeads
10025 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10026 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10027 OP_ALG_AAI_HMAC_PRECOMP,
10028 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10029 .geniv = true,
10030 },
10031 },
10032 @@ -3765,7 +2680,6 @@ static struct caam_aead_alg driver_aeads
10033 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10034 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10035 OP_ALG_AAI_HMAC_PRECOMP,
10036 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10037 },
10038 },
10039 {
10040 @@ -3789,7 +2703,6 @@ static struct caam_aead_alg driver_aeads
10041 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10042 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10043 OP_ALG_AAI_HMAC_PRECOMP,
10044 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10045 .geniv = true,
10046 },
10047 },
10048 @@ -3812,7 +2725,6 @@ static struct caam_aead_alg driver_aeads
10049 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10050 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10051 OP_ALG_AAI_HMAC_PRECOMP,
10052 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10053 },
10054 },
10055 {
10056 @@ -3835,7 +2747,6 @@ static struct caam_aead_alg driver_aeads
10057 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10058 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10059 OP_ALG_AAI_HMAC_PRECOMP,
10060 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10061 .geniv = true,
10062 },
10063 },
10064 @@ -3858,7 +2769,6 @@ static struct caam_aead_alg driver_aeads
10065 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10066 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10067 OP_ALG_AAI_HMAC_PRECOMP,
10068 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10069 },
10070 },
10071 {
10072 @@ -3881,7 +2791,6 @@ static struct caam_aead_alg driver_aeads
10073 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10074 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10075 OP_ALG_AAI_HMAC_PRECOMP,
10076 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10077 .geniv = true,
10078 },
10079 },
10080 @@ -3904,7 +2813,6 @@ static struct caam_aead_alg driver_aeads
10081 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10082 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10083 OP_ALG_AAI_HMAC_PRECOMP,
10084 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10085 },
10086 },
10087 {
10088 @@ -3927,7 +2835,6 @@ static struct caam_aead_alg driver_aeads
10089 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10090 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10091 OP_ALG_AAI_HMAC_PRECOMP,
10092 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10093 .geniv = true,
10094 },
10095 },
10096 @@ -3950,7 +2857,6 @@ static struct caam_aead_alg driver_aeads
10097 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10098 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10099 OP_ALG_AAI_HMAC_PRECOMP,
10100 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10101 },
10102 },
10103 {
10104 @@ -3973,7 +2879,6 @@ static struct caam_aead_alg driver_aeads
10105 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10106 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10107 OP_ALG_AAI_HMAC_PRECOMP,
10108 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10109 .geniv = true,
10110 },
10111 },
10112 @@ -3996,7 +2901,6 @@ static struct caam_aead_alg driver_aeads
10113 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10114 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10115 OP_ALG_AAI_HMAC_PRECOMP,
10116 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10117 },
10118 },
10119 {
10120 @@ -4019,7 +2923,6 @@ static struct caam_aead_alg driver_aeads
10121 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10122 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10123 OP_ALG_AAI_HMAC_PRECOMP,
10124 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10125 .geniv = true,
10126 },
10127 },
10128 @@ -4042,7 +2945,6 @@ static struct caam_aead_alg driver_aeads
10129 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10130 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10131 OP_ALG_AAI_HMAC_PRECOMP,
10132 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10133 },
10134 },
10135 {
10136 @@ -4065,7 +2967,6 @@ static struct caam_aead_alg driver_aeads
10137 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10138 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10139 OP_ALG_AAI_HMAC_PRECOMP,
10140 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10141 .geniv = true,
10142 },
10143 },
10144 @@ -4090,7 +2991,6 @@ static struct caam_aead_alg driver_aeads
10145 OP_ALG_AAI_CTR_MOD128,
10146 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10147 OP_ALG_AAI_HMAC_PRECOMP,
10148 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10149 .rfc3686 = true,
10150 },
10151 },
10152 @@ -4115,7 +3015,6 @@ static struct caam_aead_alg driver_aeads
10153 OP_ALG_AAI_CTR_MOD128,
10154 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10155 OP_ALG_AAI_HMAC_PRECOMP,
10156 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10157 .rfc3686 = true,
10158 .geniv = true,
10159 },
10160 @@ -4141,7 +3040,6 @@ static struct caam_aead_alg driver_aeads
10161 OP_ALG_AAI_CTR_MOD128,
10162 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10163 OP_ALG_AAI_HMAC_PRECOMP,
10164 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10165 .rfc3686 = true,
10166 },
10167 },
10168 @@ -4166,7 +3064,6 @@ static struct caam_aead_alg driver_aeads
10169 OP_ALG_AAI_CTR_MOD128,
10170 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10171 OP_ALG_AAI_HMAC_PRECOMP,
10172 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10173 .rfc3686 = true,
10174 .geniv = true,
10175 },
10176 @@ -4192,7 +3089,6 @@ static struct caam_aead_alg driver_aeads
10177 OP_ALG_AAI_CTR_MOD128,
10178 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10179 OP_ALG_AAI_HMAC_PRECOMP,
10180 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10181 .rfc3686 = true,
10182 },
10183 },
10184 @@ -4217,7 +3113,6 @@ static struct caam_aead_alg driver_aeads
10185 OP_ALG_AAI_CTR_MOD128,
10186 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10187 OP_ALG_AAI_HMAC_PRECOMP,
10188 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10189 .rfc3686 = true,
10190 .geniv = true,
10191 },
10192 @@ -4243,7 +3138,6 @@ static struct caam_aead_alg driver_aeads
10193 OP_ALG_AAI_CTR_MOD128,
10194 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10195 OP_ALG_AAI_HMAC_PRECOMP,
10196 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10197 .rfc3686 = true,
10198 },
10199 },
10200 @@ -4268,7 +3162,6 @@ static struct caam_aead_alg driver_aeads
10201 OP_ALG_AAI_CTR_MOD128,
10202 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10203 OP_ALG_AAI_HMAC_PRECOMP,
10204 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10205 .rfc3686 = true,
10206 .geniv = true,
10207 },
10208 @@ -4294,7 +3187,6 @@ static struct caam_aead_alg driver_aeads
10209 OP_ALG_AAI_CTR_MOD128,
10210 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10211 OP_ALG_AAI_HMAC_PRECOMP,
10212 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10213 .rfc3686 = true,
10214 },
10215 },
10216 @@ -4319,7 +3211,6 @@ static struct caam_aead_alg driver_aeads
10217 OP_ALG_AAI_CTR_MOD128,
10218 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10219 OP_ALG_AAI_HMAC_PRECOMP,
10220 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10221 .rfc3686 = true,
10222 .geniv = true,
10223 },
10224 @@ -4345,7 +3236,6 @@ static struct caam_aead_alg driver_aeads
10225 OP_ALG_AAI_CTR_MOD128,
10226 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10227 OP_ALG_AAI_HMAC_PRECOMP,
10228 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10229 .rfc3686 = true,
10230 },
10231 },
10232 @@ -4370,7 +3260,6 @@ static struct caam_aead_alg driver_aeads
10233 OP_ALG_AAI_CTR_MOD128,
10234 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10235 OP_ALG_AAI_HMAC_PRECOMP,
10236 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10237 .rfc3686 = true,
10238 .geniv = true,
10239 },
10240 @@ -4383,18 +3272,44 @@ struct caam_crypto_alg {
10241 struct caam_alg_entry caam;
10242 };
10243
10244 -static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
10245 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
10246 + bool uses_dkp)
10247 {
10248 + dma_addr_t dma_addr;
10249 + struct caam_drv_private *priv;
10250 +
10251 ctx->jrdev = caam_jr_alloc();
10252 if (IS_ERR(ctx->jrdev)) {
10253 pr_err("Job Ring Device allocation for transform failed\n");
10254 return PTR_ERR(ctx->jrdev);
10255 }
10256
10257 + priv = dev_get_drvdata(ctx->jrdev->parent);
10258 + if (priv->era >= 6 && uses_dkp)
10259 + ctx->dir = DMA_BIDIRECTIONAL;
10260 + else
10261 + ctx->dir = DMA_TO_DEVICE;
10262 +
10263 + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
10264 + offsetof(struct caam_ctx,
10265 + sh_desc_enc_dma),
10266 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
10267 + if (dma_mapping_error(ctx->jrdev, dma_addr)) {
10268 + dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
10269 + caam_jr_free(ctx->jrdev);
10270 + return -ENOMEM;
10271 + }
10272 +
10273 + ctx->sh_desc_enc_dma = dma_addr;
10274 + ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
10275 + sh_desc_dec);
10276 + ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
10277 + sh_desc_givenc);
10278 + ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
10279 +
10280 /* copy descriptor header template value */
10281 - ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10282 - ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10283 - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
10284 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10285 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10286
10287 return 0;
10288 }
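
[Editorial note, not part of the patch] The rewritten caam_init_common() above replaces four separate mappings with a single dma_map_single_attrs() over the region of struct caam_ctx that runs from sh_desc_enc up to (but not including) sh_desc_enc_dma; every other bus address is then derived by offset arithmetic. The standalone sketch below illustrates that offsetof()-based derivation; the struct layout is a made-up stand-in for caam_ctx, not the real definition.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for struct caam_ctx: the members covered by the single DMA
     * mapping must be contiguous and sit before the first *_dma field,
     * because that field's offset is also used as the mapping length. */
    struct demo_ctx {
            uint32_t sh_desc_enc[16];
            uint32_t sh_desc_dec[16];
            uint32_t sh_desc_givenc[16];
            uint8_t  key[64];
            uint64_t sh_desc_enc_dma;       /* not covered by the mapping */
    };

    int main(void)
    {
            uint64_t base = 0x80001000;     /* pretend bus address from the mapping */
            size_t maplen = offsetof(struct demo_ctx, sh_desc_enc_dma);

            /* Same arithmetic as ctx->sh_desc_dec_dma / ctx->key_dma above. */
            uint64_t dec_dma = base + offsetof(struct demo_ctx, sh_desc_dec);
            uint64_t key_dma = base + offsetof(struct demo_ctx, key);

            /* The mapping covers exactly the descriptors plus the key. */
            assert(maplen == offsetof(struct demo_ctx, key) +
                             sizeof(((struct demo_ctx *)0)->key));
            assert(dec_dma - base == sizeof(((struct demo_ctx *)0)->sh_desc_enc));
            (void)key_dma;
            return 0;
    }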
10289 @@ -4406,7 +3321,7 @@ static int caam_cra_init(struct crypto_t
10290 container_of(alg, struct caam_crypto_alg, crypto_alg);
10291 struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
10292
10293 - return caam_init_common(ctx, &caam_alg->caam);
10294 + return caam_init_common(ctx, &caam_alg->caam, false);
10295 }
10296
10297 static int caam_aead_init(struct crypto_aead *tfm)
10298 @@ -4416,30 +3331,15 @@ static int caam_aead_init(struct crypto_
10299 container_of(alg, struct caam_aead_alg, aead);
10300 struct caam_ctx *ctx = crypto_aead_ctx(tfm);
10301
10302 - return caam_init_common(ctx, &caam_alg->caam);
10303 + return caam_init_common(ctx, &caam_alg->caam,
10304 + alg->setkey == aead_setkey);
10305 }
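
[Editorial note, not part of the patch] caam_aead_init() passes uses_dkp = (alg->setkey == aead_setkey) because, from SEC Era 6 on, those transforms use the Derived Key Protocol, which has the device write the derived split key back into the mapped context; that is why caam_init_common() selects a bidirectional mapping in that case. A minimal restatement of the decision, with stand-in enum values rather than the kernel DMA API:

    #include <stdio.h>

    /* Illustrative only: mirrors the ctx->dir selection in caam_init_common().
     * The enum values stand in for DMA_TO_DEVICE / DMA_BIDIRECTIONAL. */
    enum demo_dma_dir { DEMO_DMA_TO_DEVICE, DEMO_DMA_BIDIRECTIONAL };

    static enum demo_dma_dir ctx_dma_dir(int era, int uses_dkp)
    {
            /* Era >= 6 transforms that go through aead_setkey() use DKP,
             * which writes the derived split key back into the mapped region. */
            return (era >= 6 && uses_dkp) ? DEMO_DMA_BIDIRECTIONAL
                                          : DEMO_DMA_TO_DEVICE;
    }

    int main(void)
    {
            printf("era 4, dkp: %d\n", ctx_dma_dir(4, 1)); /* TO_DEVICE */
            printf("era 8, dkp: %d\n", ctx_dma_dir(8, 1)); /* BIDIRECTIONAL */
            return 0;
    }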
10306
10307 static void caam_exit_common(struct caam_ctx *ctx)
10308 {
10309 - if (ctx->sh_desc_enc_dma &&
10310 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
10311 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
10312 - desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
10313 - if (ctx->sh_desc_dec_dma &&
10314 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
10315 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
10316 - desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
10317 - if (ctx->sh_desc_givenc_dma &&
10318 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
10319 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
10320 - desc_bytes(ctx->sh_desc_givenc),
10321 - DMA_TO_DEVICE);
10322 - if (ctx->key_dma &&
10323 - !dma_mapping_error(ctx->jrdev, ctx->key_dma))
10324 - dma_unmap_single(ctx->jrdev, ctx->key_dma,
10325 - ctx->enckeylen + ctx->split_key_pad_len,
10326 - DMA_TO_DEVICE);
10327 -
10328 + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
10329 + offsetof(struct caam_ctx, sh_desc_enc_dma),
10330 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
10331 caam_jr_free(ctx->jrdev);
10332 }
10333
10334 @@ -4515,7 +3415,6 @@ static struct caam_crypto_alg *caam_alg_
10335
10336 t_alg->caam.class1_alg_type = template->class1_alg_type;
10337 t_alg->caam.class2_alg_type = template->class2_alg_type;
10338 - t_alg->caam.alg_op = template->alg_op;
10339
10340 return t_alg;
10341 }
10342 --- /dev/null
10343 +++ b/drivers/crypto/caam/caamalg_desc.c
10344 @@ -0,0 +1,1961 @@
10345 +/*
10346 + * Shared descriptors for aead, ablkcipher algorithms
10347 + *
10348 + * Copyright 2016 NXP
10349 + */
10350 +
10351 +#include "compat.h"
10352 +#include "desc_constr.h"
10353 +#include "caamalg_desc.h"
10354 +
10355 +/*
10356 + * For aead functions, read payload and write payload,
10357 + * both of which are specified in req->src and req->dst
10358 + */
10359 +static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
10360 +{
10361 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10362 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
10363 + KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
10364 +}
10365 +
10366 +/* Set DK bit in class 1 operation if shared */
10367 +static inline void append_dec_op1(u32 *desc, u32 type)
10368 +{
10369 + u32 *jump_cmd, *uncond_jump_cmd;
10370 +
10371 + /* DK bit is valid only for AES */
10372 + if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
10373 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10374 + OP_ALG_DECRYPT);
10375 + return;
10376 + }
10377 +
10378 + jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
10379 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10380 + OP_ALG_DECRYPT);
10381 + uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10382 + set_jump_tgt_here(desc, jump_cmd);
10383 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10384 + OP_ALG_DECRYPT | OP_ALG_AAI_DK);
10385 + set_jump_tgt_here(desc, uncond_jump_cmd);
10386 +}
10387 +
10388 +/**
10389 + * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
10390 + * (non-protocol) with no (null) encryption.
10391 + * @desc: pointer to buffer used for descriptor construction
10392 + * @adata: pointer to authentication transform definitions.
10393 + * A split key is required for SEC Era < 6; the size of the split key
10394 + * is specified in this case. Valid algorithm values - one of
10395 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10396 + * with OP_ALG_AAI_HMAC_PRECOMP.
10397 + * @icvsize: integrity check value (ICV) size (truncated or full)
10398 + * @era: SEC Era
10399 + */
10400 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
10401 + unsigned int icvsize, int era)
10402 +{
10403 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
10404 +
10405 + init_sh_desc(desc, HDR_SHARE_SERIAL);
10406 +
10407 + /* Skip if already shared */
10408 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10409 + JUMP_COND_SHRD);
10410 + if (era < 6) {
10411 + if (adata->key_inline)
10412 + append_key_as_imm(desc, adata->key_virt,
10413 + adata->keylen_pad, adata->keylen,
10414 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
10415 + KEY_ENC);
10416 + else
10417 + append_key(desc, adata->key_dma, adata->keylen,
10418 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
10419 + } else {
10420 + append_proto_dkp(desc, adata);
10421 + }
10422 + set_jump_tgt_here(desc, key_jump_cmd);
10423 +
10424 + /* assoclen + cryptlen = seqinlen */
10425 + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
10426 +
10427 + /* Prepare to read and write cryptlen + assoclen bytes */
10428 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10429 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10430 +
10431 + /*
10432 + * MOVE_LEN opcode is not available in all SEC HW revisions,
10433 + * thus need to do some magic, i.e. self-patch the descriptor
10434 + * buffer.
10435 + */
10436 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10437 + MOVE_DEST_MATH3 |
10438 + (0x6 << MOVE_LEN_SHIFT));
10439 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
10440 + MOVE_DEST_DESCBUF |
10441 + MOVE_WAITCOMP |
10442 + (0x8 << MOVE_LEN_SHIFT));
10443 +
10444 + /* Class 2 operation */
10445 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10446 + OP_ALG_ENCRYPT);
10447 +
10448 + /* Read and write cryptlen bytes */
10449 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10450 +
10451 + set_move_tgt_here(desc, read_move_cmd);
10452 + set_move_tgt_here(desc, write_move_cmd);
10453 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10454 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10455 + MOVE_AUX_LS);
10456 +
10457 + /* Write ICV */
10458 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10459 + LDST_SRCDST_BYTE_CONTEXT);
10460 +
10461 +#ifdef DEBUG
10462 + print_hex_dump(KERN_ERR,
10463 + "aead null enc shdesc@" __stringify(__LINE__)": ",
10464 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10465 +#endif
10466 +}
10467 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
10468 +
10469 +/**
10470 + * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
10471 + * (non-protocol) with no (null) decryption.
10472 + * @desc: pointer to buffer used for descriptor construction
10473 + * @adata: pointer to authentication transform definitions.
10474 + * A split key is required for SEC Era < 6; the size of the split key
10475 + * is specified in this case. Valid algorithm values - one of
10476 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10477 + * with OP_ALG_AAI_HMAC_PRECOMP.
10478 + * @icvsize: integrity check value (ICV) size (truncated or full)
10479 + * @era: SEC Era
10480 + */
10481 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
10482 + unsigned int icvsize, int era)
10483 +{
10484 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
10485 +
10486 + init_sh_desc(desc, HDR_SHARE_SERIAL);
10487 +
10488 + /* Skip if already shared */
10489 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10490 + JUMP_COND_SHRD);
10491 + if (era < 6) {
10492 + if (adata->key_inline)
10493 + append_key_as_imm(desc, adata->key_virt,
10494 + adata->keylen_pad, adata->keylen,
10495 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
10496 + KEY_ENC);
10497 + else
10498 + append_key(desc, adata->key_dma, adata->keylen,
10499 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
10500 + } else {
10501 + append_proto_dkp(desc, adata);
10502 + }
10503 + set_jump_tgt_here(desc, key_jump_cmd);
10504 +
10505 + /* Class 2 operation */
10506 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10507 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10508 +
10509 + /* assoclen + cryptlen = seqoutlen */
10510 + append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10511 +
10512 + /* Prepare to read and write cryptlen + assoclen bytes */
10513 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
10514 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
10515 +
10516 + /*
10517 + * MOVE_LEN opcode is not available in all SEC HW revisions,
10518 + * thus need to do some magic, i.e. self-patch the descriptor
10519 + * buffer.
10520 + */
10521 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10522 + MOVE_DEST_MATH2 |
10523 + (0x6 << MOVE_LEN_SHIFT));
10524 + write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
10525 + MOVE_DEST_DESCBUF |
10526 + MOVE_WAITCOMP |
10527 + (0x8 << MOVE_LEN_SHIFT));
10528 +
10529 + /* Read and write cryptlen bytes */
10530 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10531 +
10532 + /*
10533 + * Insert a NOP here, since we need at least 4 instructions between
10534 + * code patching the descriptor buffer and the location being patched.
10535 + */
10536 + jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10537 + set_jump_tgt_here(desc, jump_cmd);
10538 +
10539 + set_move_tgt_here(desc, read_move_cmd);
10540 + set_move_tgt_here(desc, write_move_cmd);
10541 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10542 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10543 + MOVE_AUX_LS);
10544 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10545 +
10546 + /* Load ICV */
10547 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10548 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10549 +
10550 +#ifdef DEBUG
10551 + print_hex_dump(KERN_ERR,
10552 + "aead null dec shdesc@" __stringify(__LINE__)": ",
10553 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10554 +#endif
10555 +}
10556 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
10557 +
10558 +static void init_sh_desc_key_aead(u32 * const desc,
10559 + struct alginfo * const cdata,
10560 + struct alginfo * const adata,
10561 + const bool is_rfc3686, u32 *nonce, int era)
10562 +{
10563 + u32 *key_jump_cmd;
10564 + unsigned int enckeylen = cdata->keylen;
10565 +
10566 + /* Note: Context registers are saved. */
10567 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
10568 +
10569 + /* Skip if already shared */
10570 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10571 + JUMP_COND_SHRD);
10572 +
10573 + /*
10574 + * RFC3686 specific:
10575 + * | key = {AUTH_KEY, ENC_KEY, NONCE}
10576 + * | enckeylen = encryption key size + nonce size
10577 + */
10578 + if (is_rfc3686)
10579 + enckeylen -= CTR_RFC3686_NONCE_SIZE;
10580 +
10581 + if (era < 6) {
10582 + if (adata->key_inline)
10583 + append_key_as_imm(desc, adata->key_virt,
10584 + adata->keylen_pad, adata->keylen,
10585 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
10586 + KEY_ENC);
10587 + else
10588 + append_key(desc, adata->key_dma, adata->keylen,
10589 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
10590 + } else {
10591 + append_proto_dkp(desc, adata);
10592 + }
10593 +
10594 + if (cdata->key_inline)
10595 + append_key_as_imm(desc, cdata->key_virt, enckeylen,
10596 + enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
10597 + else
10598 + append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
10599 + KEY_DEST_CLASS_REG);
10600 +
10601 + /* Load Counter into CONTEXT1 reg */
10602 + if (is_rfc3686) {
10603 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
10604 + LDST_CLASS_IND_CCB |
10605 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
10606 + append_move(desc,
10607 + MOVE_SRC_OUTFIFO |
10608 + MOVE_DEST_CLASS1CTX |
10609 + (16 << MOVE_OFFSET_SHIFT) |
10610 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
10611 + }
10612 +
10613 + set_jump_tgt_here(desc, key_jump_cmd);
10614 +}
10615 +
10616 +/**
10617 + * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
10618 + * (non-protocol).
10619 + * @desc: pointer to buffer used for descriptor construction
10620 + * @cdata: pointer to block cipher transform definitions
10621 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10622 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10623 + * @adata: pointer to authentication transform definitions.
10624 + * A split key is required for SEC Era < 6; the size of the split key
10625 + * is specified in this case. Valid algorithm values - one of
10626 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10627 + * with OP_ALG_AAI_HMAC_PRECOMP.
10628 + * @ivsize: initialization vector size
10629 + * @icvsize: integrity check value (ICV) size (truncated or full)
10630 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10631 + * @nonce: pointer to rfc3686 nonce
10632 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10633 + * @is_qi: true when called from caam/qi
10634 + * @era: SEC Era
10635 + */
10636 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
10637 + struct alginfo *adata, unsigned int ivsize,
10638 + unsigned int icvsize, const bool is_rfc3686,
10639 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
10640 + int era)
10641 +{
10642 + /* Note: Context registers are saved. */
10643 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
10644 +
10645 + /* Class 2 operation */
10646 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10647 + OP_ALG_ENCRYPT);
10648 +
10649 + if (is_qi) {
10650 + u32 *wait_load_cmd;
10651 +
10652 + /* REG3 = assoclen */
10653 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10654 + LDST_SRCDST_WORD_DECO_MATH3 |
10655 + (4 << LDST_OFFSET_SHIFT));
10656 +
10657 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10658 + JUMP_COND_CALM | JUMP_COND_NCP |
10659 + JUMP_COND_NOP | JUMP_COND_NIP |
10660 + JUMP_COND_NIFP);
10661 + set_jump_tgt_here(desc, wait_load_cmd);
10662 +
10663 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10664 + LDST_SRCDST_BYTE_CONTEXT |
10665 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10666 + }
10667 +
10668 + /* Read and write assoclen bytes */
10669 + if (is_qi || era < 3) {
10670 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10671 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10672 + } else {
10673 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10674 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10675 + }
10676 +
10677 + /* Skip assoc data */
10678 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10679 +
10680 + /* read assoc before reading payload */
10681 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10682 + FIFOLDST_VLF);
10683 +
10684 + /* Load Counter into CONTEXT1 reg */
10685 + if (is_rfc3686)
10686 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10687 + LDST_SRCDST_BYTE_CONTEXT |
10688 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10689 + LDST_OFFSET_SHIFT));
10690 +
10691 + /* Class 1 operation */
10692 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10693 + OP_ALG_ENCRYPT);
10694 +
10695 + /* Read and write cryptlen bytes */
10696 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10697 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10698 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
10699 +
10700 + /* Write ICV */
10701 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10702 + LDST_SRCDST_BYTE_CONTEXT);
10703 +
10704 +#ifdef DEBUG
10705 + print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
10706 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10707 +#endif
10708 +}
10709 +EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
10710 +
10711 +/**
10712 + * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
10713 + * (non-protocol).
10714 + * @desc: pointer to buffer used for descriptor construction
10715 + * @cdata: pointer to block cipher transform definitions
10716 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10717 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10718 + * @adata: pointer to authentication transform definitions.
10719 + * A split key is required for SEC Era < 6; the size of the split key
10720 + * is specified in this case. Valid algorithm values - one of
10721 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10722 + * with OP_ALG_AAI_HMAC_PRECOMP.
10723 + * @ivsize: initialization vector size
10724 + * @icvsize: integrity check value (ICV) size (truncated or full)
10725 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10726 + * @nonce: pointer to rfc3686 nonce
10727 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10728 + * @is_qi: true when called from caam/qi
10729 + * @era: SEC Era
10730 + */
10731 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
10732 + struct alginfo *adata, unsigned int ivsize,
10733 + unsigned int icvsize, const bool geniv,
10734 + const bool is_rfc3686, u32 *nonce,
10735 + const u32 ctx1_iv_off, const bool is_qi, int era)
10736 +{
10737 + /* Note: Context registers are saved. */
10738 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
10739 +
10740 + /* Class 2 operation */
10741 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10742 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10743 +
10744 + if (is_qi) {
10745 + u32 *wait_load_cmd;
10746 +
10747 + /* REG3 = assoclen */
10748 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10749 + LDST_SRCDST_WORD_DECO_MATH3 |
10750 + (4 << LDST_OFFSET_SHIFT));
10751 +
10752 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10753 + JUMP_COND_CALM | JUMP_COND_NCP |
10754 + JUMP_COND_NOP | JUMP_COND_NIP |
10755 + JUMP_COND_NIFP);
10756 + set_jump_tgt_here(desc, wait_load_cmd);
10757 +
10758 + if (!geniv)
10759 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10760 + LDST_SRCDST_BYTE_CONTEXT |
10761 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10762 + }
10763 +
10764 + /* Read and write assoclen bytes */
10765 + if (is_qi || era < 3) {
10766 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10767 + if (geniv)
10768 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
10769 + ivsize);
10770 + else
10771 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
10772 + CAAM_CMD_SZ);
10773 + } else {
10774 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10775 + if (geniv)
10776 + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
10777 + ivsize);
10778 + else
10779 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
10780 + CAAM_CMD_SZ);
10781 + }
10782 +
10783 + /* Skip assoc data */
10784 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10785 +
10786 + /* read assoc before reading payload */
10787 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10788 + KEY_VLF);
10789 +
10790 + if (geniv) {
10791 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10792 + LDST_SRCDST_BYTE_CONTEXT |
10793 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10794 + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
10795 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
10796 + }
10797 +
10798 + /* Load Counter into CONTEXT1 reg */
10799 + if (is_rfc3686)
10800 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10801 + LDST_SRCDST_BYTE_CONTEXT |
10802 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10803 + LDST_OFFSET_SHIFT));
10804 +
10805 + /* Choose operation */
10806 + if (ctx1_iv_off)
10807 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10808 + OP_ALG_DECRYPT);
10809 + else
10810 + append_dec_op1(desc, cdata->algtype);
10811 +
10812 + /* Read and write cryptlen bytes */
10813 + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10814 + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10815 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
10816 +
10817 + /* Load ICV */
10818 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10819 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10820 +
10821 +#ifdef DEBUG
10822 + print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
10823 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10824 +#endif
10825 +}
10826 +EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
10827 +
10828 +/**
10829 + * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
10830 + * (non-protocol) with HW-generated initialization
10831 + * vector.
10832 + * @desc: pointer to buffer used for descriptor construction
10833 + * @cdata: pointer to block cipher transform definitions
10834 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10835 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10836 + * @adata: pointer to authentication transform definitions.
10837 + * A split key is required for SEC Era < 6; the size of the split key
10838 + * is specified in this case. Valid algorithm values - one of
10839 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10840 + * with OP_ALG_AAI_HMAC_PRECOMP.
10841 + * @ivsize: initialization vector size
10842 + * @icvsize: integrity check value (ICV) size (truncated or full)
10843 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10844 + * @nonce: pointer to rfc3686 nonce
10845 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10846 + * @is_qi: true when called from caam/qi
10847 + * @era: SEC Era
10848 + */
10849 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
10850 + struct alginfo *adata, unsigned int ivsize,
10851 + unsigned int icvsize, const bool is_rfc3686,
10852 + u32 *nonce, const u32 ctx1_iv_off,
10853 + const bool is_qi, int era)
10854 +{
10855 + u32 geniv, moveiv;
10856 +
10857 + /* Note: Context registers are saved. */
10858 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
10859 +
10860 + if (is_qi) {
10861 + u32 *wait_load_cmd;
10862 +
10863 + /* REG3 = assoclen */
10864 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10865 + LDST_SRCDST_WORD_DECO_MATH3 |
10866 + (4 << LDST_OFFSET_SHIFT));
10867 +
10868 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10869 + JUMP_COND_CALM | JUMP_COND_NCP |
10870 + JUMP_COND_NOP | JUMP_COND_NIP |
10871 + JUMP_COND_NIFP);
10872 + set_jump_tgt_here(desc, wait_load_cmd);
10873 + }
10874 +
10875 + if (is_rfc3686) {
10876 + if (is_qi)
10877 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10878 + LDST_SRCDST_BYTE_CONTEXT |
10879 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10880 +
10881 + goto copy_iv;
10882 + }
10883 +
10884 + /* Generate IV */
10885 + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
10886 + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
10887 + NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10888 + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
10889 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10890 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10891 + append_move(desc, MOVE_WAITCOMP |
10892 + MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
10893 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10894 + (ivsize << MOVE_LEN_SHIFT));
10895 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10896 +
10897 +copy_iv:
10898 + /* Copy IV to class 1 context */
10899 + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
10900 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10901 + (ivsize << MOVE_LEN_SHIFT));
10902 +
10903 + /* Return to encryption */
10904 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10905 + OP_ALG_ENCRYPT);
10906 +
10907 + /* Read and write assoclen bytes */
10908 + if (is_qi || era < 3) {
10909 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10910 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10911 + } else {
10912 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10913 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10914 + }
10915 +
10916 + /* Skip assoc data */
10917 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10918 +
10919 + /* read assoc before reading payload */
10920 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10921 + KEY_VLF);
10922 +
10923 + /* Copy iv from outfifo to class 2 fifo */
10924 + moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
10925 + NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10926 + append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
10927 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10928 + append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
10929 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10930 +
10931 + /* Load Counter into CONTEXT1 reg */
10932 + if (is_rfc3686)
10933 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10934 + LDST_SRCDST_BYTE_CONTEXT |
10935 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10936 + LDST_OFFSET_SHIFT));
10937 +
10938 + /* Class 1 operation */
10939 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10940 + OP_ALG_ENCRYPT);
10941 +
10942 + /* Will write ivsize + cryptlen */
10943 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10944 +
10945 + /* No need to reload iv */
10946 + append_seq_fifo_load(desc, ivsize,
10947 + FIFOLD_CLASS_SKIP);
10948 +
10949 + /* Will read cryptlen */
10950 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10951 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
10952 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
10953 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10954 +
10955 + /* Write ICV */
10956 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10957 + LDST_SRCDST_BYTE_CONTEXT);
10958 +
10959 +#ifdef DEBUG
10960 + print_hex_dump(KERN_ERR,
10961 + "aead givenc shdesc@" __stringify(__LINE__)": ",
10962 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10963 +#endif
10964 +}
10965 +EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
10966 +
10967 +/**
10968 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
10969 + * @desc: pointer to buffer used for descriptor construction
10970 + * @cdata: pointer to block cipher transform definitions
10971 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
10972 + * with OP_ALG_AAI_CBC
10973 + * @adata: pointer to authentication transform definitions.
10974 + * A split key is required for SEC Era < 6; the size of the split key
10975 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
10976 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10977 + * @assoclen: associated data length
10978 + * @ivsize: initialization vector size
10979 + * @authsize: authentication data size
10980 + * @blocksize: block cipher size
10981 + * @era: SEC Era
10982 + */
10983 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
10984 + struct alginfo *adata, unsigned int assoclen,
10985 + unsigned int ivsize, unsigned int authsize,
10986 + unsigned int blocksize, int era)
10987 +{
10988 + u32 *key_jump_cmd, *zero_payload_jump_cmd;
10989 + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
10990 +
10991 + /*
10992 + * Compute the index (in bytes) for the LOAD with destination of
10993 + * Class 1 Data Size Register and for the LOAD that generates padding
10994 + */
10995 + if (adata->key_inline) {
10996 + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10997 + cdata->keylen - 4 * CAAM_CMD_SZ;
10998 + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10999 + cdata->keylen - 2 * CAAM_CMD_SZ;
11000 + } else {
11001 + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
11002 + 4 * CAAM_CMD_SZ;
11003 + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
11004 + 2 * CAAM_CMD_SZ;
11005 + }
11006 +
11007 + stidx = 1 << HDR_START_IDX_SHIFT;
11008 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
11009 +
11010 + /* skip key loading if they are loaded due to sharing */
11011 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11012 + JUMP_COND_SHRD);
11013 +
11014 + if (era < 6) {
11015 + if (adata->key_inline)
11016 + append_key_as_imm(desc, adata->key_virt,
11017 + adata->keylen_pad, adata->keylen,
11018 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
11019 + KEY_ENC);
11020 + else
11021 + append_key(desc, adata->key_dma, adata->keylen,
11022 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
11023 + } else {
11024 + append_proto_dkp(desc, adata);
11025 + }
11026 +
11027 + if (cdata->key_inline)
11028 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11029 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11030 + else
11031 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11032 + KEY_DEST_CLASS_REG);
11033 +
11034 + set_jump_tgt_here(desc, key_jump_cmd);
11035 +
11036 + /* class 2 operation */
11037 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
11038 + OP_ALG_ENCRYPT);
11039 + /* class 1 operation */
11040 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11041 + OP_ALG_ENCRYPT);
11042 +
11043 + /* payloadlen = input data length - (assoclen + ivlen) */
11044 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
11045 +
11046 + /* math1 = payloadlen + icvlen */
11047 + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
11048 +
11049 + /* padlen = block_size - math1 % block_size */
11050 + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
11051 + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
11052 +
11053 + /* cryptlen = payloadlen + icvlen + padlen */
11054 + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
11055 +
11056 + /*
11057 + * update immediate data with the padding length value
11058 + * for the LOAD in the class 1 data size register.
11059 + */
11060 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
11061 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
11062 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
11063 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
11064 +
11065 + /* overwrite PL field for the padding info FIFO entry */
11066 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
11067 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
11068 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
11069 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
11070 +
11071 + /* store encrypted payload, icv and padding */
11072 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
11073 +
11074 + /* if payload length is zero, jump to zero-payload commands */
11075 + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
11076 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11077 + JUMP_COND_MATH_Z);
11078 +
11079 + /* load iv in context1 */
11080 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11081 + LDST_CLASS_1_CCB | ivsize);
11082 +
11083 + /* read assoc for authentication */
11084 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
11085 + FIFOLD_TYPE_MSG);
11086 + /* insnoop payload */
11087 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
11088 + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
11089 +
11090 + /* jump the zero-payload commands */
11091 + /* skip the zero-payload commands */
11092 +
11093 + /* zero-payload commands */
11094 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11095 +
11096 + /* load iv in context1 */
11097 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11098 + LDST_CLASS_1_CCB | ivsize);
11099 +
11100 + /* assoc data is the only data for authentication */
11101 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
11102 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
11103 +
11104 + /* send icv to encryption */
11105 + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
11106 + authsize);
11107 +
11108 + /* update class 1 data size register with padding length */
11109 + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
11110 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
11111 +
11112 + /* generate padding and send it to encryption */
11113 + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
11114 + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
11115 + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
11116 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
11117 +
11118 +#ifdef DEBUG
11119 + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
11120 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
11121 + desc_bytes(desc), 1);
11122 +#endif
11123 +}
11124 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
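
[Editorial note, not part of the patch] The padding arithmetic in cnstr_shdsc_tls_encap() is easy to misread in MATH-command form: REG3 = (payloadlen + icvlen) & (blocksize - 1), then padlen = blocksize - REG3, so a record that is already block-aligned still receives a full block of padding, as TLS 1.0 CBC requires. A self-contained check of that arithmetic (illustrative only):

    #include <assert.h>
    #include <stdio.h>

    /* Mirrors the AND/SUB MATH commands used above. */
    static unsigned int tls_cbc_padlen(unsigned int payloadlen,
                                       unsigned int icvlen,
                                       unsigned int blocksize)
    {
            return blocksize - ((payloadlen + icvlen) & (blocksize - 1));
    }

    int main(void)
    {
            assert(tls_cbc_padlen(32, 20, 16) == 12); /* 32 + 20 + 12 = 64 */
            assert(tls_cbc_padlen(28, 20, 16) == 16); /* aligned -> full block */
            printf("padding math checks out\n");
            return 0;
    }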
11125 +
11126 +/**
11127 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
11128 + * @desc: pointer to buffer used for descriptor construction
11129 + * @cdata: pointer to block cipher transform definitions
11130 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
11131 + * with OP_ALG_AAI_CBC
11132 + * @adata: pointer to authentication transform definitions.
11133 + * A split key is required for SEC Era < 6; the size of the split key
11134 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
11135 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
11136 + * @assoclen: associated data length
11137 + * @ivsize: initialization vector size
11138 + * @authsize: authentication data size
11139 + * @blocksize: block cipher size
11140 + * @era: SEC Era
11141 + */
11142 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
11143 + struct alginfo *adata, unsigned int assoclen,
11144 + unsigned int ivsize, unsigned int authsize,
11145 + unsigned int blocksize, int era)
11146 +{
11147 + u32 stidx, jumpback;
11148 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
11149 + /*
11150 + * Pointer Size bool determines the size of address pointers.
11151 + * false - Pointers fit in one 32-bit word.
11152 + * true - Pointers fit in two 32-bit words.
11153 + */
11154 + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
11155 +
11156 + stidx = 1 << HDR_START_IDX_SHIFT;
11157 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
11158 +
11159 + /* skip key loading if they are loaded due to sharing */
11160 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11161 + JUMP_COND_SHRD);
11162 +
11163 + if (era < 6)
11164 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
11165 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
11166 + else
11167 + append_proto_dkp(desc, adata);
11168 +
11169 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11170 + KEY_DEST_CLASS_REG);
11171 +
11172 + set_jump_tgt_here(desc, key_jump_cmd);
11173 +
11174 + /* class 2 operation */
11175 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
11176 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11177 + /* class 1 operation */
11178 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11179 + OP_ALG_DECRYPT);
11180 +
11181 + /* VSIL = input data length - 2 * block_size */
11182 + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
11183 + blocksize);
11184 +
11185 + /*
11186 + * payloadlen + icvlen + padlen = input data length - (assoclen +
11187 + * ivsize)
11188 + */
11189 + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
11190 +
11191 + /* skip data to the last but one cipher block */
11192 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
11193 +
11194 + /* load iv for the last cipher block */
11195 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11196 + LDST_CLASS_1_CCB | ivsize);
11197 +
11198 + /* read last cipher block */
11199 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11200 + FIFOLD_TYPE_LAST1 | blocksize);
11201 +
11202 + /* move decrypted block into math0 and math1 */
11203 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
11204 + blocksize);
11205 +
11206 + /* reset AES CHA */
11207 + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
11208 + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
11209 +
11210 + /* rewind input sequence */
11211 + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
11212 +
11213 + /* key1 is in decryption form */
11214 + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
11215 + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
11216 +
11217 + /* load iv in context1 */
11218 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
11219 + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
11220 +
11221 + /* read sequence number */
11222 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
11223 + /* load Type, Version and Len fields in math0 */
11224 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
11225 + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
11226 +
11227 + /* compute (padlen - 1) */
11228 + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
11229 +
11230 + /* math2 = icvlen + (padlen - 1) + 1 */
11231 + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
11232 +
11233 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11234 +
11235 + /* VSOL = payloadlen + icvlen + padlen */
11236 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
11237 +
11238 + if (caam_little_end)
11239 + append_moveb(desc, MOVE_WAITCOMP |
11240 + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
11241 +
11242 + /* update Len field */
11243 + append_math_sub(desc, REG0, REG0, REG2, 8);
11244 +
11245 + /* store decrypted payload, icv and padding */
11246 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
11247 +
11248 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/
11249 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11250 +
11251 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11252 + JUMP_COND_MATH_Z);
11253 +
11254 + /* send Type, Version and Len(pre ICV) fields to authentication */
11255 + append_move(desc, MOVE_WAITCOMP |
11256 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11257 + (3 << MOVE_OFFSET_SHIFT) | 5);
11258 +
11259 + /* outsnooping payload */
11260 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
11261 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
11262 + FIFOLDST_VLF);
11263 + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
11264 +
11265 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11266 + /* send Type, Version and Len(pre ICV) fields to authentication */
11267 + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
11268 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11269 + (3 << MOVE_OFFSET_SHIFT) | 5);
11270 +
11271 + set_jump_tgt_here(desc, skip_zero_jump_cmd);
11272 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
11273 +
11274 + /* load icvlen and padlen */
11275 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11276 + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
11277 +
11278 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
11279 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11280 +
11281 + /*
11282 + * Start a new input sequence using the SEQ OUT PTR command options,
11283 + * pointer and length used when the current output sequence was defined.
11284 + */
11285 + if (ps) {
11286 + /*
11287 + * Move the lower 32 bits of Shared Descriptor address, the
11288 + * SEQ OUT PTR command, Output Pointer (2 words) and
11289 + * Output Length into math registers.
11290 + */
11291 + if (caam_little_end)
11292 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11293 + MOVE_DEST_MATH0 |
11294 + (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
11295 + else
11296 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11297 + MOVE_DEST_MATH0 |
11298 + (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
11299 +
11300 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
11301 + append_math_and_imm_u32(desc, REG0, REG0, IMM,
11302 + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
11303 + /* Append a JUMP command after the copied fields */
11304 + jumpback = CMD_JUMP | (char)-9;
11305 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11306 + LDST_SRCDST_WORD_DECO_MATH2 |
11307 + (4 << LDST_OFFSET_SHIFT));
11308 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11309 + /* Move the updated fields back to the Job Descriptor */
11310 + if (caam_little_end)
11311 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11312 + MOVE_DEST_DESCBUF |
11313 + (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
11314 + else
11315 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11316 + MOVE_DEST_DESCBUF |
11317 + (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
11318 +
11319 + /*
11320 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
11321 + * and then jump back to the next command from the
11322 + * Shared Descriptor.
11323 + */
11324 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
11325 + } else {
11326 + /*
11327 + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
11328 + * Output Length into math registers.
11329 + */
11330 + if (caam_little_end)
11331 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11332 + MOVE_DEST_MATH0 |
11333 + (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
11334 + else
11335 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11336 + MOVE_DEST_MATH0 |
11337 + (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
11338 +
11339 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
11340 + append_math_and_imm_u64(desc, REG0, REG0, IMM,
11341 + ~(((u64)(CMD_SEQ_IN_PTR ^
11342 + CMD_SEQ_OUT_PTR)) << 32));
11343 + /* Append a JUMP command after the copied fields */
11344 + jumpback = CMD_JUMP | (char)-7;
11345 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11346 + LDST_SRCDST_WORD_DECO_MATH1 |
11347 + (4 << LDST_OFFSET_SHIFT));
11348 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11349 + /* Move the updated fields back to the Job Descriptor */
11350 + if (caam_little_end)
11351 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11352 + MOVE_DEST_DESCBUF |
11353 + (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
11354 + else
11355 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11356 + MOVE_DEST_DESCBUF |
11357 + (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
11358 +
11359 + /*
11360 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
11361 + * and then jump back to the next command from the
11362 + * Shared Descriptor.
11363 + */
11364 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
11365 + }
11366 +
11367 + /* skip payload */
11368 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
11369 + /* check icv */
11370 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
11371 + FIFOLD_TYPE_LAST2 | authsize);
11372 +
11373 +#ifdef DEBUG
11374 + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
11375 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
11376 + desc_bytes(desc), 1);
11377 +#endif
11378 +}
11379 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
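
[Editorial note, not part of the patch] The decapsulation descriptor restarts the input sequence by reusing the job descriptor's SEQ OUT PTR command: ANDing the command word with ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR) clears exactly the bits where the two opcodes differ, and because the SEQ IN PTR opcode has no bit set that SEQ OUT PTR lacks, the result is the SEQ IN PTR encoding. A tiny standalone demonstration of the bit trick; the opcode values below are made up for the example, not the real CAAM ones:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t in  = 0xf0000000u;        /* hypothetical "SEQ IN PTR"  opcode */
            uint32_t out = 0xf8000000u;        /* hypothetical "SEQ OUT PTR" opcode */

            /* Keep the bits the two opcodes agree on, clear the ones they
             * differ on: the OUT command word becomes the IN command word. */
            assert((out & ~(in ^ out)) == in);
            return 0;
    }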
11380 +
11381 +/**
11382 + * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
11383 + * @desc: pointer to buffer used for descriptor construction
11384 + * @cdata: pointer to block cipher transform definitions
11385 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11386 + * @ivsize: initialization vector size
11387 + * @icvsize: integrity check value (ICV) size (truncated or full)
11388 + * @is_qi: true when called from caam/qi
11389 + */
11390 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
11391 + unsigned int ivsize, unsigned int icvsize,
11392 + const bool is_qi)
11393 +{
11394 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
11395 + *zero_assoc_jump_cmd2;
11396 +
11397 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11398 +
11399 + /* skip key loading if they are loaded due to sharing */
11400 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11401 + JUMP_COND_SHRD);
11402 + if (cdata->key_inline)
11403 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11404 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11405 + else
11406 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11407 + KEY_DEST_CLASS_REG);
11408 + set_jump_tgt_here(desc, key_jump_cmd);
11409 +
11410 + /* class 1 operation */
11411 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11412 + OP_ALG_ENCRYPT);
11413 +
11414 + if (is_qi) {
11415 + u32 *wait_load_cmd;
11416 +
11417 + /* REG3 = assoclen */
11418 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11419 + LDST_SRCDST_WORD_DECO_MATH3 |
11420 + (4 << LDST_OFFSET_SHIFT));
11421 +
11422 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11423 + JUMP_COND_CALM | JUMP_COND_NCP |
11424 + JUMP_COND_NOP | JUMP_COND_NIP |
11425 + JUMP_COND_NIFP);
11426 + set_jump_tgt_here(desc, wait_load_cmd);
11427 +
11428 + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
11429 + ivsize);
11430 + } else {
11431 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
11432 + CAAM_CMD_SZ);
11433 + }
11434 +
11435 + /* if assoclen + cryptlen is ZERO, skip to ICV write */
11436 + zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
11437 + JUMP_COND_MATH_Z);
11438 +
11439 + if (is_qi)
11440 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11441 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11442 +
11443 + /* if assoclen is ZERO, skip reading the assoc data */
11444 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11445 + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11446 + JUMP_COND_MATH_Z);
11447 +
11448 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11449 +
11450 + /* skip assoc data */
11451 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11452 +
11453 + /* cryptlen = seqinlen - assoclen */
11454 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
11455 +
11456 + /* if cryptlen is ZERO jump to zero-payload commands */
11457 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11458 + JUMP_COND_MATH_Z);
11459 +
11460 + /* read assoc data */
11461 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11462 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11463 + set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11464 +
11465 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11466 +
11467 + /* write encrypted data */
11468 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11469 +
11470 + /* read payload data */
11471 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11472 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11473 +
11474 + /* jump to ICV writing */
11475 + if (is_qi)
11476 + append_jump(desc, JUMP_TEST_ALL | 4);
11477 + else
11478 + append_jump(desc, JUMP_TEST_ALL | 2);
11479 +
11480 + /* zero-payload commands */
11481 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11482 +
11483 + /* read assoc data */
11484 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11485 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
11486 + if (is_qi)
11487 + /* jump to ICV writing */
11488 + append_jump(desc, JUMP_TEST_ALL | 2);
11489 +
11490 + /* There is no input data */
11491 + set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
11492 +
11493 + if (is_qi)
11494 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11495 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
11496 + FIFOLD_TYPE_LAST1);
11497 +
11498 + /* write ICV */
11499 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11500 + LDST_SRCDST_BYTE_CONTEXT);
11501 +
11502 +#ifdef DEBUG
11503 + print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
11504 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11505 +#endif
11506 +}
11507 +EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
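
[Editorial note, not part of the patch] cnstr_shdsc_gcm_encap() carves out its early-exit paths with zero_assoc_jump_cmd2 (no input at all), zero_payload_jump_cmd (AAD only) and zero_assoc_jump_cmd1 (no AAD); the branch structure is easier to follow restated as plain C. The sketch below is an editorial illustration of that control flow, not driver code:

    #include <stdio.h>

    static const char *gcm_enc_path(unsigned int assoclen, unsigned int cryptlen)
    {
            if (assoclen + cryptlen == 0)
                    return "write ICV only";
            if (cryptlen == 0)
                    return "read AAD as the last FIFO entry, then write ICV";
            if (assoclen == 0)
                    return "skip AAD read, process payload, then write ICV";
            return "read AAD, process payload, then write ICV";
    }

    int main(void)
    {
            printf("%s\n", gcm_enc_path(16, 0));
            printf("%s\n", gcm_enc_path(0, 64));
            return 0;
    }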
11508 +
11509 +/**
11510 + * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
11511 + * @desc: pointer to buffer used for descriptor construction
11512 + * @cdata: pointer to block cipher transform definitions
11513 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11514 + * @ivsize: initialization vector size
11515 + * @icvsize: integrity check value (ICV) size (truncated or full)
11516 + * @is_qi: true when called from caam/qi
11517 + */
11518 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
11519 + unsigned int ivsize, unsigned int icvsize,
11520 + const bool is_qi)
11521 +{
11522 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
11523 +
11524 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11525 +
11526 + /* skip key loading if they are loaded due to sharing */
11527 + key_jump_cmd = append_jump(desc, JUMP_JSL |
11528 + JUMP_TEST_ALL | JUMP_COND_SHRD);
11529 + if (cdata->key_inline)
11530 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11531 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11532 + else
11533 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11534 + KEY_DEST_CLASS_REG);
11535 + set_jump_tgt_here(desc, key_jump_cmd);
11536 +
11537 + /* class 1 operation */
11538 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11539 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11540 +
11541 + if (is_qi) {
11542 + u32 *wait_load_cmd;
11543 +
11544 + /* REG3 = assoclen */
11545 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11546 + LDST_SRCDST_WORD_DECO_MATH3 |
11547 + (4 << LDST_OFFSET_SHIFT));
11548 +
11549 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11550 + JUMP_COND_CALM | JUMP_COND_NCP |
11551 + JUMP_COND_NOP | JUMP_COND_NIP |
11552 + JUMP_COND_NIFP);
11553 + set_jump_tgt_here(desc, wait_load_cmd);
11554 +
11555 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11556 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11557 + }
11558 +
11559 + /* if assoclen is ZERO, skip reading the assoc data */
11560 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11561 + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11562 + JUMP_COND_MATH_Z);
11563 +
11564 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11565 +
11566 + /* skip assoc data */
11567 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11568 +
11569 + /* read assoc data */
11570 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11571 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11572 +
11573 + set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11574 +
11575 + /* cryptlen = seqoutlen - assoclen */
11576 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11577 +
11578 + /* jump to zero-payload command if cryptlen is zero */
11579 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11580 + JUMP_COND_MATH_Z);
11581 +
11582 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11583 +
11584 + /* store encrypted data */
11585 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11586 +
11587 + /* read payload data */
11588 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11589 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11590 +
11591 + /* zero-payload command */
11592 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11593 +
11594 + /* read ICV */
11595 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11596 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11597 +
11598 +#ifdef DEBUG
11599 + print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
11600 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11601 +#endif
11602 +}
11603 +EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
11604 +
11605 +/**
11606 + * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
11607 + * (non-protocol).
11608 + * @desc: pointer to buffer used for descriptor construction
11609 + * @cdata: pointer to block cipher transform definitions
11610 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11611 + * @ivsize: initialization vector size
11612 + * @icvsize: integrity check value (ICV) size (truncated or full)
11613 + * @is_qi: true when called from caam/qi
11614 + */
11615 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
11616 + unsigned int ivsize, unsigned int icvsize,
11617 + const bool is_qi)
11618 +{
11619 + u32 *key_jump_cmd;
11620 +
11621 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11622 +
11623 + /* Skip key loading if it is loaded due to sharing */
11624 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11625 + JUMP_COND_SHRD);
11626 + if (cdata->key_inline)
11627 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11628 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11629 + else
11630 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11631 + KEY_DEST_CLASS_REG);
11632 + set_jump_tgt_here(desc, key_jump_cmd);
11633 +
11634 + /* Class 1 operation */
11635 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11636 + OP_ALG_ENCRYPT);
11637 +
11638 + if (is_qi) {
11639 + u32 *wait_load_cmd;
11640 +
11641 + /* REG3 = assoclen */
11642 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11643 + LDST_SRCDST_WORD_DECO_MATH3 |
11644 + (4 << LDST_OFFSET_SHIFT));
11645 +
11646 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11647 + JUMP_COND_CALM | JUMP_COND_NCP |
11648 + JUMP_COND_NOP | JUMP_COND_NIP |
11649 + JUMP_COND_NIFP);
11650 + set_jump_tgt_here(desc, wait_load_cmd);
11651 +
11652 + /* Read salt and IV */
11653 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11654 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11655 + FIFOLD_TYPE_IV);
11656 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11657 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11658 + }
11659 +
11660 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11661 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11662 +
11663 + /* Read assoc data */
11664 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11665 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11666 +
11667 + /* Skip IV */
11668 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11669 +
11670 + /* Will read cryptlen bytes */
11671 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11672 +
11673 + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11674 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11675 +
11676 + /* Skip assoc data */
11677 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11678 +
11679 + /* cryptlen = seqoutlen - assoclen */
11680 + append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
11681 +
11682 + /* Write encrypted data */
11683 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11684 +
11685 + /* Read payload data */
11686 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11687 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11688 +
11689 + /* Write ICV */
11690 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11691 + LDST_SRCDST_BYTE_CONTEXT);
11692 +
11693 +#ifdef DEBUG
11694 + print_hex_dump(KERN_ERR,
11695 + "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
11696 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11697 +#endif
11698 +}
11699 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
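As a quick orientation, here is a minimal sketch (illustrative only; the helper name is hypothetical, and the struct alginfo field names are the ones used throughout this patch) of how a caller lays out the rfc4106 key material for the constructors above: the 4-byte salt must directly follow the AES key, and cdata->keylen counts only the AES key, which is how the "Read salt and IV" FIFO LOAD finds the salt at key_virt + keylen.

/* Illustrative sketch: map an rfc4106 key blob (AES key || 4-byte salt)
 * onto the alginfo consumed by cnstr_shdsc_rfc4106_encap()/_decap().
 */
static void example_rfc4106_fill_cdata(struct alginfo *cdata,
				       u8 *key_blob, unsigned int blob_len)
{
	cdata->keylen     = blob_len - 4; /* AES key only, salt excluded */
	cdata->key_virt   = key_blob;     /* salt sits at key_virt + keylen */
	cdata->key_inline = true;         /* assumes the key fits inline */
}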
11700 +
11701 +/**
11702 + * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
11703 + * (non-protocol).
11704 + * @desc: pointer to buffer used for descriptor construction
11705 + * @cdata: pointer to block cipher transform definitions
11706 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11707 + * @ivsize: initialization vector size
11708 + * @icvsize: integrity check value (ICV) size (truncated or full)
11709 + * @is_qi: true when called from caam/qi
11710 + */
11711 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
11712 + unsigned int ivsize, unsigned int icvsize,
11713 + const bool is_qi)
11714 +{
11715 + u32 *key_jump_cmd;
11716 +
11717 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11718 +
11719 + /* Skip key loading if it is loaded due to sharing */
11720 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11721 + JUMP_COND_SHRD);
11722 + if (cdata->key_inline)
11723 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11724 + cdata->keylen, CLASS_1 |
11725 + KEY_DEST_CLASS_REG);
11726 + else
11727 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11728 + KEY_DEST_CLASS_REG);
11729 + set_jump_tgt_here(desc, key_jump_cmd);
11730 +
11731 + /* Class 1 operation */
11732 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11733 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11734 +
11735 + if (is_qi) {
11736 + u32 *wait_load_cmd;
11737 +
11738 + /* REG3 = assoclen */
11739 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11740 + LDST_SRCDST_WORD_DECO_MATH3 |
11741 + (4 << LDST_OFFSET_SHIFT));
11742 +
11743 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11744 + JUMP_COND_CALM | JUMP_COND_NCP |
11745 + JUMP_COND_NOP | JUMP_COND_NIP |
11746 + JUMP_COND_NIFP);
11747 + set_jump_tgt_here(desc, wait_load_cmd);
11748 +
11749 + /* Read salt and IV */
11750 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11751 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11752 + FIFOLD_TYPE_IV);
11753 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11754 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11755 + }
11756 +
11757 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11758 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11759 +
11760 + /* Read assoc data */
11761 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11762 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11763 +
11764 + /* Skip IV */
11765 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11766 +
11767 + /* Will read cryptlen bytes */
11768 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
11769 +
11770 + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11771 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11772 +
11773 + /* Skip assoc data */
11774 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11775 +
11776 + /* Will write cryptlen bytes */
11777 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11778 +
11779 + /* Store payload data */
11780 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11781 +
11782 + /* Read encrypted data */
11783 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11784 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11785 +
11786 + /* Read ICV */
11787 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11788 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11789 +
11790 +#ifdef DEBUG
11791 + print_hex_dump(KERN_ERR,
11792 + "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
11793 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11794 +#endif
11795 +}
11796 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
11797 +
11798 +/**
11799 + * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
11800 + * (non-protocol).
11801 + * @desc: pointer to buffer used for descriptor construction
11802 + * @cdata: pointer to block cipher transform definitions
11803 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11804 + * @ivsize: initialization vector size
11805 + * @icvsize: integrity check value (ICV) size (truncated or full)
11806 + * @is_qi: true when called from caam/qi
11807 + */
11808 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
11809 + unsigned int ivsize, unsigned int icvsize,
11810 + const bool is_qi)
11811 +{
11812 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11813 +
11814 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11815 +
11816 + /* Skip key loading if it is loaded due to sharing */
11817 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11818 + JUMP_COND_SHRD);
11819 + if (cdata->key_inline)
11820 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11821 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11822 + else
11823 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11824 + KEY_DEST_CLASS_REG);
11825 + set_jump_tgt_here(desc, key_jump_cmd);
11826 +
11827 + /* Class 1 operation */
11828 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11829 + OP_ALG_ENCRYPT);
11830 +
11831 + if (is_qi) {
11832 + /* assoclen is not needed, skip it */
11833 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11834 +
11835 + /* Read salt and IV */
11836 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11837 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11838 + FIFOLD_TYPE_IV);
11839 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11840 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11841 + }
11842 +
11843 + /* assoclen + cryptlen = seqinlen */
11844 + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
11845 +
11846 + /*
11847 + * MOVE_LEN opcode is not available in all SEC HW revisions,
11848 + * thus need to do some magic, i.e. self-patch the descriptor
11849 + * buffer.
11850 + */
11851 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11852 + (0x6 << MOVE_LEN_SHIFT));
11853 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11854 + (0x8 << MOVE_LEN_SHIFT));
11855 +
11856 + /* Will read assoclen + cryptlen bytes */
11857 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11858 +
11859 + /* Will write assoclen + cryptlen bytes */
11860 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11861 +
11862 + /* Read and write assoclen + cryptlen bytes */
11863 + aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
11864 +
11865 + set_move_tgt_here(desc, read_move_cmd);
11866 + set_move_tgt_here(desc, write_move_cmd);
11867 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11868 + /* Move payload data to OFIFO */
11869 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11870 +
11871 + /* Write ICV */
11872 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11873 + LDST_SRCDST_BYTE_CONTEXT);
11874 +
11875 +#ifdef DEBUG
11876 + print_hex_dump(KERN_ERR,
11877 + "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
11878 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11879 +#endif
11880 +}
11881 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
11882 +
11883 +/**
11884 + * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
11885 + * (non-protocol).
11886 + * @desc: pointer to buffer used for descriptor construction
11887 + * @cdata: pointer to block cipher transform definitions
11888 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11889 + * @ivsize: initialization vector size
11890 + * @icvsize: integrity check value (ICV) size (truncated or full)
11891 + * @is_qi: true when called from caam/qi
11892 + */
11893 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
11894 + unsigned int ivsize, unsigned int icvsize,
11895 + const bool is_qi)
11896 +{
11897 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11898 +
11899 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11900 +
11901 + /* Skip key loading if it is loaded due to sharing */
11902 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11903 + JUMP_COND_SHRD);
11904 + if (cdata->key_inline)
11905 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11906 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11907 + else
11908 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11909 + KEY_DEST_CLASS_REG);
11910 + set_jump_tgt_here(desc, key_jump_cmd);
11911 +
11912 + /* Class 1 operation */
11913 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11914 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11915 +
11916 + if (is_qi) {
11917 + /* assoclen is not needed, skip it */
11918 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11919 +
11920 + /* Read salt and IV */
11921 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11922 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11923 + FIFOLD_TYPE_IV);
11924 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11925 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11926 + }
11927 +
11928 + /* assoclen + cryptlen = seqoutlen */
11929 + append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11930 +
11931 + /*
11932 + * MOVE_LEN opcode is not available in all SEC HW revisions,
11933 + * thus need to do some magic, i.e. self-patch the descriptor
11934 + * buffer.
11935 + */
11936 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11937 + (0x6 << MOVE_LEN_SHIFT));
11938 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11939 + (0x8 << MOVE_LEN_SHIFT));
11940 +
11941 + /* Will read assoclen + cryptlen bytes */
11942 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11943 +
11944 + /* Will write assoclen + cryptlen bytes */
11945 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11946 +
11947 + /* Store payload data */
11948 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11949 +
11950 + /* In-snoop assoclen + cryptlen data */
11951 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
11952 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
11953 +
11954 + set_move_tgt_here(desc, read_move_cmd);
11955 + set_move_tgt_here(desc, write_move_cmd);
11956 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11957 + /* Move payload data to OFIFO */
11958 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11959 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
11960 +
11961 + /* Read ICV */
11962 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11963 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11964 +
11965 +#ifdef DEBUG
11966 + print_hex_dump(KERN_ERR,
11967 + "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
11968 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11969 +#endif
11970 +}
11971 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
11972 +
11973 +/*
11974 + * For ablkcipher encrypt and decrypt, read from req->src and
11975 + * write to req->dst
11976 + */
11977 +static inline void ablkcipher_append_src_dst(u32 *desc)
11978 +{
11979 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11980 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11981 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
11982 + KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11983 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
11984 +}
11985 +
11986 +/**
11987 + * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
11988 + * @desc: pointer to buffer used for descriptor construction
11989 + * @cdata: pointer to block cipher transform definitions
11990 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
11991 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11992 + * @ivsize: initialization vector size
11993 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11994 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11995 + */
11996 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
11997 + unsigned int ivsize, const bool is_rfc3686,
11998 + const u32 ctx1_iv_off)
11999 +{
12000 + u32 *key_jump_cmd;
12001 +
12002 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12003 + /* Skip if already shared */
12004 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12005 + JUMP_COND_SHRD);
12006 +
12007 + /* Load class1 key only */
12008 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12009 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12010 +
12011 + /* Load nonce into CONTEXT1 reg */
12012 + if (is_rfc3686) {
12013 + const u8 *nonce = cdata->key_virt + cdata->keylen;
12014 +
12015 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
12016 + LDST_CLASS_IND_CCB |
12017 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
12018 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
12019 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
12020 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
12021 + }
12022 +
12023 + set_jump_tgt_here(desc, key_jump_cmd);
12024 +
12025 + /* Load iv */
12026 + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
12027 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12028 +
12029 + /* Load counter into CONTEXT1 reg */
12030 + if (is_rfc3686)
12031 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12032 + LDST_SRCDST_BYTE_CONTEXT |
12033 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12034 + LDST_OFFSET_SHIFT));
12035 +
12036 + /* Load operation */
12037 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12038 + OP_ALG_ENCRYPT);
12039 +
12040 + /* Perform operation */
12041 + ablkcipher_append_src_dst(desc);
12042 +
12043 +#ifdef DEBUG
12044 + print_hex_dump(KERN_ERR,
12045 + "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
12046 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12047 +#endif
12048 +}
12049 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
12050 +
12051 +/**
12052 + * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
12053 + * @desc: pointer to buffer used for descriptor construction
12054 + * @cdata: pointer to block cipher transform definitions
12055 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
12056 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
12057 + * @ivsize: initialization vector size
12058 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
12059 + * @ctx1_iv_off: IV offset in CONTEXT1 register
12060 + */
12061 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
12062 + unsigned int ivsize, const bool is_rfc3686,
12063 + const u32 ctx1_iv_off)
12064 +{
12065 + u32 *key_jump_cmd;
12066 +
12067 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12068 + /* Skip if already shared */
12069 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12070 + JUMP_COND_SHRD);
12071 +
12072 + /* Load class1 key only */
12073 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12074 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12075 +
12076 + /* Load nonce into CONTEXT1 reg */
12077 + if (is_rfc3686) {
12078 + const u8 *nonce = cdata->key_virt + cdata->keylen;
12079 +
12080 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
12081 + LDST_CLASS_IND_CCB |
12082 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
12083 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
12084 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
12085 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
12086 + }
12087 +
12088 + set_jump_tgt_here(desc, key_jump_cmd);
12089 +
12090 + /* load IV */
12091 + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
12092 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12093 +
12094 + /* Load counter into CONTEXT1 reg */
12095 + if (is_rfc3686)
12096 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12097 + LDST_SRCDST_BYTE_CONTEXT |
12098 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12099 + LDST_OFFSET_SHIFT));
12100 +
12101 + /* Choose operation */
12102 + if (ctx1_iv_off)
12103 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12104 + OP_ALG_DECRYPT);
12105 + else
12106 + append_dec_op1(desc, cdata->algtype);
12107 +
12108 + /* Perform operation */
12109 + ablkcipher_append_src_dst(desc);
12110 +
12111 +#ifdef DEBUG
12112 + print_hex_dump(KERN_ERR,
12113 + "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
12114 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12115 +#endif
12116 +}
12117 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
12118 +
12119 +/**
12120 + * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
12121 + * with HW-generated initialization vector.
12122 + * @desc: pointer to buffer used for descriptor construction
12123 + * @cdata: pointer to block cipher transform definitions
12124 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
12125 + * with OP_ALG_AAI_CBC.
12126 + * @ivsize: initialization vector size
12127 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
12128 + * @ctx1_iv_off: IV offset in CONTEXT1 register
12129 + */
12130 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
12131 + unsigned int ivsize, const bool is_rfc3686,
12132 + const u32 ctx1_iv_off)
12133 +{
12134 + u32 *key_jump_cmd, geniv;
12135 +
12136 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12137 + /* Skip if already shared */
12138 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12139 + JUMP_COND_SHRD);
12140 +
12141 + /* Load class1 key only */
12142 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12143 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12144 +
12145 + /* Load Nonce into CONTEXT1 reg */
12146 + if (is_rfc3686) {
12147 + const u8 *nonce = cdata->key_virt + cdata->keylen;
12148 +
12149 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
12150 + LDST_CLASS_IND_CCB |
12151 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
12152 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
12153 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
12154 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
12155 + }
12156 + set_jump_tgt_here(desc, key_jump_cmd);
12157 +
12158 + /* Generate IV */
12159 + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
12160 + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
12161 + (ivsize << NFIFOENTRY_DLEN_SHIFT);
12162 + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
12163 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
12164 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
12165 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
12166 + MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
12167 + (ctx1_iv_off << MOVE_OFFSET_SHIFT));
12168 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
12169 +
12170 + /* Copy generated IV to memory */
12171 + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
12172 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12173 +
12174 + /* Load Counter into CONTEXT1 reg */
12175 + if (is_rfc3686)
12176 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12177 + LDST_SRCDST_BYTE_CONTEXT |
12178 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12179 + LDST_OFFSET_SHIFT));
12180 +
12181 + if (ctx1_iv_off)
12182 + append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
12183 + (1 << JUMP_OFFSET_SHIFT));
12184 +
12185 + /* Load operation */
12186 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12187 + OP_ALG_ENCRYPT);
12188 +
12189 + /* Perform operation */
12190 + ablkcipher_append_src_dst(desc);
12191 +
12192 +#ifdef DEBUG
12193 + print_hex_dump(KERN_ERR,
12194 + "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
12195 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12196 +#endif
12197 +}
12198 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
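For reference, a short hypothetical helper (not part of this patch) showing how callers of these ablkcipher constructors typically derive ctx1_iv_off; the values mirror the CONTEXT1 layout used by the glue code later in this patch: offset 16 for plain AES-CTR, 16 plus the nonce size for rfc3686, and 0 otherwise (e.g. CBC).

/* Illustrative only: ctx1_iv_off as expected by the constructors above. */
static u32 example_ctx1_iv_off(u32 algtype, bool is_rfc3686)
{
	const bool ctr_mode = (algtype & OP_ALG_AAI_MASK) ==
			      OP_ALG_AAI_CTR_MOD128;

	if (is_rfc3686)		/* CONTEXT1[255:128] = {NONCE, IV, COUNTER} */
		return 16 + CTR_RFC3686_NONCE_SIZE;
	if (ctr_mode)		/* CONTEXT1[255:128] = IV */
		return 16;
	return 0;		/* CBC and friends keep the IV at offset 0 */
}

For rfc3686 the nonce is additionally expected to follow the cipher key in cdata->key_virt, since the descriptors load it from key_virt + keylen.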
12199 +
12200 +/**
12201 + * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
12202 + * descriptor
12203 + * @desc: pointer to buffer used for descriptor construction
12204 + * @cdata: pointer to block cipher transform definitions
12205 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
12206 + */
12207 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
12208 +{
12209 + __be64 sector_size = cpu_to_be64(512);
12210 + u32 *key_jump_cmd;
12211 +
12212 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12213 + /* Skip if already shared */
12214 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12215 + JUMP_COND_SHRD);
12216 +
12217 + /* Load class1 keys only */
12218 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12219 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12220 +
12221 + /* Load sector size with index 40 bytes (0x28) */
12222 + append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12223 + LDST_SRCDST_BYTE_CONTEXT |
12224 + (0x28 << LDST_OFFSET_SHIFT));
12225 +
12226 + set_jump_tgt_here(desc, key_jump_cmd);
12227 +
12228 + /*
12229 + * create sequence for loading the sector index
12230 + * Upper 8B of IV - will be used as sector index
12231 + * Lower 8B of IV - will be discarded
12232 + */
12233 + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12234 + (0x20 << LDST_OFFSET_SHIFT));
12235 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12236 +
12237 + /* Load operation */
12238 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12239 + OP_ALG_ENCRYPT);
12240 +
12241 + /* Perform operation */
12242 + ablkcipher_append_src_dst(desc);
12243 +
12244 +#ifdef DEBUG
12245 + print_hex_dump(KERN_ERR,
12246 + "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
12247 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12248 +#endif
12249 +}
12250 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
12251 +
12252 +/**
12253 + * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
12254 + * descriptor
12255 + * @desc: pointer to buffer used for descriptor construction
12256 + * @cdata: pointer to block cipher transform definitions
12257 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
12258 + */
12259 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
12260 +{
12261 + __be64 sector_size = cpu_to_be64(512);
12262 + u32 *key_jump_cmd;
12263 +
12264 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12265 + /* Skip if already shared */
12266 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12267 + JUMP_COND_SHRD);
12268 +
12269 + /* Load class1 key only */
12270 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12271 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12272 +
12273 + /* Load sector size with index 40 bytes (0x28) */
12274 + append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12275 + LDST_SRCDST_BYTE_CONTEXT |
12276 + (0x28 << LDST_OFFSET_SHIFT));
12277 +
12278 + set_jump_tgt_here(desc, key_jump_cmd);
12279 +
12280 + /*
12281 + * create sequence for loading the sector index
12282 + * Upper 8B of IV - will be used as sector index
12283 + * Lower 8B of IV - will be discarded
12284 + */
12285 + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12286 + (0x20 << LDST_OFFSET_SHIFT));
12287 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12288 +
12289 + /* Load operation */
12290 + append_dec_op1(desc, cdata->algtype);
12291 +
12292 + /* Perform operation */
12293 + ablkcipher_append_src_dst(desc);
12294 +
12295 +#ifdef DEBUG
12296 + print_hex_dump(KERN_ERR,
12297 + "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
12298 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12299 +#endif
12300 +}
12301 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
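As a reminder of the IV convention these XTS descriptors consume (sketch only, the struct below is purely illustrative): only the upper 8 bytes of the 16-byte sequence IV are used, as the sector index loaded into CONTEXT1 at offset 0x20; the lower 8 bytes are skipped, and the fixed 512-byte sector size has already been placed at offset 0x28 by the descriptor itself.

/* Illustrative only: 16-byte IV layout expected by the XTS descriptors. */
struct example_caam_xts_iv {
	u8 sector_index[8];	/* loaded into CONTEXT1 at offset 0x20 */
	u8 ignored[8];		/* discarded via FIFOLD_CLASS_SKIP */
};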
12302 +
12303 +MODULE_LICENSE("GPL");
12304 +MODULE_DESCRIPTION("FSL CAAM descriptor support");
12305 +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
12306 --- /dev/null
12307 +++ b/drivers/crypto/caam/caamalg_desc.h
12308 @@ -0,0 +1,127 @@
12309 +/*
12310 + * Shared descriptors for aead, ablkcipher algorithms
12311 + *
12312 + * Copyright 2016 NXP
12313 + */
12314 +
12315 +#ifndef _CAAMALG_DESC_H_
12316 +#define _CAAMALG_DESC_H_
12317 +
12318 +/* length of descriptors text */
12319 +#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
12320 +#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
12321 +#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
12322 +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
12323 +#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
12324 +#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
12325 +#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
12326 +
12327 +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
12328 +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
12329 +
12330 +/* Note: Nonce is counted in cdata.keylen */
12331 +#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
12332 +
12333 +#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
12334 +#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
12335 +#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
12336 +
12337 +#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
12338 +#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
12339 +#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
12340 +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
12341 +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
12342 +
12343 +#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
12344 +#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12345 +#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12346 +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
12347 +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
12348 +
12349 +#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
12350 +#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
12351 +#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
12352 +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
12353 +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
12354 +
12355 +#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
12356 +#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
12357 + 20 * CAAM_CMD_SZ)
12358 +#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
12359 + 15 * CAAM_CMD_SZ)
12360 +
12361 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
12362 + unsigned int icvsize, int era);
12363 +
12364 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
12365 + unsigned int icvsize, int era);
12366 +
12367 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
12368 + struct alginfo *adata, unsigned int ivsize,
12369 + unsigned int icvsize, const bool is_rfc3686,
12370 + u32 *nonce, const u32 ctx1_iv_off,
12371 + const bool is_qi, int era);
12372 +
12373 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
12374 + struct alginfo *adata, unsigned int ivsize,
12375 + unsigned int icvsize, const bool geniv,
12376 + const bool is_rfc3686, u32 *nonce,
12377 + const u32 ctx1_iv_off, const bool is_qi, int era);
12378 +
12379 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
12380 + struct alginfo *adata, unsigned int ivsize,
12381 + unsigned int icvsize, const bool is_rfc3686,
12382 + u32 *nonce, const u32 ctx1_iv_off,
12383 + const bool is_qi, int era);
12384 +
12385 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
12386 + struct alginfo *adata, unsigned int assoclen,
12387 + unsigned int ivsize, unsigned int authsize,
12388 + unsigned int blocksize, int era);
12389 +
12390 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
12391 + struct alginfo *adata, unsigned int assoclen,
12392 + unsigned int ivsize, unsigned int authsize,
12393 + unsigned int blocksize, int era);
12394 +
12395 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
12396 + unsigned int ivsize, unsigned int icvsize,
12397 + const bool is_qi);
12398 +
12399 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
12400 + unsigned int ivsize, unsigned int icvsize,
12401 + const bool is_qi);
12402 +
12403 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
12404 + unsigned int ivsize, unsigned int icvsize,
12405 + const bool is_qi);
12406 +
12407 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
12408 + unsigned int ivsize, unsigned int icvsize,
12409 + const bool is_qi);
12410 +
12411 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
12412 + unsigned int ivsize, unsigned int icvsize,
12413 + const bool is_qi);
12414 +
12415 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
12416 + unsigned int ivsize, unsigned int icvsize,
12417 + const bool is_qi);
12418 +
12419 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
12420 + unsigned int ivsize, const bool is_rfc3686,
12421 + const u32 ctx1_iv_off);
12422 +
12423 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
12424 + unsigned int ivsize, const bool is_rfc3686,
12425 + const u32 ctx1_iv_off);
12426 +
12427 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
12428 + unsigned int ivsize, const bool is_rfc3686,
12429 + const u32 ctx1_iv_off);
12430 +
12431 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
12432 +
12433 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
12434 +
12435 +#endif /* _CAAMALG_DESC_H_ */
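A minimal usage sketch of this header (hypothetical helper; assumes the alginfo field names used throughout this patch and a key small enough to be inlined): fill a struct alginfo and pass it, together with a buffer sized against the corresponding DESC_*_LEN bound, to one of the constructors declared above.

/* Illustrative only: build a GCM encap shared descriptor with an inline
 * AES key into a buffer of at least DESC_GCM_ENC_LEN bytes plus the
 * inlined key material.
 */
static void example_build_gcm_enc_shdesc(u32 *desc, u8 *key,
					 unsigned int keylen,
					 unsigned int ivsize,
					 unsigned int icvsize)
{
	struct alginfo cdata = {
		.algtype    = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
		.keylen     = keylen,
		.key_virt   = key,
		.key_inline = true,
	};

	cnstr_shdsc_gcm_encap(desc, &cdata, ivsize, icvsize, false);
}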
12436 --- /dev/null
12437 +++ b/drivers/crypto/caam/caamalg_qi.c
12438 @@ -0,0 +1,3321 @@
12439 +/*
12440 + * Freescale FSL CAAM support for crypto API over QI backend.
12441 + * Based on caamalg.c
12442 + *
12443 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
12444 + * Copyright 2016-2017 NXP
12445 + */
12446 +
12447 +#include "compat.h"
12448 +#include "ctrl.h"
12449 +#include "regs.h"
12450 +#include "intern.h"
12451 +#include "desc_constr.h"
12452 +#include "error.h"
12453 +#include "sg_sw_qm.h"
12454 +#include "key_gen.h"
12455 +#include "qi.h"
12456 +#include "jr.h"
12457 +#include "caamalg_desc.h"
12458 +
12459 +/*
12460 + * crypto alg
12461 + */
12462 +#define CAAM_CRA_PRIORITY 2000
12463 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
12464 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
12465 + SHA512_DIGEST_SIZE * 2)
12466 +
12467 +#define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
12468 + CAAM_MAX_KEY_SIZE)
12469 +#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
12470 +
12471 +struct caam_alg_entry {
12472 + int class1_alg_type;
12473 + int class2_alg_type;
12474 + bool rfc3686;
12475 + bool geniv;
12476 +};
12477 +
12478 +struct caam_aead_alg {
12479 + struct aead_alg aead;
12480 + struct caam_alg_entry caam;
12481 + bool registered;
12482 +};
12483 +
12484 +/*
12485 + * per-session context
12486 + */
12487 +struct caam_ctx {
12488 + struct device *jrdev;
12489 + u32 sh_desc_enc[DESC_MAX_USED_LEN];
12490 + u32 sh_desc_dec[DESC_MAX_USED_LEN];
12491 + u32 sh_desc_givenc[DESC_MAX_USED_LEN];
12492 + u8 key[CAAM_MAX_KEY_SIZE];
12493 + dma_addr_t key_dma;
12494 + enum dma_data_direction dir;
12495 + struct alginfo adata;
12496 + struct alginfo cdata;
12497 + unsigned int authsize;
12498 + struct device *qidev;
12499 + spinlock_t lock; /* Protects multiple init of driver context */
12500 + struct caam_drv_ctx *drv_ctx[NUM_OP];
12501 +};
12502 +
12503 +static int aead_set_sh_desc(struct crypto_aead *aead)
12504 +{
12505 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
12506 + typeof(*alg), aead);
12507 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12508 + unsigned int ivsize = crypto_aead_ivsize(aead);
12509 + u32 ctx1_iv_off = 0;
12510 + u32 *nonce = NULL;
12511 + unsigned int data_len[2];
12512 + u32 inl_mask;
12513 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12514 + OP_ALG_AAI_CTR_MOD128);
12515 + const bool is_rfc3686 = alg->caam.rfc3686;
12516 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
12517 +
12518 + if (!ctx->cdata.keylen || !ctx->authsize)
12519 + return 0;
12520 +
12521 + /*
12522 + * AES-CTR needs to load IV in CONTEXT1 reg
12523 + * at an offset of 128bits (16bytes)
12524 + * CONTEXT1[255:128] = IV
12525 + */
12526 + if (ctr_mode)
12527 + ctx1_iv_off = 16;
12528 +
12529 + /*
12530 + * RFC3686 specific:
12531 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12532 + */
12533 + if (is_rfc3686) {
12534 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12535 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
12536 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
12537 + }
12538 +
12539 + data_len[0] = ctx->adata.keylen_pad;
12540 + data_len[1] = ctx->cdata.keylen;
12541 +
12542 + if (alg->caam.geniv)
12543 + goto skip_enc;
12544 +
12545 + /* aead_encrypt shared descriptor */
12546 + if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
12547 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12548 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12549 + ARRAY_SIZE(data_len)) < 0)
12550 + return -EINVAL;
12551 +
12552 + if (inl_mask & 1)
12553 + ctx->adata.key_virt = ctx->key;
12554 + else
12555 + ctx->adata.key_dma = ctx->key_dma;
12556 +
12557 + if (inl_mask & 2)
12558 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12559 + else
12560 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12561 +
12562 + ctx->adata.key_inline = !!(inl_mask & 1);
12563 + ctx->cdata.key_inline = !!(inl_mask & 2);
12564 +
12565 + cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12566 + ivsize, ctx->authsize, is_rfc3686, nonce,
12567 + ctx1_iv_off, true, ctrlpriv->era);
12568 +
12569 +skip_enc:
12570 + /* aead_decrypt shared descriptor */
12571 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
12572 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12573 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12574 + ARRAY_SIZE(data_len)) < 0)
12575 + return -EINVAL;
12576 +
12577 + if (inl_mask & 1)
12578 + ctx->adata.key_virt = ctx->key;
12579 + else
12580 + ctx->adata.key_dma = ctx->key_dma;
12581 +
12582 + if (inl_mask & 2)
12583 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12584 + else
12585 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12586 +
12587 + ctx->adata.key_inline = !!(inl_mask & 1);
12588 + ctx->cdata.key_inline = !!(inl_mask & 2);
12589 +
12590 + cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12591 + ivsize, ctx->authsize, alg->caam.geniv,
12592 + is_rfc3686, nonce, ctx1_iv_off, true,
12593 + ctrlpriv->era);
12594 +
12595 + if (!alg->caam.geniv)
12596 + goto skip_givenc;
12597 +
12598 + /* aead_givencrypt shared descriptor */
12599 + if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
12600 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12601 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12602 + ARRAY_SIZE(data_len)) < 0)
12603 + return -EINVAL;
12604 +
12605 + if (inl_mask & 1)
12606 + ctx->adata.key_virt = ctx->key;
12607 + else
12608 + ctx->adata.key_dma = ctx->key_dma;
12609 +
12610 + if (inl_mask & 2)
12611 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12612 + else
12613 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12614 +
12615 + ctx->adata.key_inline = !!(inl_mask & 1);
12616 + ctx->cdata.key_inline = !!(inl_mask & 2);
12617 +
12618 + cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12619 + ivsize, ctx->authsize, is_rfc3686, nonce,
12620 + ctx1_iv_off, true, ctrlpriv->era);
12621 +
12622 +skip_givenc:
12623 + return 0;
12624 +}
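The inline-vs-DMA selection above is open-coded three times (encrypt, decrypt, givencrypt); the convention returned by desc_inline_query() is that bit 0 of inl_mask set means data_len[0] (the auth split key) fits inline and bit 1 means data_len[1] (the cipher key) does. A hypothetical helper capturing that pattern, for reference only:

/* Illustrative only: key placement derived from desc_inline_query()'s
 * inl_mask, as open-coded in aead_set_sh_desc() above.
 */
static void example_apply_inl_mask(struct caam_ctx *ctx, u32 inl_mask)
{
	if (inl_mask & 1)	/* auth (split) key fits inline */
		ctx->adata.key_virt = ctx->key;
	else
		ctx->adata.key_dma = ctx->key_dma;

	if (inl_mask & 2)	/* cipher key fits inline */
		ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	else
		ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);
}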
12625 +
12626 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
12627 +{
12628 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
12629 +
12630 + ctx->authsize = authsize;
12631 + aead_set_sh_desc(authenc);
12632 +
12633 + return 0;
12634 +}
12635 +
12636 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
12637 + unsigned int keylen)
12638 +{
12639 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12640 + struct device *jrdev = ctx->jrdev;
12641 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
12642 + struct crypto_authenc_keys keys;
12643 + int ret = 0;
12644 +
12645 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12646 + goto badkey;
12647 +
12648 +#ifdef DEBUG
12649 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12650 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
12651 + keys.authkeylen);
12652 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12653 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12654 +#endif
12655 +
12656 + /*
12657 + * If DKP is supported, use it in the shared descriptor to generate
12658 + * the split key.
12659 + */
12660 + if (ctrlpriv->era >= 6) {
12661 + ctx->adata.keylen = keys.authkeylen;
12662 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
12663 + OP_ALG_ALGSEL_MASK);
12664 +
12665 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
12666 + goto badkey;
12667 +
12668 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
12669 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
12670 + keys.enckeylen);
12671 + dma_sync_single_for_device(jrdev, ctx->key_dma,
12672 + ctx->adata.keylen_pad +
12673 + keys.enckeylen, ctx->dir);
12674 + goto skip_split_key;
12675 + }
12676 +
12677 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12678 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
12679 + keys.enckeylen);
12680 + if (ret)
12681 + goto badkey;
12682 +
12683 + /* append encryption key after the auth split key */
12684 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12685 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12686 + keys.enckeylen, ctx->dir);
12687 +#ifdef DEBUG
12688 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12689 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12690 + ctx->adata.keylen_pad + keys.enckeylen, 1);
12691 +#endif
12692 +
12693 +skip_split_key:
12694 + ctx->cdata.keylen = keys.enckeylen;
12695 +
12696 + ret = aead_set_sh_desc(aead);
12697 + if (ret)
12698 + goto badkey;
12699 +
12700 + /* Now update the driver contexts with the new shared descriptor */
12701 + if (ctx->drv_ctx[ENCRYPT]) {
12702 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12703 + ctx->sh_desc_enc);
12704 + if (ret) {
12705 + dev_err(jrdev, "driver enc context update failed\n");
12706 + goto badkey;
12707 + }
12708 + }
12709 +
12710 + if (ctx->drv_ctx[DECRYPT]) {
12711 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12712 + ctx->sh_desc_dec);
12713 + if (ret) {
12714 + dev_err(jrdev, "driver dec context update failed\n");
12715 + goto badkey;
12716 + }
12717 + }
12718 +
12719 + return ret;
12720 +badkey:
12721 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
12722 + return -EINVAL;
12723 +}
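For orientation (hypothetical helpers, sketch only): after a successful setkey the key buffer holds the padded authentication (split) key followed by the raw encryption key, so both virtual and DMA references to the encryption key are taken at an offset of adata.keylen_pad, exactly as done above.

/* Illustrative only: where the encryption key lands inside ctx->key:
 *
 *   ctx->key: [ auth split key, padded to adata.keylen_pad ][ enc key ]
 */
static inline u8 *example_enc_key_virt(struct caam_ctx *ctx)
{
	return ctx->key + ctx->adata.keylen_pad;
}

static inline dma_addr_t example_enc_key_dma(struct caam_ctx *ctx)
{
	return ctx->key_dma + ctx->adata.keylen_pad;
}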
12724 +
12725 +static int tls_set_sh_desc(struct crypto_aead *tls)
12726 +{
12727 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12728 + unsigned int ivsize = crypto_aead_ivsize(tls);
12729 + unsigned int blocksize = crypto_aead_blocksize(tls);
12730 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
12731 + unsigned int data_len[2];
12732 + u32 inl_mask;
12733 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
12734 +
12735 + if (!ctx->cdata.keylen || !ctx->authsize)
12736 + return 0;
12737 +
12738 + /*
12739 + * TLS 1.0 encrypt shared descriptor
12740 + * Job Descriptor and Shared Descriptor
12741 + * must fit into the 64-word Descriptor h/w Buffer
12742 + */
12743 + data_len[0] = ctx->adata.keylen_pad;
12744 + data_len[1] = ctx->cdata.keylen;
12745 +
12746 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
12747 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
12748 + return -EINVAL;
12749 +
12750 + if (inl_mask & 1)
12751 + ctx->adata.key_virt = ctx->key;
12752 + else
12753 + ctx->adata.key_dma = ctx->key_dma;
12754 +
12755 + if (inl_mask & 2)
12756 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12757 + else
12758 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12759 +
12760 + ctx->adata.key_inline = !!(inl_mask & 1);
12761 + ctx->cdata.key_inline = !!(inl_mask & 2);
12762 +
12763 + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12764 + assoclen, ivsize, ctx->authsize, blocksize,
12765 + ctrlpriv->era);
12766 +
12767 + /*
12768 + * TLS 1.0 decrypt shared descriptor
12769 + * Keys do not fit inline, regardless of algorithms used
12770 + */
12771 + ctx->adata.key_inline = false;
12772 + ctx->adata.key_dma = ctx->key_dma;
12773 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12774 +
12775 + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12776 + assoclen, ivsize, ctx->authsize, blocksize,
12777 + ctrlpriv->era);
12778 +
12779 + return 0;
12780 +}
12781 +
12782 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
12783 +{
12784 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12785 +
12786 + ctx->authsize = authsize;
12787 + tls_set_sh_desc(tls);
12788 +
12789 + return 0;
12790 +}
12791 +
12792 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
12793 + unsigned int keylen)
12794 +{
12795 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12796 + struct device *jrdev = ctx->jrdev;
12797 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
12798 + struct crypto_authenc_keys keys;
12799 + int ret = 0;
12800 +
12801 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12802 + goto badkey;
12803 +
12804 +#ifdef DEBUG
12805 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12806 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
12807 + keys.authkeylen);
12808 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12809 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12810 +#endif
12811 +
12812 + /*
12813 + * If DKP is supported, use it in the shared descriptor to generate
12814 + * the split key.
12815 + */
12816 + if (ctrlpriv->era >= 6) {
12817 + ctx->adata.keylen = keys.authkeylen;
12818 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
12819 + OP_ALG_ALGSEL_MASK);
12820 +
12821 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
12822 + goto badkey;
12823 +
12824 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
12825 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
12826 + keys.enckeylen);
12827 + dma_sync_single_for_device(jrdev, ctx->key_dma,
12828 + ctx->adata.keylen_pad +
12829 + keys.enckeylen, ctx->dir);
12830 + goto skip_split_key;
12831 + }
12832 +
12833 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12834 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
12835 + keys.enckeylen);
12836 + if (ret)
12837 + goto badkey;
12838 +
12839 + /* append encryption key after the auth split key */
12840 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12841 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12842 + keys.enckeylen, ctx->dir);
12843 +
12844 +#ifdef DEBUG
12845 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
12846 + ctx->adata.keylen, ctx->adata.keylen_pad);
12847 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12848 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12849 + ctx->adata.keylen_pad + keys.enckeylen, 1);
12850 +#endif
12851 +
12852 +skip_split_key:
12853 + ctx->cdata.keylen = keys.enckeylen;
12854 +
12855 + ret = tls_set_sh_desc(tls);
12856 + if (ret)
12857 + goto badkey;
12858 +
12859 + /* Now update the driver contexts with the new shared descriptor */
12860 + if (ctx->drv_ctx[ENCRYPT]) {
12861 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12862 + ctx->sh_desc_enc);
12863 + if (ret) {
12864 + dev_err(jrdev, "driver enc context update failed\n");
12865 + goto badkey;
12866 + }
12867 + }
12868 +
12869 + if (ctx->drv_ctx[DECRYPT]) {
12870 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12871 + ctx->sh_desc_dec);
12872 + if (ret) {
12873 + dev_err(jrdev, "driver dec context update failed\n");
12874 + goto badkey;
12875 + }
12876 + }
12877 +
12878 + return ret;
12879 +badkey:
12880 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
12881 + return -EINVAL;
12882 +}
12883 +
12884 +static int gcm_set_sh_desc(struct crypto_aead *aead)
12885 +{
12886 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12887 + unsigned int ivsize = crypto_aead_ivsize(aead);
12888 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
12889 + ctx->cdata.keylen;
12890 +
12891 + if (!ctx->cdata.keylen || !ctx->authsize)
12892 + return 0;
12893 +
12894 + /*
12895 + * Job Descriptor and Shared Descriptor
12896 + * must fit into the 64-word Descriptor h/w Buffer
12897 + */
12898 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
12899 + ctx->cdata.key_inline = true;
12900 + ctx->cdata.key_virt = ctx->key;
12901 + } else {
12902 + ctx->cdata.key_inline = false;
12903 + ctx->cdata.key_dma = ctx->key_dma;
12904 + }
12905 +
12906 + cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
12907 + ctx->authsize, true);
12908 +
12909 + /*
12910 + * Job Descriptor and Shared Descriptor
12911 + * must fit into the 64-word Descriptor h/w Buffer
12912 + */
12913 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
12914 + ctx->cdata.key_inline = true;
12915 + ctx->cdata.key_virt = ctx->key;
12916 + } else {
12917 + ctx->cdata.key_inline = false;
12918 + ctx->cdata.key_dma = ctx->key_dma;
12919 + }
12920 +
12921 + cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
12922 + ctx->authsize, true);
12923 +
12924 + return 0;
12925 +}
12926 +
12927 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
12928 +{
12929 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
12930 +
12931 + ctx->authsize = authsize;
12932 + gcm_set_sh_desc(authenc);
12933 +
12934 + return 0;
12935 +}
12936 +
12937 +static int gcm_setkey(struct crypto_aead *aead,
12938 + const u8 *key, unsigned int keylen)
12939 +{
12940 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12941 + struct device *jrdev = ctx->jrdev;
12942 + int ret;
12943 +
12944 +#ifdef DEBUG
12945 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12946 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12947 +#endif
12948 +
12949 + memcpy(ctx->key, key, keylen);
12950 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
12951 + ctx->cdata.keylen = keylen;
12952 +
12953 + ret = gcm_set_sh_desc(aead);
12954 + if (ret)
12955 + return ret;
12956 +
12957 + /* Now update the driver contexts with the new shared descriptor */
12958 + if (ctx->drv_ctx[ENCRYPT]) {
12959 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12960 + ctx->sh_desc_enc);
12961 + if (ret) {
12962 + dev_err(jrdev, "driver enc context update failed\n");
12963 + return ret;
12964 + }
12965 + }
12966 +
12967 + if (ctx->drv_ctx[DECRYPT]) {
12968 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12969 + ctx->sh_desc_dec);
12970 + if (ret) {
12971 + dev_err(jrdev, "driver dec context update failed\n");
12972 + return ret;
12973 + }
12974 + }
12975 +
12976 + return 0;
12977 +}
12978 +
12979 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
12980 +{
12981 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12982 + unsigned int ivsize = crypto_aead_ivsize(aead);
12983 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
12984 + ctx->cdata.keylen;
12985 +
12986 + if (!ctx->cdata.keylen || !ctx->authsize)
12987 + return 0;
12988 +
12989 + ctx->cdata.key_virt = ctx->key;
12990 +
12991 + /*
12992 + * Job Descriptor and Shared Descriptor
12993 + * must fit into the 64-word Descriptor h/w Buffer
12994 + */
12995 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
12996 + ctx->cdata.key_inline = true;
12997 + } else {
12998 + ctx->cdata.key_inline = false;
12999 + ctx->cdata.key_dma = ctx->key_dma;
13000 + }
13001 +
13002 + cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
13003 + ctx->authsize, true);
13004 +
13005 + /*
13006 + * Job Descriptor and Shared Descriptor
13007 + * must fit into the 64-word Descriptor h/w Buffer
13008 + */
13009 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
13010 + ctx->cdata.key_inline = true;
13011 + } else {
13012 + ctx->cdata.key_inline = false;
13013 + ctx->cdata.key_dma = ctx->key_dma;
13014 + }
13015 +
13016 + cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
13017 + ctx->authsize, true);
13018 +
13019 + return 0;
13020 +}
13021 +
13022 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
13023 + unsigned int authsize)
13024 +{
13025 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
13026 +
13027 + ctx->authsize = authsize;
13028 + rfc4106_set_sh_desc(authenc);
13029 +
13030 + return 0;
13031 +}
13032 +
13033 +static int rfc4106_setkey(struct crypto_aead *aead,
13034 + const u8 *key, unsigned int keylen)
13035 +{
13036 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13037 + struct device *jrdev = ctx->jrdev;
13038 + int ret;
13039 +
13040 + if (keylen < 4)
13041 + return -EINVAL;
13042 +
13043 +#ifdef DEBUG
13044 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
13045 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
13046 +#endif
13047 +
13048 + memcpy(ctx->key, key, keylen);
13049 + /*
13050 + * The last four bytes of the key material are used as the salt value
13051 + * in the nonce. Update the AES key length.
13052 + */
13053 + ctx->cdata.keylen = keylen - 4;
13054 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
13055 + ctx->dir);
13056 +
13057 + ret = rfc4106_set_sh_desc(aead);
13058 + if (ret)
13059 + return ret;
13060 +
13061 + /* Now update the driver contexts with the new shared descriptor */
13062 + if (ctx->drv_ctx[ENCRYPT]) {
13063 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
13064 + ctx->sh_desc_enc);
13065 + if (ret) {
13066 + dev_err(jrdev, "driver enc context update failed\n");
13067 + return ret;
13068 + }
13069 + }
13070 +
13071 + if (ctx->drv_ctx[DECRYPT]) {
13072 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
13073 + ctx->sh_desc_dec);
13074 + if (ret) {
13075 + dev_err(jrdev, "driver dec context update failed\n");
13076 + return ret;
13077 + }
13078 + }
13079 +
13080 + return 0;
13081 +}
13082 +
13083 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
13084 +{
13085 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13086 + unsigned int ivsize = crypto_aead_ivsize(aead);
13087 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
13088 + ctx->cdata.keylen;
13089 +
13090 + if (!ctx->cdata.keylen || !ctx->authsize)
13091 + return 0;
13092 +
13093 + ctx->cdata.key_virt = ctx->key;
13094 +
13095 + /*
13096 + * Job Descriptor and Shared Descriptor
13097 + * must fit into the 64-word Descriptor h/w Buffer
13098 + */
13099 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
13100 + ctx->cdata.key_inline = true;
13101 + } else {
13102 + ctx->cdata.key_inline = false;
13103 + ctx->cdata.key_dma = ctx->key_dma;
13104 + }
13105 +
13106 + cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
13107 + ctx->authsize, true);
13108 +
13109 + /*
13110 + * Job Descriptor and Shared Descriptor
13111 + * must fit into the 64-word Descriptor h/w Buffer
13112 + */
13113 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
13114 + ctx->cdata.key_inline = true;
13115 + } else {
13116 + ctx->cdata.key_inline = false;
13117 + ctx->cdata.key_dma = ctx->key_dma;
13118 + }
13119 +
13120 + cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
13121 + ctx->authsize, true);
13122 +
13123 + return 0;
13124 +}
13125 +
13126 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
13127 + unsigned int authsize)
13128 +{
13129 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
13130 +
13131 + ctx->authsize = authsize;
13132 + rfc4543_set_sh_desc(authenc);
13133 +
13134 + return 0;
13135 +}
13136 +
13137 +static int rfc4543_setkey(struct crypto_aead *aead,
13138 + const u8 *key, unsigned int keylen)
13139 +{
13140 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13141 + struct device *jrdev = ctx->jrdev;
13142 + int ret;
13143 +
13144 + if (keylen < 4)
13145 + return -EINVAL;
13146 +
13147 +#ifdef DEBUG
13148 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
13149 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
13150 +#endif
13151 +
13152 + memcpy(ctx->key, key, keylen);
13153 + /*
13154 + * The last four bytes of the key material are used as the salt value
13155 + * in the nonce. Update the AES key length.
13156 + */
13157 + ctx->cdata.keylen = keylen - 4;
13158 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
13159 + ctx->dir);
13160 +
13161 + ret = rfc4543_set_sh_desc(aead);
13162 + if (ret)
13163 + return ret;
13164 +
13165 + /* Now update the driver contexts with the new shared descriptor */
13166 + if (ctx->drv_ctx[ENCRYPT]) {
13167 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
13168 + ctx->sh_desc_enc);
13169 + if (ret) {
13170 + dev_err(jrdev, "driver enc context update failed\n");
13171 + return ret;
13172 + }
13173 + }
13174 +
13175 + if (ctx->drv_ctx[DECRYPT]) {
13176 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
13177 + ctx->sh_desc_dec);
13178 + if (ret) {
13179 + dev_err(jrdev, "driver dec context update failed\n");
13180 + return ret;
13181 + }
13182 + }
13183 +
13184 + return 0;
13185 +}
13186 +
13187 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
13188 + const u8 *key, unsigned int keylen)
13189 +{
13190 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13191 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
13192 + const char *alg_name = crypto_tfm_alg_name(tfm);
13193 + struct device *jrdev = ctx->jrdev;
13194 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13195 + u32 ctx1_iv_off = 0;
13196 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
13197 + OP_ALG_AAI_CTR_MOD128);
13198 + const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
13199 + int ret = 0;
13200 +
13201 + memcpy(ctx->key, key, keylen);
13202 +#ifdef DEBUG
13203 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
13204 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
13205 +#endif
13206 + /*
13207 + * AES-CTR needs to load IV in CONTEXT1 reg
13208 + * at an offset of 128bits (16bytes)
13209 + * CONTEXT1[255:128] = IV
13210 + */
13211 + if (ctr_mode)
13212 + ctx1_iv_off = 16;
13213 +
13214 + /*
13215 + * RFC3686 specific:
13216 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
13217 + * | *key = {KEY, NONCE}
13218 + */
13219 + if (is_rfc3686) {
13220 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
13221 + keylen -= CTR_RFC3686_NONCE_SIZE;
13222 + }
13223 +
13224 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
13225 + ctx->cdata.keylen = keylen;
13226 + ctx->cdata.key_virt = ctx->key;
13227 + ctx->cdata.key_inline = true;
13228 +
13229 + /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
13230 + cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
13231 + is_rfc3686, ctx1_iv_off);
13232 + cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
13233 + is_rfc3686, ctx1_iv_off);
13234 + cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
13235 + ivsize, is_rfc3686, ctx1_iv_off);
13236 +
13237 + /* Now update the driver contexts with the new shared descriptor */
13238 + if (ctx->drv_ctx[ENCRYPT]) {
13239 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
13240 + ctx->sh_desc_enc);
13241 + if (ret) {
13242 + dev_err(jrdev, "driver enc context update failed\n");
13243 + goto badkey;
13244 + }
13245 + }
13246 +
13247 + if (ctx->drv_ctx[DECRYPT]) {
13248 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
13249 + ctx->sh_desc_dec);
13250 + if (ret) {
13251 + dev_err(jrdev, "driver dec context update failed\n");
13252 + goto badkey;
13253 + }
13254 + }
13255 +
13256 + if (ctx->drv_ctx[GIVENCRYPT]) {
13257 + ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
13258 + ctx->sh_desc_givenc);
13259 + if (ret) {
13260 + dev_err(jrdev, "driver givenc context update failed\n");
13261 + goto badkey;
13262 + }
13263 + }
13264 +
13265 + return ret;
13266 +badkey:
13267 + crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
13268 + return -EINVAL;
13269 +}
13270 +
13271 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
13272 + const u8 *key, unsigned int keylen)
13273 +{
13274 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13275 + struct device *jrdev = ctx->jrdev;
13276 + int ret = 0;
13277 +
13278 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
13279 + crypto_ablkcipher_set_flags(ablkcipher,
13280 + CRYPTO_TFM_RES_BAD_KEY_LEN);
13281 + dev_err(jrdev, "key size mismatch\n");
13282 + return -EINVAL;
13283 + }
13284 +
13285 + memcpy(ctx->key, key, keylen);
13286 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
13287 + ctx->cdata.keylen = keylen;
13288 + ctx->cdata.key_virt = ctx->key;
13289 + ctx->cdata.key_inline = true;
13290 +
13291 + /* xts ablkcipher encrypt, decrypt shared descriptors */
13292 + cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
13293 + cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
13294 +
13295 + /* Now update the driver contexts with the new shared descriptor */
13296 + if (ctx->drv_ctx[ENCRYPT]) {
13297 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
13298 + ctx->sh_desc_enc);
13299 + if (ret) {
13300 + dev_err(jrdev, "driver enc context update failed\n");
13301 + goto badkey;
13302 + }
13303 + }
13304 +
13305 + if (ctx->drv_ctx[DECRYPT]) {
13306 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
13307 + ctx->sh_desc_dec);
13308 + if (ret) {
13309 + dev_err(jrdev, "driver dec context update failed\n");
13310 + goto badkey;
13311 + }
13312 + }
13313 +
13314 + return ret;
13315 +badkey:
13316 + crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
13317 +	return -EINVAL;
13318 +}
13319 +
13320 +/*
13321 + * aead_edesc - s/w-extended aead descriptor
13322 + * @src_nents: number of segments in input scatterlist
13323 + * @dst_nents: number of segments in output scatterlist
13324 + * @iv_dma: dma address of iv for checking continuity and link table
13325 + * @qm_sg_bytes: length of dma mapped h/w link table
13326 + * @qm_sg_dma: bus physical mapped address of h/w link table
13327 + * @assoclen: associated data length, in CAAM endianness
13328 + * @assoclen_dma: bus physical mapped address of req->assoclen
13329 + * @drv_req: driver-specific request structure
13330 + * @sgt: the h/w link table
13331 + */
13332 +struct aead_edesc {
13333 + int src_nents;
13334 + int dst_nents;
13335 + dma_addr_t iv_dma;
13336 + int qm_sg_bytes;
13337 + dma_addr_t qm_sg_dma;
13338 + unsigned int assoclen;
13339 + dma_addr_t assoclen_dma;
13340 + struct caam_drv_req drv_req;
13341 +#define CAAM_QI_MAX_AEAD_SG \
13342 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
13343 + sizeof(struct qm_sg_entry))
13344 + struct qm_sg_entry sgt[0];
13345 +};
13346 +
13347 +/*
13348 + * tls_edesc - s/w-extended tls descriptor
13349 + * @src_nents: number of segments in input scatterlist
13350 + * @dst_nents: number of segments in output scatterlist
13351 + * @iv_dma: dma address of iv for checking continuity and link table
13352 + * @qm_sg_bytes: length of dma mapped h/w link table
13353 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
13354 + * @qm_sg_dma: bus physical mapped address of h/w link table
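 +	 * @dst: pointer to output scatterlist, usable for in-place operation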
13355 + * @drv_req: driver-specific request structure
13356 + * @sgt: the h/w link table
13357 + */
13358 +struct tls_edesc {
13359 + int src_nents;
13360 + int dst_nents;
13361 + dma_addr_t iv_dma;
13362 + int qm_sg_bytes;
13363 + dma_addr_t qm_sg_dma;
13364 + struct scatterlist tmp[2];
13365 + struct scatterlist *dst;
13366 + struct caam_drv_req drv_req;
13367 + struct qm_sg_entry sgt[0];
13368 +};
13369 +
13370 +/*
13371 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
13372 + * @src_nents: number of segments in input scatterlist
13373 + * @dst_nents: number of segments in output scatterlist
13374 + * @iv_dma: dma address of iv for checking continuity and link table
13375 + * @qm_sg_bytes: length of dma mapped h/w link table
13376 + * @qm_sg_dma: bus physical mapped address of h/w link table
13377 + * @drv_req: driver-specific request structure
13378 + * @sgt: the h/w link table
13379 + */
13380 +struct ablkcipher_edesc {
13381 + int src_nents;
13382 + int dst_nents;
13383 + dma_addr_t iv_dma;
13384 + int qm_sg_bytes;
13385 + dma_addr_t qm_sg_dma;
13386 + struct caam_drv_req drv_req;
13387 +#define CAAM_QI_MAX_ABLKCIPHER_SG \
13388 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
13389 + sizeof(struct qm_sg_entry))
13390 + struct qm_sg_entry sgt[0];
13391 +};
13392 +
13393 +static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
13394 + enum optype type)
13395 +{
13396 + /*
13397 + * This function is called on the fast path with values of 'type'
13398 + * known at compile time. Invalid arguments are not expected and
13399 + * thus no checks are made.
13400 + */
13401 + struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
13402 + u32 *desc;
13403 +
13404 + if (unlikely(!drv_ctx)) {
13405 + spin_lock(&ctx->lock);
13406 +
13407 + /* Read again to check if some other core init drv_ctx */
13408 + drv_ctx = ctx->drv_ctx[type];
13409 + if (!drv_ctx) {
13410 + int cpu;
13411 +
13412 + if (type == ENCRYPT)
13413 + desc = ctx->sh_desc_enc;
13414 + else if (type == DECRYPT)
13415 + desc = ctx->sh_desc_dec;
13416 + else /* (type == GIVENCRYPT) */
13417 + desc = ctx->sh_desc_givenc;
13418 +
13419 + cpu = smp_processor_id();
13420 + drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
13421 + if (likely(!IS_ERR_OR_NULL(drv_ctx)))
13422 + drv_ctx->op_type = type;
13423 +
13424 + ctx->drv_ctx[type] = drv_ctx;
13425 + }
13426 +
13427 + spin_unlock(&ctx->lock);
13428 + }
13429 +
13430 + return drv_ctx;
13431 +}
13432 +
13433 +static void caam_unmap(struct device *dev, struct scatterlist *src,
13434 + struct scatterlist *dst, int src_nents,
13435 + int dst_nents, dma_addr_t iv_dma, int ivsize,
13436 + enum optype op_type, dma_addr_t qm_sg_dma,
13437 + int qm_sg_bytes)
13438 +{
13439 + if (dst != src) {
13440 + if (src_nents)
13441 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
13442 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
13443 + } else {
13444 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
13445 + }
13446 +
13447 + if (iv_dma)
13448 + dma_unmap_single(dev, iv_dma, ivsize,
13449 + op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
13450 + DMA_TO_DEVICE);
13451 + if (qm_sg_bytes)
13452 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
13453 +}
13454 +
13455 +static void aead_unmap(struct device *dev,
13456 + struct aead_edesc *edesc,
13457 + struct aead_request *req)
13458 +{
13459 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13460 + int ivsize = crypto_aead_ivsize(aead);
13461 +
13462 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
13463 + edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
13464 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
13465 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
13466 +}
13467 +
13468 +static void tls_unmap(struct device *dev,
13469 + struct tls_edesc *edesc,
13470 + struct aead_request *req)
13471 +{
13472 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13473 + int ivsize = crypto_aead_ivsize(aead);
13474 +
13475 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
13476 + edesc->dst_nents, edesc->iv_dma, ivsize,
13477 + edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
13478 + edesc->qm_sg_bytes);
13479 +}
13480 +
13481 +static void ablkcipher_unmap(struct device *dev,
13482 + struct ablkcipher_edesc *edesc,
13483 + struct ablkcipher_request *req)
13484 +{
13485 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13486 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13487 +
13488 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
13489 + edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
13490 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
13491 +}
13492 +
13493 +static void aead_done(struct caam_drv_req *drv_req, u32 status)
13494 +{
13495 + struct device *qidev;
13496 + struct aead_edesc *edesc;
13497 + struct aead_request *aead_req = drv_req->app_ctx;
13498 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13499 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13500 + int ecode = 0;
13501 +
13502 + qidev = caam_ctx->qidev;
13503 +
13504 + if (unlikely(status)) {
13505 + u32 ssrc = status & JRSTA_SSRC_MASK;
13506 + u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
13507 +
13508 + caam_jr_strstatus(qidev, status);
13509 + /*
13510 + * verify hw auth check passed else return -EBADMSG
13511 + */
13512 + if (ssrc == JRSTA_SSRC_CCB_ERROR &&
13513 + err_id == JRSTA_CCBERR_ERRID_ICVCHK)
13514 + ecode = -EBADMSG;
13515 + else
13516 + ecode = -EIO;
13517 + }
13518 +
13519 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13520 + aead_unmap(qidev, edesc, aead_req);
13521 +
13522 + aead_request_complete(aead_req, ecode);
13523 + qi_cache_free(edesc);
13524 +}
13525 +
13526 +/*
13527 + * allocate and map the aead extended descriptor
13528 + */
13529 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
13530 + bool encrypt)
13531 +{
13532 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13533 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13534 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13535 + typeof(*alg), aead);
13536 + struct device *qidev = ctx->qidev;
13537 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13538 + GFP_KERNEL : GFP_ATOMIC;
13539 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13540 + struct aead_edesc *edesc;
13541 + dma_addr_t qm_sg_dma, iv_dma = 0;
13542 + int ivsize = 0;
13543 + unsigned int authsize = ctx->authsize;
13544 + int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
13545 + int in_len, out_len;
13546 + struct qm_sg_entry *sg_table, *fd_sgt;
13547 + struct caam_drv_ctx *drv_ctx;
13548 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13549 +
13550 + drv_ctx = get_drv_ctx(ctx, op_type);
13551 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13552 + return (struct aead_edesc *)drv_ctx;
13553 +
13554 + /* allocate space for base edesc and hw desc commands, link tables */
13555 + edesc = qi_cache_alloc(GFP_DMA | flags);
13556 + if (unlikely(!edesc)) {
13557 + dev_err(qidev, "could not allocate extended descriptor\n");
13558 + return ERR_PTR(-ENOMEM);
13559 + }
13560 +
13561 + if (likely(req->src == req->dst)) {
13562 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13563 + req->cryptlen +
13564 + (encrypt ? authsize : 0));
13565 + if (unlikely(src_nents < 0)) {
13566 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13567 + req->assoclen + req->cryptlen +
13568 + (encrypt ? authsize : 0));
13569 + qi_cache_free(edesc);
13570 + return ERR_PTR(src_nents);
13571 + }
13572 +
13573 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13574 + DMA_BIDIRECTIONAL);
13575 + if (unlikely(!mapped_src_nents)) {
13576 + dev_err(qidev, "unable to map source\n");
13577 + qi_cache_free(edesc);
13578 + return ERR_PTR(-ENOMEM);
13579 + }
13580 + } else {
13581 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13582 + req->cryptlen);
13583 + if (unlikely(src_nents < 0)) {
13584 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13585 + req->assoclen + req->cryptlen);
13586 + qi_cache_free(edesc);
13587 + return ERR_PTR(src_nents);
13588 + }
13589 +
13590 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
13591 + req->cryptlen +
13592 + (encrypt ? authsize :
13593 + (-authsize)));
13594 + if (unlikely(dst_nents < 0)) {
13595 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13596 + req->assoclen + req->cryptlen +
13597 + (encrypt ? authsize : (-authsize)));
13598 + qi_cache_free(edesc);
13599 + return ERR_PTR(dst_nents);
13600 + }
13601 +
13602 + if (src_nents) {
13603 + mapped_src_nents = dma_map_sg(qidev, req->src,
13604 + src_nents, DMA_TO_DEVICE);
13605 + if (unlikely(!mapped_src_nents)) {
13606 + dev_err(qidev, "unable to map source\n");
13607 + qi_cache_free(edesc);
13608 + return ERR_PTR(-ENOMEM);
13609 + }
13610 + } else {
13611 + mapped_src_nents = 0;
13612 + }
13613 +
13614 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13615 + DMA_FROM_DEVICE);
13616 + if (unlikely(!mapped_dst_nents)) {
13617 + dev_err(qidev, "unable to map destination\n");
13618 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13619 + qi_cache_free(edesc);
13620 + return ERR_PTR(-ENOMEM);
13621 + }
13622 + }
13623 +
13624 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
13625 + ivsize = crypto_aead_ivsize(aead);
13626 + iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13627 + if (dma_mapping_error(qidev, iv_dma)) {
13628 + dev_err(qidev, "unable to map IV\n");
13629 + caam_unmap(qidev, req->src, req->dst, src_nents,
13630 + dst_nents, 0, 0, op_type, 0, 0);
13631 + qi_cache_free(edesc);
13632 + return ERR_PTR(-ENOMEM);
13633 + }
13634 + }
13635 +
13636 + /*
13637 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
13638 + * Input is not contiguous.
13639 + */
13640 + qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
13641 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13642 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
13643 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13644 + qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
13645 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13646 + iv_dma, ivsize, op_type, 0, 0);
13647 + qi_cache_free(edesc);
13648 + return ERR_PTR(-ENOMEM);
13649 + }
13650 + sg_table = &edesc->sgt[0];
13651 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13652 +
13653 + edesc->src_nents = src_nents;
13654 + edesc->dst_nents = dst_nents;
13655 + edesc->iv_dma = iv_dma;
13656 + edesc->drv_req.app_ctx = req;
13657 + edesc->drv_req.cbk = aead_done;
13658 + edesc->drv_req.drv_ctx = drv_ctx;
13659 +
13660 + edesc->assoclen = cpu_to_caam32(req->assoclen);
13661 + edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
13662 + DMA_TO_DEVICE);
13663 + if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
13664 + dev_err(qidev, "unable to map assoclen\n");
13665 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13666 + iv_dma, ivsize, op_type, 0, 0);
13667 + qi_cache_free(edesc);
13668 + return ERR_PTR(-ENOMEM);
13669 + }
13670 +
13671 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
13672 + qm_sg_index++;
13673 + if (ivsize) {
13674 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
13675 + qm_sg_index++;
13676 + }
13677 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13678 + qm_sg_index += mapped_src_nents;
13679 +
13680 + if (mapped_dst_nents > 1)
13681 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13682 + qm_sg_index, 0);
13683 +
13684 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13685 + if (dma_mapping_error(qidev, qm_sg_dma)) {
13686 + dev_err(qidev, "unable to map S/G table\n");
13687 + dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
13688 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13689 + iv_dma, ivsize, op_type, 0, 0);
13690 + qi_cache_free(edesc);
13691 + return ERR_PTR(-ENOMEM);
13692 + }
13693 +
13694 + edesc->qm_sg_dma = qm_sg_dma;
13695 + edesc->qm_sg_bytes = qm_sg_bytes;
13696 +
13697 + out_len = req->assoclen + req->cryptlen +
13698 + (encrypt ? ctx->authsize : (-ctx->authsize));
13699 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
13700 +
13701 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13702 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13703 +
13704 + if (req->dst == req->src) {
13705 + if (mapped_src_nents == 1)
13706 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13707 + out_len, 0);
13708 + else
13709 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13710 + (1 + !!ivsize) * sizeof(*sg_table),
13711 + out_len, 0);
13712 + } else if (mapped_dst_nents == 1) {
13713 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
13714 + 0);
13715 + } else {
13716 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13717 + qm_sg_index, out_len, 0);
13718 + }
13719 +
13720 + return edesc;
13721 +}
13722 +
13723 +static inline int aead_crypt(struct aead_request *req, bool encrypt)
13724 +{
13725 + struct aead_edesc *edesc;
13726 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13727 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13728 + int ret;
13729 +
13730 + if (unlikely(caam_congested))
13731 + return -EAGAIN;
13732 +
13733 + /* allocate extended descriptor */
13734 + edesc = aead_edesc_alloc(req, encrypt);
13735 + if (IS_ERR_OR_NULL(edesc))
13736 + return PTR_ERR(edesc);
13737 +
13738 + /* Create and submit job descriptor */
13739 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13740 + if (!ret) {
13741 + ret = -EINPROGRESS;
13742 + } else {
13743 + aead_unmap(ctx->qidev, edesc, req);
13744 + qi_cache_free(edesc);
13745 + }
13746 +
13747 + return ret;
13748 +}
13749 +
13750 +static int aead_encrypt(struct aead_request *req)
13751 +{
13752 + return aead_crypt(req, true);
13753 +}
13754 +
13755 +static int aead_decrypt(struct aead_request *req)
13756 +{
13757 + return aead_crypt(req, false);
13758 +}
13759 +
13760 +static int ipsec_gcm_encrypt(struct aead_request *req)
13761 +{
13762 + if (req->assoclen < 8)
13763 + return -EINVAL;
13764 +
13765 + return aead_crypt(req, true);
13766 +}
13767 +
13768 +static int ipsec_gcm_decrypt(struct aead_request *req)
13769 +{
13770 + if (req->assoclen < 8)
13771 + return -EINVAL;
13772 +
13773 + return aead_crypt(req, false);
13774 +}
13775 +
13776 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
13777 +{
13778 + struct device *qidev;
13779 + struct tls_edesc *edesc;
13780 + struct aead_request *aead_req = drv_req->app_ctx;
13781 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13782 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13783 + int ecode = 0;
13784 +
13785 + qidev = caam_ctx->qidev;
13786 +
13787 + if (unlikely(status)) {
13788 + caam_jr_strstatus(qidev, status);
13789 + ecode = -EIO;
13790 + }
13791 +
13792 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13793 + tls_unmap(qidev, edesc, aead_req);
13794 +
13795 + aead_request_complete(aead_req, ecode);
13796 + qi_cache_free(edesc);
13797 +}
13798 +
13799 +/*
13800 + * allocate and map the tls extended descriptor
13801 + */
13802 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
13803 +{
13804 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13805 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13806 + unsigned int blocksize = crypto_aead_blocksize(aead);
13807 + unsigned int padsize, authsize;
13808 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13809 + typeof(*alg), aead);
13810 + struct device *qidev = ctx->qidev;
13811 + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
13812 + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
13813 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13814 + struct tls_edesc *edesc;
13815 + dma_addr_t qm_sg_dma, iv_dma = 0;
13816 + int ivsize = 0;
13817 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
13818 + int in_len, out_len;
13819 + struct qm_sg_entry *sg_table, *fd_sgt;
13820 + struct caam_drv_ctx *drv_ctx;
13821 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13822 + struct scatterlist *dst;
13823 +
13824 + if (encrypt) {
13825 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
13826 + blocksize);
13827 + authsize = ctx->authsize + padsize;
13828 + } else {
13829 + authsize = ctx->authsize;
13830 + }
13831 +
13832 + drv_ctx = get_drv_ctx(ctx, op_type);
13833 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13834 + return (struct tls_edesc *)drv_ctx;
13835 +
13836 + /* allocate space for base edesc and hw desc commands, link tables */
13837 + edesc = qi_cache_alloc(GFP_DMA | flags);
13838 + if (unlikely(!edesc)) {
13839 + dev_err(qidev, "could not allocate extended descriptor\n");
13840 + return ERR_PTR(-ENOMEM);
13841 + }
13842 +
13843 + if (likely(req->src == req->dst)) {
13844 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13845 + req->cryptlen +
13846 + (encrypt ? authsize : 0));
13847 + if (unlikely(src_nents < 0)) {
13848 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13849 + req->assoclen + req->cryptlen +
13850 + (encrypt ? authsize : 0));
13851 + qi_cache_free(edesc);
13852 + return ERR_PTR(src_nents);
13853 + }
13854 +
13855 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13856 + DMA_BIDIRECTIONAL);
13857 + if (unlikely(!mapped_src_nents)) {
13858 + dev_err(qidev, "unable to map source\n");
13859 + qi_cache_free(edesc);
13860 + return ERR_PTR(-ENOMEM);
13861 + }
13862 + dst = req->dst;
13863 + } else {
13864 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13865 + req->cryptlen);
13866 + if (unlikely(src_nents < 0)) {
13867 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13868 + req->assoclen + req->cryptlen);
13869 + qi_cache_free(edesc);
13870 + return ERR_PTR(src_nents);
13871 + }
13872 +
13873 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
13874 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
13875 + (encrypt ? authsize : 0));
13876 + if (unlikely(dst_nents < 0)) {
13877 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13878 + req->cryptlen +
13879 + (encrypt ? authsize : 0));
13880 + qi_cache_free(edesc);
13881 + return ERR_PTR(dst_nents);
13882 + }
13883 +
13884 + if (src_nents) {
13885 + mapped_src_nents = dma_map_sg(qidev, req->src,
13886 + src_nents, DMA_TO_DEVICE);
13887 + if (unlikely(!mapped_src_nents)) {
13888 + dev_err(qidev, "unable to map source\n");
13889 + qi_cache_free(edesc);
13890 + return ERR_PTR(-ENOMEM);
13891 + }
13892 + } else {
13893 + mapped_src_nents = 0;
13894 + }
13895 +
13896 + mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
13897 + DMA_FROM_DEVICE);
13898 + if (unlikely(!mapped_dst_nents)) {
13899 + dev_err(qidev, "unable to map destination\n");
13900 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13901 + qi_cache_free(edesc);
13902 + return ERR_PTR(-ENOMEM);
13903 + }
13904 + }
13905 +
13906 + ivsize = crypto_aead_ivsize(aead);
13907 + iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13908 + if (dma_mapping_error(qidev, iv_dma)) {
13909 + dev_err(qidev, "unable to map IV\n");
13910 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
13911 + op_type, 0, 0);
13912 + qi_cache_free(edesc);
13913 + return ERR_PTR(-ENOMEM);
13914 + }
13915 +
13916 + /*
13917 + * Create S/G table: IV, src, dst.
13918 + * Input is not contiguous.
13919 + */
13920 + qm_sg_ents = 1 + mapped_src_nents +
13921 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13922 + sg_table = &edesc->sgt[0];
13923 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13924 +
13925 + edesc->src_nents = src_nents;
13926 + edesc->dst_nents = dst_nents;
13927 + edesc->dst = dst;
13928 + edesc->iv_dma = iv_dma;
13929 + edesc->drv_req.app_ctx = req;
13930 + edesc->drv_req.cbk = tls_done;
13931 + edesc->drv_req.drv_ctx = drv_ctx;
13932 +
13933 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13934 + qm_sg_index = 1;
13935 +
13936 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13937 + qm_sg_index += mapped_src_nents;
13938 +
13939 + if (mapped_dst_nents > 1)
13940 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
13941 + qm_sg_index, 0);
13942 +
13943 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13944 + if (dma_mapping_error(qidev, qm_sg_dma)) {
13945 + dev_err(qidev, "unable to map S/G table\n");
13946 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
13947 + ivsize, op_type, 0, 0);
13948 + qi_cache_free(edesc);
13949 + return ERR_PTR(-ENOMEM);
13950 + }
13951 +
13952 + edesc->qm_sg_dma = qm_sg_dma;
13953 + edesc->qm_sg_bytes = qm_sg_bytes;
13954 +
13955 + out_len = req->cryptlen + (encrypt ? authsize : 0);
13956 + in_len = ivsize + req->assoclen + req->cryptlen;
13957 +
13958 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13959 +
13960 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13961 +
13962 + if (req->dst == req->src)
13963 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13964 + (sg_nents_for_len(req->src, req->assoclen) +
13965 + 1) * sizeof(*sg_table), out_len, 0);
13966 + else if (mapped_dst_nents == 1)
13967 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
13968 + else
13969 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13970 + qm_sg_index, out_len, 0);
13971 +
13972 + return edesc;
13973 +}
13974 +
13975 +static int tls_crypt(struct aead_request *req, bool encrypt)
13976 +{
13977 + struct tls_edesc *edesc;
13978 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13979 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13980 + int ret;
13981 +
13982 + if (unlikely(caam_congested))
13983 + return -EAGAIN;
13984 +
13985 + edesc = tls_edesc_alloc(req, encrypt);
13986 + if (IS_ERR_OR_NULL(edesc))
13987 + return PTR_ERR(edesc);
13988 +
13989 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13990 + if (!ret) {
13991 + ret = -EINPROGRESS;
13992 + } else {
13993 + tls_unmap(ctx->qidev, edesc, req);
13994 + qi_cache_free(edesc);
13995 + }
13996 +
13997 + return ret;
13998 +}
13999 +
14000 +static int tls_encrypt(struct aead_request *req)
14001 +{
14002 + return tls_crypt(req, true);
14003 +}
14004 +
14005 +static int tls_decrypt(struct aead_request *req)
14006 +{
14007 + return tls_crypt(req, false);
14008 +}
14009 +
14010 +static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
14011 +{
14012 + struct ablkcipher_edesc *edesc;
14013 + struct ablkcipher_request *req = drv_req->app_ctx;
14014 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
14015 + struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
14016 + struct device *qidev = caam_ctx->qidev;
14017 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
14018 +
14019 +#ifdef DEBUG
14020 + dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
14021 +#endif
14022 +
14023 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
14024 +
14025 + if (status)
14026 + caam_jr_strstatus(qidev, status);
14027 +
14028 +#ifdef DEBUG
14029 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
14030 + DUMP_PREFIX_ADDRESS, 16, 4, req->info,
14031 + edesc->src_nents > 1 ? 100 : ivsize, 1);
14032 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
14033 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
14034 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
14035 +#endif
14036 +
14037 + ablkcipher_unmap(qidev, edesc, req);
14038 + qi_cache_free(edesc);
14039 +
14040 + /*
14041 + * The crypto API expects us to set the IV (req->info) to the last
14042 + * ciphertext block. This is used e.g. by the CTS mode.
14043 + */
14044 + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
14045 + ivsize, 0);
14046 +
14047 + ablkcipher_request_complete(req, status);
14048 +}
14049 +
14050 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
14051 + *req, bool encrypt)
14052 +{
14053 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
14054 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
14055 + struct device *qidev = ctx->qidev;
14056 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
14057 + GFP_KERNEL : GFP_ATOMIC;
14058 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
14059 + struct ablkcipher_edesc *edesc;
14060 + dma_addr_t iv_dma;
14061 + bool in_contig;
14062 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
14063 + int dst_sg_idx, qm_sg_ents;
14064 + struct qm_sg_entry *sg_table, *fd_sgt;
14065 + struct caam_drv_ctx *drv_ctx;
14066 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
14067 +
14068 + drv_ctx = get_drv_ctx(ctx, op_type);
14069 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
14070 + return (struct ablkcipher_edesc *)drv_ctx;
14071 +
14072 + src_nents = sg_nents_for_len(req->src, req->nbytes);
14073 + if (unlikely(src_nents < 0)) {
14074 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
14075 + req->nbytes);
14076 + return ERR_PTR(src_nents);
14077 + }
14078 +
14079 + if (unlikely(req->src != req->dst)) {
14080 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
14081 + if (unlikely(dst_nents < 0)) {
14082 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
14083 + req->nbytes);
14084 + return ERR_PTR(dst_nents);
14085 + }
14086 +
14087 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
14088 + DMA_TO_DEVICE);
14089 + if (unlikely(!mapped_src_nents)) {
14090 + dev_err(qidev, "unable to map source\n");
14091 + return ERR_PTR(-ENOMEM);
14092 + }
14093 +
14094 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
14095 + DMA_FROM_DEVICE);
14096 + if (unlikely(!mapped_dst_nents)) {
14097 + dev_err(qidev, "unable to map destination\n");
14098 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
14099 + return ERR_PTR(-ENOMEM);
14100 + }
14101 + } else {
14102 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
14103 + DMA_BIDIRECTIONAL);
14104 + if (unlikely(!mapped_src_nents)) {
14105 + dev_err(qidev, "unable to map source\n");
14106 + return ERR_PTR(-ENOMEM);
14107 + }
14108 + }
14109 +
14110 + iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
14111 + if (dma_mapping_error(qidev, iv_dma)) {
14112 + dev_err(qidev, "unable to map IV\n");
14113 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
14114 + 0, 0, 0, 0);
14115 + return ERR_PTR(-ENOMEM);
14116 + }
14117 +
14118 + if (mapped_src_nents == 1 &&
14119 + iv_dma + ivsize == sg_dma_address(req->src)) {
14120 + in_contig = true;
14121 + qm_sg_ents = 0;
14122 + } else {
14123 + in_contig = false;
14124 + qm_sg_ents = 1 + mapped_src_nents;
14125 + }
14126 + dst_sg_idx = qm_sg_ents;
14127 +
14128 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
14129 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
14130 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
14131 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
14132 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
14133 + iv_dma, ivsize, op_type, 0, 0);
14134 + return ERR_PTR(-ENOMEM);
14135 + }
14136 +
14137 + /* allocate space for base edesc and link tables */
14138 + edesc = qi_cache_alloc(GFP_DMA | flags);
14139 + if (unlikely(!edesc)) {
14140 + dev_err(qidev, "could not allocate extended descriptor\n");
14141 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
14142 + iv_dma, ivsize, op_type, 0, 0);
14143 + return ERR_PTR(-ENOMEM);
14144 + }
14145 +
14146 + edesc->src_nents = src_nents;
14147 + edesc->dst_nents = dst_nents;
14148 + edesc->iv_dma = iv_dma;
14149 + sg_table = &edesc->sgt[0];
14150 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
14151 + edesc->drv_req.app_ctx = req;
14152 + edesc->drv_req.cbk = ablkcipher_done;
14153 + edesc->drv_req.drv_ctx = drv_ctx;
14154 +
14155 + if (!in_contig) {
14156 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
14157 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
14158 + }
14159 +
14160 + if (mapped_dst_nents > 1)
14161 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
14162 + dst_sg_idx, 0);
14163 +
14164 + edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
14165 + DMA_TO_DEVICE);
14166 + if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
14167 + dev_err(qidev, "unable to map S/G table\n");
14168 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
14169 + iv_dma, ivsize, op_type, 0, 0);
14170 + qi_cache_free(edesc);
14171 + return ERR_PTR(-ENOMEM);
14172 + }
14173 +
14174 + fd_sgt = &edesc->drv_req.fd_sgt[0];
14175 +
14176 + if (!in_contig)
14177 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
14178 + ivsize + req->nbytes, 0);
14179 + else
14180 + dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
14181 + 0);
14182 +
14183 + if (req->src == req->dst) {
14184 + if (!in_contig)
14185 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
14186 + sizeof(*sg_table), req->nbytes, 0);
14187 + else
14188 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
14189 + req->nbytes, 0);
14190 + } else if (mapped_dst_nents > 1) {
14191 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
14192 + sizeof(*sg_table), req->nbytes, 0);
14193 + } else {
14194 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
14195 + req->nbytes, 0);
14196 + }
14197 +
14198 + return edesc;
14199 +}
14200 +
14201 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
14202 + struct skcipher_givcrypt_request *creq)
14203 +{
14204 + struct ablkcipher_request *req = &creq->creq;
14205 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
14206 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
14207 + struct device *qidev = ctx->qidev;
14208 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
14209 + GFP_KERNEL : GFP_ATOMIC;
14210 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
14211 + struct ablkcipher_edesc *edesc;
14212 + dma_addr_t iv_dma;
14213 + bool out_contig;
14214 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
14215 + struct qm_sg_entry *sg_table, *fd_sgt;
14216 + int dst_sg_idx, qm_sg_ents;
14217 + struct caam_drv_ctx *drv_ctx;
14218 +
14219 + drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
14220 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
14221 + return (struct ablkcipher_edesc *)drv_ctx;
14222 +
14223 + src_nents = sg_nents_for_len(req->src, req->nbytes);
14224 + if (unlikely(src_nents < 0)) {
14225 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
14226 + req->nbytes);
14227 + return ERR_PTR(src_nents);
14228 + }
14229 +
14230 + if (unlikely(req->src != req->dst)) {
14231 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
14232 + if (unlikely(dst_nents < 0)) {
14233 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
14234 + req->nbytes);
14235 + return ERR_PTR(dst_nents);
14236 + }
14237 +
14238 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
14239 + DMA_TO_DEVICE);
14240 + if (unlikely(!mapped_src_nents)) {
14241 + dev_err(qidev, "unable to map source\n");
14242 + return ERR_PTR(-ENOMEM);
14243 + }
14244 +
14245 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
14246 + DMA_FROM_DEVICE);
14247 + if (unlikely(!mapped_dst_nents)) {
14248 + dev_err(qidev, "unable to map destination\n");
14249 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
14250 + return ERR_PTR(-ENOMEM);
14251 + }
14252 + } else {
14253 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
14254 + DMA_BIDIRECTIONAL);
14255 + if (unlikely(!mapped_src_nents)) {
14256 + dev_err(qidev, "unable to map source\n");
14257 + return ERR_PTR(-ENOMEM);
14258 + }
14259 +
14260 + dst_nents = src_nents;
14261 + mapped_dst_nents = src_nents;
14262 + }
14263 +
14264 + iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
14265 + if (dma_mapping_error(qidev, iv_dma)) {
14266 + dev_err(qidev, "unable to map IV\n");
14267 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
14268 + 0, 0, 0, 0);
14269 + return ERR_PTR(-ENOMEM);
14270 + }
14271 +
14272 + qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
14273 + dst_sg_idx = qm_sg_ents;
14274 + if (mapped_dst_nents == 1 &&
14275 + iv_dma + ivsize == sg_dma_address(req->dst)) {
14276 + out_contig = true;
14277 + } else {
14278 + out_contig = false;
14279 + qm_sg_ents += 1 + mapped_dst_nents;
14280 + }
14281 +
14282 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
14283 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
14284 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
14285 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
14286 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
14287 + return ERR_PTR(-ENOMEM);
14288 + }
14289 +
14290 + /* allocate space for base edesc and link tables */
14291 + edesc = qi_cache_alloc(GFP_DMA | flags);
14292 + if (!edesc) {
14293 + dev_err(qidev, "could not allocate extended descriptor\n");
14294 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
14295 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
14296 + return ERR_PTR(-ENOMEM);
14297 + }
14298 +
14299 + edesc->src_nents = src_nents;
14300 + edesc->dst_nents = dst_nents;
14301 + edesc->iv_dma = iv_dma;
14302 + sg_table = &edesc->sgt[0];
14303 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
14304 + edesc->drv_req.app_ctx = req;
14305 + edesc->drv_req.cbk = ablkcipher_done;
14306 + edesc->drv_req.drv_ctx = drv_ctx;
14307 +
14308 + if (mapped_src_nents > 1)
14309 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
14310 +
14311 + if (!out_contig) {
14312 + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
14313 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
14314 + dst_sg_idx + 1, 0);
14315 + }
14316 +
14317 + edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
14318 + DMA_TO_DEVICE);
14319 + if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
14320 + dev_err(qidev, "unable to map S/G table\n");
14321 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
14322 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
14323 + qi_cache_free(edesc);
14324 + return ERR_PTR(-ENOMEM);
14325 + }
14326 +
14327 + fd_sgt = &edesc->drv_req.fd_sgt[0];
14328 +
14329 + if (mapped_src_nents > 1)
14330 + dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
14331 + 0);
14332 + else
14333 + dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
14334 + req->nbytes, 0);
14335 +
14336 + if (!out_contig)
14337 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
14338 + sizeof(*sg_table), ivsize + req->nbytes,
14339 + 0);
14340 + else
14341 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
14342 + ivsize + req->nbytes, 0);
14343 +
14344 + return edesc;
14345 +}
14346 +
14347 +static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
14348 +{
14349 + struct ablkcipher_edesc *edesc;
14350 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
14351 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
14352 + int ret;
14353 +
14354 + if (unlikely(caam_congested))
14355 + return -EAGAIN;
14356 +
14357 + /* allocate extended descriptor */
14358 + edesc = ablkcipher_edesc_alloc(req, encrypt);
14359 + if (IS_ERR(edesc))
14360 + return PTR_ERR(edesc);
14361 +
14362 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
14363 + if (!ret) {
14364 + ret = -EINPROGRESS;
14365 + } else {
14366 + ablkcipher_unmap(ctx->qidev, edesc, req);
14367 + qi_cache_free(edesc);
14368 + }
14369 +
14370 + return ret;
14371 +}
14372 +
14373 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
14374 +{
14375 + return ablkcipher_crypt(req, true);
14376 +}
14377 +
14378 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
14379 +{
14380 + return ablkcipher_crypt(req, false);
14381 +}
14382 +
14383 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
14384 +{
14385 + struct ablkcipher_request *req = &creq->creq;
14386 + struct ablkcipher_edesc *edesc;
14387 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
14388 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
14389 + int ret;
14390 +
14391 + if (unlikely(caam_congested))
14392 + return -EAGAIN;
14393 +
14394 + /* allocate extended descriptor */
14395 + edesc = ablkcipher_giv_edesc_alloc(creq);
14396 + if (IS_ERR(edesc))
14397 + return PTR_ERR(edesc);
14398 +
14399 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
14400 + if (!ret) {
14401 + ret = -EINPROGRESS;
14402 + } else {
14403 + ablkcipher_unmap(ctx->qidev, edesc, req);
14404 + qi_cache_free(edesc);
14405 + }
14406 +
14407 + return ret;
14408 +}
14409 +
14410 +#define template_ablkcipher template_u.ablkcipher
14411 +struct caam_alg_template {
14412 + char name[CRYPTO_MAX_ALG_NAME];
14413 + char driver_name[CRYPTO_MAX_ALG_NAME];
14414 + unsigned int blocksize;
14415 + u32 type;
14416 + union {
14417 + struct ablkcipher_alg ablkcipher;
14418 + } template_u;
14419 + u32 class1_alg_type;
14420 + u32 class2_alg_type;
14421 +};
14422 +
14423 +static struct caam_alg_template driver_algs[] = {
14424 + /* ablkcipher descriptor */
14425 + {
14426 + .name = "cbc(aes)",
14427 + .driver_name = "cbc-aes-caam-qi",
14428 + .blocksize = AES_BLOCK_SIZE,
14429 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14430 + .template_ablkcipher = {
14431 + .setkey = ablkcipher_setkey,
14432 + .encrypt = ablkcipher_encrypt,
14433 + .decrypt = ablkcipher_decrypt,
14434 + .givencrypt = ablkcipher_givencrypt,
14435 + .geniv = "<built-in>",
14436 + .min_keysize = AES_MIN_KEY_SIZE,
14437 + .max_keysize = AES_MAX_KEY_SIZE,
14438 + .ivsize = AES_BLOCK_SIZE,
14439 + },
14440 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14441 + },
14442 + {
14443 + .name = "cbc(des3_ede)",
14444 + .driver_name = "cbc-3des-caam-qi",
14445 + .blocksize = DES3_EDE_BLOCK_SIZE,
14446 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14447 + .template_ablkcipher = {
14448 + .setkey = ablkcipher_setkey,
14449 + .encrypt = ablkcipher_encrypt,
14450 + .decrypt = ablkcipher_decrypt,
14451 + .givencrypt = ablkcipher_givencrypt,
14452 + .geniv = "<built-in>",
14453 + .min_keysize = DES3_EDE_KEY_SIZE,
14454 + .max_keysize = DES3_EDE_KEY_SIZE,
14455 + .ivsize = DES3_EDE_BLOCK_SIZE,
14456 + },
14457 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14458 + },
14459 + {
14460 + .name = "cbc(des)",
14461 + .driver_name = "cbc-des-caam-qi",
14462 + .blocksize = DES_BLOCK_SIZE,
14463 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14464 + .template_ablkcipher = {
14465 + .setkey = ablkcipher_setkey,
14466 + .encrypt = ablkcipher_encrypt,
14467 + .decrypt = ablkcipher_decrypt,
14468 + .givencrypt = ablkcipher_givencrypt,
14469 + .geniv = "<built-in>",
14470 + .min_keysize = DES_KEY_SIZE,
14471 + .max_keysize = DES_KEY_SIZE,
14472 + .ivsize = DES_BLOCK_SIZE,
14473 + },
14474 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14475 + },
14476 + {
14477 + .name = "ctr(aes)",
14478 + .driver_name = "ctr-aes-caam-qi",
14479 + .blocksize = 1,
14480 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
14481 + .template_ablkcipher = {
14482 + .setkey = ablkcipher_setkey,
14483 + .encrypt = ablkcipher_encrypt,
14484 + .decrypt = ablkcipher_decrypt,
14485 + .geniv = "chainiv",
14486 + .min_keysize = AES_MIN_KEY_SIZE,
14487 + .max_keysize = AES_MAX_KEY_SIZE,
14488 + .ivsize = AES_BLOCK_SIZE,
14489 + },
14490 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
14491 + },
14492 + {
14493 + .name = "rfc3686(ctr(aes))",
14494 + .driver_name = "rfc3686-ctr-aes-caam-qi",
14495 + .blocksize = 1,
14496 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14497 + .template_ablkcipher = {
14498 + .setkey = ablkcipher_setkey,
14499 + .encrypt = ablkcipher_encrypt,
14500 + .decrypt = ablkcipher_decrypt,
14501 + .givencrypt = ablkcipher_givencrypt,
14502 + .geniv = "<built-in>",
14503 + .min_keysize = AES_MIN_KEY_SIZE +
14504 + CTR_RFC3686_NONCE_SIZE,
14505 + .max_keysize = AES_MAX_KEY_SIZE +
14506 + CTR_RFC3686_NONCE_SIZE,
14507 + .ivsize = CTR_RFC3686_IV_SIZE,
14508 + },
14509 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
14510 + },
14511 + {
14512 + .name = "xts(aes)",
14513 + .driver_name = "xts-aes-caam-qi",
14514 + .blocksize = AES_BLOCK_SIZE,
14515 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
14516 + .template_ablkcipher = {
14517 + .setkey = xts_ablkcipher_setkey,
14518 + .encrypt = ablkcipher_encrypt,
14519 + .decrypt = ablkcipher_decrypt,
14520 + .geniv = "eseqiv",
14521 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
14522 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
14523 + .ivsize = AES_BLOCK_SIZE,
14524 + },
14525 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
14526 + },
14527 +};
14528 +
14529 +static struct caam_aead_alg driver_aeads[] = {
14530 + {
14531 + .aead = {
14532 + .base = {
14533 + .cra_name = "rfc4106(gcm(aes))",
14534 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi",
14535 + .cra_blocksize = 1,
14536 + },
14537 + .setkey = rfc4106_setkey,
14538 + .setauthsize = rfc4106_setauthsize,
14539 + .encrypt = ipsec_gcm_encrypt,
14540 + .decrypt = ipsec_gcm_decrypt,
14541 + .ivsize = 8,
14542 + .maxauthsize = AES_BLOCK_SIZE,
14543 + },
14544 + .caam = {
14545 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
14546 + },
14547 + },
14548 + {
14549 + .aead = {
14550 + .base = {
14551 + .cra_name = "rfc4543(gcm(aes))",
14552 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi",
14553 + .cra_blocksize = 1,
14554 + },
14555 + .setkey = rfc4543_setkey,
14556 + .setauthsize = rfc4543_setauthsize,
14557 + .encrypt = ipsec_gcm_encrypt,
14558 + .decrypt = ipsec_gcm_decrypt,
14559 + .ivsize = 8,
14560 + .maxauthsize = AES_BLOCK_SIZE,
14561 + },
14562 + .caam = {
14563 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
14564 + },
14565 + },
14566 + /* Galois Counter Mode */
14567 + {
14568 + .aead = {
14569 + .base = {
14570 + .cra_name = "gcm(aes)",
14571 + .cra_driver_name = "gcm-aes-caam-qi",
14572 + .cra_blocksize = 1,
14573 + },
14574 + .setkey = gcm_setkey,
14575 + .setauthsize = gcm_setauthsize,
14576 + .encrypt = aead_encrypt,
14577 + .decrypt = aead_decrypt,
14578 + .ivsize = 12,
14579 + .maxauthsize = AES_BLOCK_SIZE,
14580 + },
14581 + .caam = {
14582 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
14583 + }
14584 + },
14585 + /* single-pass ipsec_esp descriptor */
14586 + {
14587 + .aead = {
14588 + .base = {
14589 + .cra_name = "authenc(hmac(md5),cbc(aes))",
14590 + .cra_driver_name = "authenc-hmac-md5-"
14591 + "cbc-aes-caam-qi",
14592 + .cra_blocksize = AES_BLOCK_SIZE,
14593 + },
14594 + .setkey = aead_setkey,
14595 + .setauthsize = aead_setauthsize,
14596 + .encrypt = aead_encrypt,
14597 + .decrypt = aead_decrypt,
14598 + .ivsize = AES_BLOCK_SIZE,
14599 + .maxauthsize = MD5_DIGEST_SIZE,
14600 + },
14601 + .caam = {
14602 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14603 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14604 + OP_ALG_AAI_HMAC_PRECOMP,
14605 + }
14606 + },
14607 + {
14608 + .aead = {
14609 + .base = {
14610 + .cra_name = "echainiv(authenc(hmac(md5),"
14611 + "cbc(aes)))",
14612 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14613 + "cbc-aes-caam-qi",
14614 + .cra_blocksize = AES_BLOCK_SIZE,
14615 + },
14616 + .setkey = aead_setkey,
14617 + .setauthsize = aead_setauthsize,
14618 + .encrypt = aead_encrypt,
14619 + .decrypt = aead_decrypt,
14620 + .ivsize = AES_BLOCK_SIZE,
14621 + .maxauthsize = MD5_DIGEST_SIZE,
14622 + },
14623 + .caam = {
14624 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14625 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14626 + OP_ALG_AAI_HMAC_PRECOMP,
14627 + .geniv = true,
14628 + }
14629 + },
14630 + {
14631 + .aead = {
14632 + .base = {
14633 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
14634 + .cra_driver_name = "authenc-hmac-sha1-"
14635 + "cbc-aes-caam-qi",
14636 + .cra_blocksize = AES_BLOCK_SIZE,
14637 + },
14638 + .setkey = aead_setkey,
14639 + .setauthsize = aead_setauthsize,
14640 + .encrypt = aead_encrypt,
14641 + .decrypt = aead_decrypt,
14642 + .ivsize = AES_BLOCK_SIZE,
14643 + .maxauthsize = SHA1_DIGEST_SIZE,
14644 + },
14645 + .caam = {
14646 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14647 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14648 + OP_ALG_AAI_HMAC_PRECOMP,
14649 + }
14650 + },
14651 + {
14652 + .aead = {
14653 + .base = {
14654 + .cra_name = "echainiv(authenc(hmac(sha1),"
14655 + "cbc(aes)))",
14656 + .cra_driver_name = "echainiv-authenc-"
14657 + "hmac-sha1-cbc-aes-caam-qi",
14658 + .cra_blocksize = AES_BLOCK_SIZE,
14659 + },
14660 + .setkey = aead_setkey,
14661 + .setauthsize = aead_setauthsize,
14662 + .encrypt = aead_encrypt,
14663 + .decrypt = aead_decrypt,
14664 + .ivsize = AES_BLOCK_SIZE,
14665 + .maxauthsize = SHA1_DIGEST_SIZE,
14666 + },
14667 + .caam = {
14668 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14669 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14670 + OP_ALG_AAI_HMAC_PRECOMP,
14671 + .geniv = true,
14672 + },
14673 + },
14674 + {
14675 + .aead = {
14676 + .base = {
14677 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
14678 + .cra_driver_name = "authenc-hmac-sha224-"
14679 + "cbc-aes-caam-qi",
14680 + .cra_blocksize = AES_BLOCK_SIZE,
14681 + },
14682 + .setkey = aead_setkey,
14683 + .setauthsize = aead_setauthsize,
14684 + .encrypt = aead_encrypt,
14685 + .decrypt = aead_decrypt,
14686 + .ivsize = AES_BLOCK_SIZE,
14687 + .maxauthsize = SHA224_DIGEST_SIZE,
14688 + },
14689 + .caam = {
14690 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14691 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14692 + OP_ALG_AAI_HMAC_PRECOMP,
14693 + }
14694 + },
14695 + {
14696 + .aead = {
14697 + .base = {
14698 + .cra_name = "echainiv(authenc(hmac(sha224),"
14699 + "cbc(aes)))",
14700 + .cra_driver_name = "echainiv-authenc-"
14701 + "hmac-sha224-cbc-aes-caam-qi",
14702 + .cra_blocksize = AES_BLOCK_SIZE,
14703 + },
14704 + .setkey = aead_setkey,
14705 + .setauthsize = aead_setauthsize,
14706 + .encrypt = aead_encrypt,
14707 + .decrypt = aead_decrypt,
14708 + .ivsize = AES_BLOCK_SIZE,
14709 + .maxauthsize = SHA224_DIGEST_SIZE,
14710 + },
14711 + .caam = {
14712 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14713 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14714 + OP_ALG_AAI_HMAC_PRECOMP,
14715 + .geniv = true,
14716 + }
14717 + },
14718 + {
14719 + .aead = {
14720 + .base = {
14721 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
14722 + .cra_driver_name = "authenc-hmac-sha256-"
14723 + "cbc-aes-caam-qi",
14724 + .cra_blocksize = AES_BLOCK_SIZE,
14725 + },
14726 + .setkey = aead_setkey,
14727 + .setauthsize = aead_setauthsize,
14728 + .encrypt = aead_encrypt,
14729 + .decrypt = aead_decrypt,
14730 + .ivsize = AES_BLOCK_SIZE,
14731 + .maxauthsize = SHA256_DIGEST_SIZE,
14732 + },
14733 + .caam = {
14734 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14735 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14736 + OP_ALG_AAI_HMAC_PRECOMP,
14737 + }
14738 + },
14739 + {
14740 + .aead = {
14741 + .base = {
14742 + .cra_name = "echainiv(authenc(hmac(sha256),"
14743 + "cbc(aes)))",
14744 + .cra_driver_name = "echainiv-authenc-"
14745 + "hmac-sha256-cbc-aes-"
14746 + "caam-qi",
14747 + .cra_blocksize = AES_BLOCK_SIZE,
14748 + },
14749 + .setkey = aead_setkey,
14750 + .setauthsize = aead_setauthsize,
14751 + .encrypt = aead_encrypt,
14752 + .decrypt = aead_decrypt,
14753 + .ivsize = AES_BLOCK_SIZE,
14754 + .maxauthsize = SHA256_DIGEST_SIZE,
14755 + },
14756 + .caam = {
14757 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14758 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14759 + OP_ALG_AAI_HMAC_PRECOMP,
14760 + .geniv = true,
14761 + }
14762 + },
14763 + {
14764 + .aead = {
14765 + .base = {
14766 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
14767 + .cra_driver_name = "authenc-hmac-sha384-"
14768 + "cbc-aes-caam-qi",
14769 + .cra_blocksize = AES_BLOCK_SIZE,
14770 + },
14771 + .setkey = aead_setkey,
14772 + .setauthsize = aead_setauthsize,
14773 + .encrypt = aead_encrypt,
14774 + .decrypt = aead_decrypt,
14775 + .ivsize = AES_BLOCK_SIZE,
14776 + .maxauthsize = SHA384_DIGEST_SIZE,
14777 + },
14778 + .caam = {
14779 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14780 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14781 + OP_ALG_AAI_HMAC_PRECOMP,
14782 + }
14783 + },
14784 + {
14785 + .aead = {
14786 + .base = {
14787 + .cra_name = "echainiv(authenc(hmac(sha384),"
14788 + "cbc(aes)))",
14789 + .cra_driver_name = "echainiv-authenc-"
14790 + "hmac-sha384-cbc-aes-"
14791 + "caam-qi",
14792 + .cra_blocksize = AES_BLOCK_SIZE,
14793 + },
14794 + .setkey = aead_setkey,
14795 + .setauthsize = aead_setauthsize,
14796 + .encrypt = aead_encrypt,
14797 + .decrypt = aead_decrypt,
14798 + .ivsize = AES_BLOCK_SIZE,
14799 + .maxauthsize = SHA384_DIGEST_SIZE,
14800 + },
14801 + .caam = {
14802 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14803 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14804 + OP_ALG_AAI_HMAC_PRECOMP,
14805 + .geniv = true,
14806 + }
14807 + },
14808 + {
14809 + .aead = {
14810 + .base = {
14811 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
14812 + .cra_driver_name = "authenc-hmac-sha512-"
14813 + "cbc-aes-caam-qi",
14814 + .cra_blocksize = AES_BLOCK_SIZE,
14815 + },
14816 + .setkey = aead_setkey,
14817 + .setauthsize = aead_setauthsize,
14818 + .encrypt = aead_encrypt,
14819 + .decrypt = aead_decrypt,
14820 + .ivsize = AES_BLOCK_SIZE,
14821 + .maxauthsize = SHA512_DIGEST_SIZE,
14822 + },
14823 + .caam = {
14824 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14825 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14826 + OP_ALG_AAI_HMAC_PRECOMP,
14827 + }
14828 + },
14829 + {
14830 + .aead = {
14831 + .base = {
14832 + .cra_name = "echainiv(authenc(hmac(sha512),"
14833 + "cbc(aes)))",
14834 + .cra_driver_name = "echainiv-authenc-"
14835 + "hmac-sha512-cbc-aes-"
14836 + "caam-qi",
14837 + .cra_blocksize = AES_BLOCK_SIZE,
14838 + },
14839 + .setkey = aead_setkey,
14840 + .setauthsize = aead_setauthsize,
14841 + .encrypt = aead_encrypt,
14842 + .decrypt = aead_decrypt,
14843 + .ivsize = AES_BLOCK_SIZE,
14844 + .maxauthsize = SHA512_DIGEST_SIZE,
14845 + },
14846 + .caam = {
14847 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14848 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14849 + OP_ALG_AAI_HMAC_PRECOMP,
14850 + .geniv = true,
14851 + }
14852 + },
14853 + {
14854 + .aead = {
14855 + .base = {
14856 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
14857 + .cra_driver_name = "authenc-hmac-md5-"
14858 + "cbc-des3_ede-caam-qi",
14859 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14860 + },
14861 + .setkey = aead_setkey,
14862 + .setauthsize = aead_setauthsize,
14863 + .encrypt = aead_encrypt,
14864 + .decrypt = aead_decrypt,
14865 + .ivsize = DES3_EDE_BLOCK_SIZE,
14866 + .maxauthsize = MD5_DIGEST_SIZE,
14867 + },
14868 + .caam = {
14869 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14870 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14871 + OP_ALG_AAI_HMAC_PRECOMP,
14872 + }
14873 + },
14874 + {
14875 + .aead = {
14876 + .base = {
14877 + .cra_name = "echainiv(authenc(hmac(md5),"
14878 + "cbc(des3_ede)))",
14879 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14880 + "cbc-des3_ede-caam-qi",
14881 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14882 + },
14883 + .setkey = aead_setkey,
14884 + .setauthsize = aead_setauthsize,
14885 + .encrypt = aead_encrypt,
14886 + .decrypt = aead_decrypt,
14887 + .ivsize = DES3_EDE_BLOCK_SIZE,
14888 + .maxauthsize = MD5_DIGEST_SIZE,
14889 + },
14890 + .caam = {
14891 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14892 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14893 + OP_ALG_AAI_HMAC_PRECOMP,
14894 + .geniv = true,
14895 + }
14896 + },
14897 + {
14898 + .aead = {
14899 + .base = {
14900 + .cra_name = "authenc(hmac(sha1),"
14901 + "cbc(des3_ede))",
14902 + .cra_driver_name = "authenc-hmac-sha1-"
14903 + "cbc-des3_ede-caam-qi",
14904 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14905 + },
14906 + .setkey = aead_setkey,
14907 + .setauthsize = aead_setauthsize,
14908 + .encrypt = aead_encrypt,
14909 + .decrypt = aead_decrypt,
14910 + .ivsize = DES3_EDE_BLOCK_SIZE,
14911 + .maxauthsize = SHA1_DIGEST_SIZE,
14912 + },
14913 + .caam = {
14914 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14915 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14916 + OP_ALG_AAI_HMAC_PRECOMP,
14917 + },
14918 + },
14919 + {
14920 + .aead = {
14921 + .base = {
14922 + .cra_name = "echainiv(authenc(hmac(sha1),"
14923 + "cbc(des3_ede)))",
14924 + .cra_driver_name = "echainiv-authenc-"
14925 + "hmac-sha1-"
14926 + "cbc-des3_ede-caam-qi",
14927 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14928 + },
14929 + .setkey = aead_setkey,
14930 + .setauthsize = aead_setauthsize,
14931 + .encrypt = aead_encrypt,
14932 + .decrypt = aead_decrypt,
14933 + .ivsize = DES3_EDE_BLOCK_SIZE,
14934 + .maxauthsize = SHA1_DIGEST_SIZE,
14935 + },
14936 + .caam = {
14937 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14938 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14939 + OP_ALG_AAI_HMAC_PRECOMP,
14940 + .geniv = true,
14941 + }
14942 + },
14943 + {
14944 + .aead = {
14945 + .base = {
14946 + .cra_name = "authenc(hmac(sha224),"
14947 + "cbc(des3_ede))",
14948 + .cra_driver_name = "authenc-hmac-sha224-"
14949 + "cbc-des3_ede-caam-qi",
14950 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14951 + },
14952 + .setkey = aead_setkey,
14953 + .setauthsize = aead_setauthsize,
14954 + .encrypt = aead_encrypt,
14955 + .decrypt = aead_decrypt,
14956 + .ivsize = DES3_EDE_BLOCK_SIZE,
14957 + .maxauthsize = SHA224_DIGEST_SIZE,
14958 + },
14959 + .caam = {
14960 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14961 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14962 + OP_ALG_AAI_HMAC_PRECOMP,
14963 + },
14964 + },
14965 + {
14966 + .aead = {
14967 + .base = {
14968 + .cra_name = "echainiv(authenc(hmac(sha224),"
14969 + "cbc(des3_ede)))",
14970 + .cra_driver_name = "echainiv-authenc-"
14971 + "hmac-sha224-"
14972 + "cbc-des3_ede-caam-qi",
14973 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14974 + },
14975 + .setkey = aead_setkey,
14976 + .setauthsize = aead_setauthsize,
14977 + .encrypt = aead_encrypt,
14978 + .decrypt = aead_decrypt,
14979 + .ivsize = DES3_EDE_BLOCK_SIZE,
14980 + .maxauthsize = SHA224_DIGEST_SIZE,
14981 + },
14982 + .caam = {
14983 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14984 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14985 + OP_ALG_AAI_HMAC_PRECOMP,
14986 + .geniv = true,
14987 + }
14988 + },
14989 + {
14990 + .aead = {
14991 + .base = {
14992 + .cra_name = "authenc(hmac(sha256),"
14993 + "cbc(des3_ede))",
14994 + .cra_driver_name = "authenc-hmac-sha256-"
14995 + "cbc-des3_ede-caam-qi",
14996 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14997 + },
14998 + .setkey = aead_setkey,
14999 + .setauthsize = aead_setauthsize,
15000 + .encrypt = aead_encrypt,
15001 + .decrypt = aead_decrypt,
15002 + .ivsize = DES3_EDE_BLOCK_SIZE,
15003 + .maxauthsize = SHA256_DIGEST_SIZE,
15004 + },
15005 + .caam = {
15006 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
15007 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
15008 + OP_ALG_AAI_HMAC_PRECOMP,
15009 + },
15010 + },
15011 + {
15012 + .aead = {
15013 + .base = {
15014 + .cra_name = "echainiv(authenc(hmac(sha256),"
15015 + "cbc(des3_ede)))",
15016 + .cra_driver_name = "echainiv-authenc-"
15017 + "hmac-sha256-"
15018 + "cbc-des3_ede-caam-qi",
15019 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
15020 + },
15021 + .setkey = aead_setkey,
15022 + .setauthsize = aead_setauthsize,
15023 + .encrypt = aead_encrypt,
15024 + .decrypt = aead_decrypt,
15025 + .ivsize = DES3_EDE_BLOCK_SIZE,
15026 + .maxauthsize = SHA256_DIGEST_SIZE,
15027 + },
15028 + .caam = {
15029 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
15030 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
15031 + OP_ALG_AAI_HMAC_PRECOMP,
15032 + .geniv = true,
15033 + }
15034 + },
15035 + {
15036 + .aead = {
15037 + .base = {
15038 + .cra_name = "authenc(hmac(sha384),"
15039 + "cbc(des3_ede))",
15040 + .cra_driver_name = "authenc-hmac-sha384-"
15041 + "cbc-des3_ede-caam-qi",
15042 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
15043 + },
15044 + .setkey = aead_setkey,
15045 + .setauthsize = aead_setauthsize,
15046 + .encrypt = aead_encrypt,
15047 + .decrypt = aead_decrypt,
15048 + .ivsize = DES3_EDE_BLOCK_SIZE,
15049 + .maxauthsize = SHA384_DIGEST_SIZE,
15050 + },
15051 + .caam = {
15052 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
15053 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
15054 + OP_ALG_AAI_HMAC_PRECOMP,
15055 + },
15056 + },
15057 + {
15058 + .aead = {
15059 + .base = {
15060 + .cra_name = "echainiv(authenc(hmac(sha384),"
15061 + "cbc(des3_ede)))",
15062 + .cra_driver_name = "echainiv-authenc-"
15063 + "hmac-sha384-"
15064 + "cbc-des3_ede-caam-qi",
15065 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
15066 + },
15067 + .setkey = aead_setkey,
15068 + .setauthsize = aead_setauthsize,
15069 + .encrypt = aead_encrypt,
15070 + .decrypt = aead_decrypt,
15071 + .ivsize = DES3_EDE_BLOCK_SIZE,
15072 + .maxauthsize = SHA384_DIGEST_SIZE,
15073 + },
15074 + .caam = {
15075 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
15076 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
15077 + OP_ALG_AAI_HMAC_PRECOMP,
15078 + .geniv = true,
15079 + }
15080 + },
15081 + {
15082 + .aead = {
15083 + .base = {
15084 + .cra_name = "authenc(hmac(sha512),"
15085 + "cbc(des3_ede))",
15086 + .cra_driver_name = "authenc-hmac-sha512-"
15087 + "cbc-des3_ede-caam-qi",
15088 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
15089 + },
15090 + .setkey = aead_setkey,
15091 + .setauthsize = aead_setauthsize,
15092 + .encrypt = aead_encrypt,
15093 + .decrypt = aead_decrypt,
15094 + .ivsize = DES3_EDE_BLOCK_SIZE,
15095 + .maxauthsize = SHA512_DIGEST_SIZE,
15096 + },
15097 + .caam = {
15098 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
15099 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
15100 + OP_ALG_AAI_HMAC_PRECOMP,
15101 + },
15102 + },
15103 + {
15104 + .aead = {
15105 + .base = {
15106 + .cra_name = "echainiv(authenc(hmac(sha512),"
15107 + "cbc(des3_ede)))",
15108 + .cra_driver_name = "echainiv-authenc-"
15109 + "hmac-sha512-"
15110 + "cbc-des3_ede-caam-qi",
15111 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
15112 + },
15113 + .setkey = aead_setkey,
15114 + .setauthsize = aead_setauthsize,
15115 + .encrypt = aead_encrypt,
15116 + .decrypt = aead_decrypt,
15117 + .ivsize = DES3_EDE_BLOCK_SIZE,
15118 + .maxauthsize = SHA512_DIGEST_SIZE,
15119 + },
15120 + .caam = {
15121 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
15122 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
15123 + OP_ALG_AAI_HMAC_PRECOMP,
15124 + .geniv = true,
15125 + }
15126 + },
15127 + {
15128 + .aead = {
15129 + .base = {
15130 + .cra_name = "authenc(hmac(md5),cbc(des))",
15131 + .cra_driver_name = "authenc-hmac-md5-"
15132 + "cbc-des-caam-qi",
15133 + .cra_blocksize = DES_BLOCK_SIZE,
15134 + },
15135 + .setkey = aead_setkey,
15136 + .setauthsize = aead_setauthsize,
15137 + .encrypt = aead_encrypt,
15138 + .decrypt = aead_decrypt,
15139 + .ivsize = DES_BLOCK_SIZE,
15140 + .maxauthsize = MD5_DIGEST_SIZE,
15141 + },
15142 + .caam = {
15143 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
15144 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
15145 + OP_ALG_AAI_HMAC_PRECOMP,
15146 + },
15147 + },
15148 + {
15149 + .aead = {
15150 + .base = {
15151 + .cra_name = "echainiv(authenc(hmac(md5),"
15152 + "cbc(des)))",
15153 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
15154 + "cbc-des-caam-qi",
15155 + .cra_blocksize = DES_BLOCK_SIZE,
15156 + },
15157 + .setkey = aead_setkey,
15158 + .setauthsize = aead_setauthsize,
15159 + .encrypt = aead_encrypt,
15160 + .decrypt = aead_decrypt,
15161 + .ivsize = DES_BLOCK_SIZE,
15162 + .maxauthsize = MD5_DIGEST_SIZE,
15163 + },
15164 + .caam = {
15165 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
15166 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
15167 + OP_ALG_AAI_HMAC_PRECOMP,
15168 + .geniv = true,
15169 + }
15170 + },
15171 + {
15172 + .aead = {
15173 + .base = {
15174 + .cra_name = "authenc(hmac(sha1),cbc(des))",
15175 + .cra_driver_name = "authenc-hmac-sha1-"
15176 + "cbc-des-caam-qi",
15177 + .cra_blocksize = DES_BLOCK_SIZE,
15178 + },
15179 + .setkey = aead_setkey,
15180 + .setauthsize = aead_setauthsize,
15181 + .encrypt = aead_encrypt,
15182 + .decrypt = aead_decrypt,
15183 + .ivsize = DES_BLOCK_SIZE,
15184 + .maxauthsize = SHA1_DIGEST_SIZE,
15185 + },
15186 + .caam = {
15187 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
15188 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
15189 + OP_ALG_AAI_HMAC_PRECOMP,
15190 + },
15191 + },
15192 + {
15193 + .aead = {
15194 + .base = {
15195 + .cra_name = "echainiv(authenc(hmac(sha1),"
15196 + "cbc(des)))",
15197 + .cra_driver_name = "echainiv-authenc-"
15198 + "hmac-sha1-cbc-des-caam-qi",
15199 + .cra_blocksize = DES_BLOCK_SIZE,
15200 + },
15201 + .setkey = aead_setkey,
15202 + .setauthsize = aead_setauthsize,
15203 + .encrypt = aead_encrypt,
15204 + .decrypt = aead_decrypt,
15205 + .ivsize = DES_BLOCK_SIZE,
15206 + .maxauthsize = SHA1_DIGEST_SIZE,
15207 + },
15208 + .caam = {
15209 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
15210 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
15211 + OP_ALG_AAI_HMAC_PRECOMP,
15212 + .geniv = true,
15213 + }
15214 + },
15215 + {
15216 + .aead = {
15217 + .base = {
15218 + .cra_name = "authenc(hmac(sha224),cbc(des))",
15219 + .cra_driver_name = "authenc-hmac-sha224-"
15220 + "cbc-des-caam-qi",
15221 + .cra_blocksize = DES_BLOCK_SIZE,
15222 + },
15223 + .setkey = aead_setkey,
15224 + .setauthsize = aead_setauthsize,
15225 + .encrypt = aead_encrypt,
15226 + .decrypt = aead_decrypt,
15227 + .ivsize = DES_BLOCK_SIZE,
15228 + .maxauthsize = SHA224_DIGEST_SIZE,
15229 + },
15230 + .caam = {
15231 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
15232 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
15233 + OP_ALG_AAI_HMAC_PRECOMP,
15234 + },
15235 + },
15236 + {
15237 + .aead = {
15238 + .base = {
15239 + .cra_name = "echainiv(authenc(hmac(sha224),"
15240 + "cbc(des)))",
15241 + .cra_driver_name = "echainiv-authenc-"
15242 + "hmac-sha224-cbc-des-"
15243 + "caam-qi",
15244 + .cra_blocksize = DES_BLOCK_SIZE,
15245 + },
15246 + .setkey = aead_setkey,
15247 + .setauthsize = aead_setauthsize,
15248 + .encrypt = aead_encrypt,
15249 + .decrypt = aead_decrypt,
15250 + .ivsize = DES_BLOCK_SIZE,
15251 + .maxauthsize = SHA224_DIGEST_SIZE,
15252 + },
15253 + .caam = {
15254 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
15255 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
15256 + OP_ALG_AAI_HMAC_PRECOMP,
15257 + .geniv = true,
15258 + }
15259 + },
15260 + {
15261 + .aead = {
15262 + .base = {
15263 + .cra_name = "authenc(hmac(sha256),cbc(des))",
15264 + .cra_driver_name = "authenc-hmac-sha256-"
15265 + "cbc-des-caam-qi",
15266 + .cra_blocksize = DES_BLOCK_SIZE,
15267 + },
15268 + .setkey = aead_setkey,
15269 + .setauthsize = aead_setauthsize,
15270 + .encrypt = aead_encrypt,
15271 + .decrypt = aead_decrypt,
15272 + .ivsize = DES_BLOCK_SIZE,
15273 + .maxauthsize = SHA256_DIGEST_SIZE,
15274 + },
15275 + .caam = {
15276 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
15277 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
15278 + OP_ALG_AAI_HMAC_PRECOMP,
15279 + },
15280 + },
15281 + {
15282 + .aead = {
15283 + .base = {
15284 + .cra_name = "echainiv(authenc(hmac(sha256),"
15285 + "cbc(des)))",
15286 + .cra_driver_name = "echainiv-authenc-"
15287 + "hmac-sha256-cbc-des-"
15288 + "caam-qi",
15289 + .cra_blocksize = DES_BLOCK_SIZE,
15290 + },
15291 + .setkey = aead_setkey,
15292 + .setauthsize = aead_setauthsize,
15293 + .encrypt = aead_encrypt,
15294 + .decrypt = aead_decrypt,
15295 + .ivsize = DES_BLOCK_SIZE,
15296 + .maxauthsize = SHA256_DIGEST_SIZE,
15297 + },
15298 + .caam = {
15299 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
15300 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
15301 + OP_ALG_AAI_HMAC_PRECOMP,
15302 + .geniv = true,
15303 + },
15304 + },
15305 + {
15306 + .aead = {
15307 + .base = {
15308 + .cra_name = "authenc(hmac(sha384),cbc(des))",
15309 + .cra_driver_name = "authenc-hmac-sha384-"
15310 + "cbc-des-caam-qi",
15311 + .cra_blocksize = DES_BLOCK_SIZE,
15312 + },
15313 + .setkey = aead_setkey,
15314 + .setauthsize = aead_setauthsize,
15315 + .encrypt = aead_encrypt,
15316 + .decrypt = aead_decrypt,
15317 + .ivsize = DES_BLOCK_SIZE,
15318 + .maxauthsize = SHA384_DIGEST_SIZE,
15319 + },
15320 + .caam = {
15321 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
15322 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
15323 + OP_ALG_AAI_HMAC_PRECOMP,
15324 + },
15325 + },
15326 + {
15327 + .aead = {
15328 + .base = {
15329 + .cra_name = "echainiv(authenc(hmac(sha384),"
15330 + "cbc(des)))",
15331 + .cra_driver_name = "echainiv-authenc-"
15332 + "hmac-sha384-cbc-des-"
15333 + "caam-qi",
15334 + .cra_blocksize = DES_BLOCK_SIZE,
15335 + },
15336 + .setkey = aead_setkey,
15337 + .setauthsize = aead_setauthsize,
15338 + .encrypt = aead_encrypt,
15339 + .decrypt = aead_decrypt,
15340 + .ivsize = DES_BLOCK_SIZE,
15341 + .maxauthsize = SHA384_DIGEST_SIZE,
15342 + },
15343 + .caam = {
15344 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
15345 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
15346 + OP_ALG_AAI_HMAC_PRECOMP,
15347 + .geniv = true,
15348 + }
15349 + },
15350 + {
15351 + .aead = {
15352 + .base = {
15353 + .cra_name = "authenc(hmac(sha512),cbc(des))",
15354 + .cra_driver_name = "authenc-hmac-sha512-"
15355 + "cbc-des-caam-qi",
15356 + .cra_blocksize = DES_BLOCK_SIZE,
15357 + },
15358 + .setkey = aead_setkey,
15359 + .setauthsize = aead_setauthsize,
15360 + .encrypt = aead_encrypt,
15361 + .decrypt = aead_decrypt,
15362 + .ivsize = DES_BLOCK_SIZE,
15363 + .maxauthsize = SHA512_DIGEST_SIZE,
15364 + },
15365 + .caam = {
15366 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
15367 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
15368 + OP_ALG_AAI_HMAC_PRECOMP,
15369 + }
15370 + },
15371 + {
15372 + .aead = {
15373 + .base = {
15374 + .cra_name = "echainiv(authenc(hmac(sha512),"
15375 + "cbc(des)))",
15376 + .cra_driver_name = "echainiv-authenc-"
15377 + "hmac-sha512-cbc-des-"
15378 + "caam-qi",
15379 + .cra_blocksize = DES_BLOCK_SIZE,
15380 + },
15381 + .setkey = aead_setkey,
15382 + .setauthsize = aead_setauthsize,
15383 + .encrypt = aead_encrypt,
15384 + .decrypt = aead_decrypt,
15385 + .ivsize = DES_BLOCK_SIZE,
15386 + .maxauthsize = SHA512_DIGEST_SIZE,
15387 + },
15388 + .caam = {
15389 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
15390 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
15391 + OP_ALG_AAI_HMAC_PRECOMP,
15392 + .geniv = true,
15393 + }
15394 + },
15395 + {
15396 + .aead = {
15397 + .base = {
15398 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
15399 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
15400 + .cra_blocksize = AES_BLOCK_SIZE,
15401 + },
15402 + .setkey = tls_setkey,
15403 + .setauthsize = tls_setauthsize,
15404 + .encrypt = tls_encrypt,
15405 + .decrypt = tls_decrypt,
15406 + .ivsize = AES_BLOCK_SIZE,
15407 + .maxauthsize = SHA1_DIGEST_SIZE,
15408 + },
15409 + .caam = {
15410 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
15411 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
15412 + OP_ALG_AAI_HMAC_PRECOMP,
15413 + }
15414 + }
15415 +};
15416 +
15417 +struct caam_crypto_alg {
15418 + struct list_head entry;
15419 + struct crypto_alg crypto_alg;
15420 + struct caam_alg_entry caam;
15421 +};
15422 +
15423 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
15424 + bool uses_dkp)
15425 +{
15426 + struct caam_drv_private *priv;
15427 + /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
15428 + static const u8 digest_size[] = {
15429 + MD5_DIGEST_SIZE,
15430 + SHA1_DIGEST_SIZE,
15431 + SHA224_DIGEST_SIZE,
15432 + SHA256_DIGEST_SIZE,
15433 + SHA384_DIGEST_SIZE,
15434 + SHA512_DIGEST_SIZE
15435 + };
15436 + u8 op_id;
15437 +
15438 + /*
15439 + * distribute tfms across job rings to ensure in-order
15440 + * crypto request processing per tfm
15441 + */
15442 + ctx->jrdev = caam_jr_alloc();
15443 + if (IS_ERR(ctx->jrdev)) {
15444 + pr_err("Job Ring Device allocation for transform failed\n");
15445 + return PTR_ERR(ctx->jrdev);
15446 + }
15447 +
15448 + priv = dev_get_drvdata(ctx->jrdev->parent);
15449 + if (priv->era >= 6 && uses_dkp)
15450 + ctx->dir = DMA_BIDIRECTIONAL;
15451 + else
15452 + ctx->dir = DMA_TO_DEVICE;
15453 +
15454 + ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
15455 + ctx->dir);
15456 + if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
15457 + dev_err(ctx->jrdev, "unable to map key\n");
15458 + caam_jr_free(ctx->jrdev);
15459 + return -ENOMEM;
15460 + }
15461 +
15462 + /* copy descriptor header template value */
15463 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
15464 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
15465 +
15466 + if (ctx->adata.algtype) {
15467 + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
15468 + >> OP_ALG_ALGSEL_SHIFT;
15469 + if (op_id < ARRAY_SIZE(digest_size)) {
15470 + ctx->authsize = digest_size[op_id];
15471 + } else {
15472 + dev_err(ctx->jrdev,
15473 + "incorrect op_id %d; must be less than %zu\n",
15474 + op_id, ARRAY_SIZE(digest_size));
15475 + caam_jr_free(ctx->jrdev);
15476 + return -EINVAL;
15477 + }
15478 + } else {
15479 + ctx->authsize = 0;
15480 + }
15481 +
15482 + ctx->qidev = priv->qidev;
15483 +
15484 + spin_lock_init(&ctx->lock);
15485 + ctx->drv_ctx[ENCRYPT] = NULL;
15486 + ctx->drv_ctx[DECRYPT] = NULL;
15487 + ctx->drv_ctx[GIVENCRYPT] = NULL;
15488 +
15489 + return 0;
15490 +}
15491 +
15492 +static int caam_cra_init(struct crypto_tfm *tfm)
15493 +{
15494 + struct crypto_alg *alg = tfm->__crt_alg;
15495 + struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
15496 + crypto_alg);
15497 + struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
15498 +
15499 + return caam_init_common(ctx, &caam_alg->caam, false);
15500 +}
15501 +
15502 +static int caam_aead_init(struct crypto_aead *tfm)
15503 +{
15504 + struct aead_alg *alg = crypto_aead_alg(tfm);
15505 + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
15506 + aead);
15507 + struct caam_ctx *ctx = crypto_aead_ctx(tfm);
15508 +
15509 + return caam_init_common(ctx, &caam_alg->caam,
15510 + (alg->setkey == aead_setkey) ||
15511 + (alg->setkey == tls_setkey));
15512 +}
15513 +
15514 +static void caam_exit_common(struct caam_ctx *ctx)
15515 +{
15516 + caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
15517 + caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
15518 + caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
15519 +
15520 + dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
15521 +
15522 + caam_jr_free(ctx->jrdev);
15523 +}
15524 +
15525 +static void caam_cra_exit(struct crypto_tfm *tfm)
15526 +{
15527 + caam_exit_common(crypto_tfm_ctx(tfm));
15528 +}
15529 +
15530 +static void caam_aead_exit(struct crypto_aead *tfm)
15531 +{
15532 + caam_exit_common(crypto_aead_ctx(tfm));
15533 +}
15534 +
15535 +static struct list_head alg_list;
15536 +static void __exit caam_qi_algapi_exit(void)
15537 +{
15538 + struct caam_crypto_alg *t_alg, *n;
15539 + int i;
15540 +
15541 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
15542 + struct caam_aead_alg *t_alg = driver_aeads + i;
15543 +
15544 + if (t_alg->registered)
15545 + crypto_unregister_aead(&t_alg->aead);
15546 + }
15547 +
15548 + if (!alg_list.next)
15549 + return;
15550 +
15551 + list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
15552 + crypto_unregister_alg(&t_alg->crypto_alg);
15553 + list_del(&t_alg->entry);
15554 + kfree(t_alg);
15555 + }
15556 +}
15557 +
15558 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
15559 + *template)
15560 +{
15561 + struct caam_crypto_alg *t_alg;
15562 + struct crypto_alg *alg;
15563 +
15564 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
15565 + if (!t_alg)
15566 + return ERR_PTR(-ENOMEM);
15567 +
15568 + alg = &t_alg->crypto_alg;
15569 +
15570 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
15571 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
15572 + template->driver_name);
15573 + alg->cra_module = THIS_MODULE;
15574 + alg->cra_init = caam_cra_init;
15575 + alg->cra_exit = caam_cra_exit;
15576 + alg->cra_priority = CAAM_CRA_PRIORITY;
15577 + alg->cra_blocksize = template->blocksize;
15578 + alg->cra_alignmask = 0;
15579 + alg->cra_ctxsize = sizeof(struct caam_ctx);
15580 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
15581 + template->type;
15582 + switch (template->type) {
15583 + case CRYPTO_ALG_TYPE_GIVCIPHER:
15584 + alg->cra_type = &crypto_givcipher_type;
15585 + alg->cra_ablkcipher = template->template_ablkcipher;
15586 + break;
15587 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
15588 + alg->cra_type = &crypto_ablkcipher_type;
15589 + alg->cra_ablkcipher = template->template_ablkcipher;
15590 + break;
15591 + }
15592 +
15593 + t_alg->caam.class1_alg_type = template->class1_alg_type;
15594 + t_alg->caam.class2_alg_type = template->class2_alg_type;
15595 +
15596 + return t_alg;
15597 +}
15598 +
15599 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
15600 +{
15601 + struct aead_alg *alg = &t_alg->aead;
15602 +
15603 + alg->base.cra_module = THIS_MODULE;
15604 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
15605 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
15606 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
15607 +
15608 + alg->init = caam_aead_init;
15609 + alg->exit = caam_aead_exit;
15610 +}
15611 +
15612 +static int __init caam_qi_algapi_init(void)
15613 +{
15614 + struct device_node *dev_node;
15615 + struct platform_device *pdev;
15616 + struct device *ctrldev;
15617 + struct caam_drv_private *priv;
15618 + int i = 0, err = 0;
15619 + u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
15620 + unsigned int md_limit = SHA512_DIGEST_SIZE;
15621 + bool registered = false;
15622 +
15623 + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
15624 + if (!dev_node) {
15625 + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
15626 + if (!dev_node)
15627 + return -ENODEV;
15628 + }
15629 +
15630 + pdev = of_find_device_by_node(dev_node);
15631 + of_node_put(dev_node);
15632 + if (!pdev)
15633 + return -ENODEV;
15634 +
15635 + ctrldev = &pdev->dev;
15636 + priv = dev_get_drvdata(ctrldev);
15637 +
15638 + /*
15639 + * If priv is NULL, it's probably because the caam driver wasn't
15640 + * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
15641 + */
15642 + if (!priv || !priv->qi_present)
15643 + return -ENODEV;
15644 +
15645 + if (caam_dpaa2) {
15646 + dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
15647 + return -ENODEV;
15648 + }
15649 +
15650 + INIT_LIST_HEAD(&alg_list);
15651 +
15652 + /*
15653 + * Register crypto algorithms the device supports.
15654 + * First, detect presence and attributes of DES, AES, and MD blocks.
15655 + */
15656 + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
15657 + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
15658 + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
15659 + aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
15660 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
15661 +
15662 + /* If MD is present, limit digest size based on LP256 */
15663 + if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
15664 + md_limit = SHA256_DIGEST_SIZE;
15665 +
15666 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
15667 + struct caam_crypto_alg *t_alg;
15668 + struct caam_alg_template *alg = driver_algs + i;
15669 + u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
15670 +
15671 + /* Skip DES algorithms if not supported by device */
15672 + if (!des_inst &&
15673 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
15674 + (alg_sel == OP_ALG_ALGSEL_DES)))
15675 + continue;
15676 +
15677 + /* Skip AES algorithms if not supported by device */
15678 + if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
15679 + continue;
15680 +
15681 + t_alg = caam_alg_alloc(alg);
15682 + if (IS_ERR(t_alg)) {
15683 + err = PTR_ERR(t_alg);
15684 + dev_warn(priv->qidev, "%s alg allocation failed\n",
15685 + alg->driver_name);
15686 + continue;
15687 + }
15688 +
15689 + err = crypto_register_alg(&t_alg->crypto_alg);
15690 + if (err) {
15691 + dev_warn(priv->qidev, "%s alg registration failed\n",
15692 + t_alg->crypto_alg.cra_driver_name);
15693 + kfree(t_alg);
15694 + continue;
15695 + }
15696 +
15697 + list_add_tail(&t_alg->entry, &alg_list);
15698 + registered = true;
15699 + }
15700 +
15701 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
15702 + struct caam_aead_alg *t_alg = driver_aeads + i;
15703 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
15704 + OP_ALG_ALGSEL_MASK;
15705 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
15706 + OP_ALG_ALGSEL_MASK;
15707 + u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
15708 +
15709 + /* Skip DES algorithms if not supported by device */
15710 + if (!des_inst &&
15711 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
15712 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
15713 + continue;
15714 +
15715 + /* Skip AES algorithms if not supported by device */
15716 + if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
15717 + continue;
15718 +
15719 + /*
15720 + * Check support for AES algorithms not available
15721 + * on LP devices.
15722 + */
15723 + if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
15724 + (alg_aai == OP_ALG_AAI_GCM))
15725 + continue;
15726 +
15727 + /*
15728 + * Skip algorithms requiring message digests
15729 + * if MD or MD size is not supported by device.
15730 + */
15731 + if (c2_alg_sel &&
15732 + (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
15733 + continue;
15734 +
15735 + caam_aead_alg_init(t_alg);
15736 +
15737 + err = crypto_register_aead(&t_alg->aead);
15738 + if (err) {
15739 + pr_warn("%s alg registration failed\n",
15740 + t_alg->aead.base.cra_driver_name);
15741 + continue;
15742 + }
15743 +
15744 + t_alg->registered = true;
15745 + registered = true;
15746 + }
15747 +
15748 + if (registered)
15749 + dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
15750 +
15751 + return err;
15752 +}
15753 +
15754 +module_init(caam_qi_algapi_init);
15755 +module_exit(caam_qi_algapi_exit);
15756 +
15757 +MODULE_LICENSE("GPL");
15758 +MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
15759 +MODULE_AUTHOR("Freescale Semiconductor");
15760 --- /dev/null
15761 +++ b/drivers/crypto/caam/caamalg_qi2.c
15762 @@ -0,0 +1,5938 @@
15763 +/*
15764 + * Copyright 2015-2016 Freescale Semiconductor Inc.
15765 + * Copyright 2017 NXP
15766 + *
15767 + * Redistribution and use in source and binary forms, with or without
15768 + * modification, are permitted provided that the following conditions are met:
15769 + * * Redistributions of source code must retain the above copyright
15770 + * notice, this list of conditions and the following disclaimer.
15771 + * * Redistributions in binary form must reproduce the above copyright
15772 + * notice, this list of conditions and the following disclaimer in the
15773 + * documentation and/or other materials provided with the distribution.
15774 + * * Neither the names of the above-listed copyright holders nor the
15775 + * names of any contributors may be used to endorse or promote products
15776 + * derived from this software without specific prior written permission.
15777 + *
15778 + *
15779 + * ALTERNATIVELY, this software may be distributed under the terms of the
15780 + * GNU General Public License ("GPL") as published by the Free Software
15781 + * Foundation, either version 2 of that License or (at your option) any
15782 + * later version.
15783 + *
15784 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15785 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15786 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15787 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
15788 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
15789 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
15790 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
15791 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
15792 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
15793 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
15794 + * POSSIBILITY OF SUCH DAMAGE.
15795 + */
15796 +
15797 +#include <linux/fsl/mc.h>
15798 +#include "compat.h"
15799 +#include "regs.h"
15800 +#include "caamalg_qi2.h"
15801 +#include "dpseci_cmd.h"
15802 +#include "desc_constr.h"
15803 +#include "error.h"
15804 +#include "sg_sw_sec4.h"
15805 +#include "sg_sw_qm2.h"
15806 +#include "key_gen.h"
15807 +#include "caamalg_desc.h"
15808 +#include "caamhash_desc.h"
15809 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
15810 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
15811 +
15812 +#define CAAM_CRA_PRIORITY 2000
15813 +
15814 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
15815 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
15816 + SHA512_DIGEST_SIZE * 2)
15817 +
15818 +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
15819 +bool caam_little_end;
15820 +EXPORT_SYMBOL(caam_little_end);
15821 +bool caam_imx;
15822 +EXPORT_SYMBOL(caam_imx);
15823 +#endif
15824 +
15825 +/*
15826 + * This is a cache of buffers, from which the users of the CAAM QI driver
15827 + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
15828 + * NOTE: A more elegant solution would be to have some headroom in the frames
15829 + * being processed. This can be added by the dpaa2-eth driver. This would
15830 + * pose a problem for userspace application processing which cannot
15831 + * know of this limitation. So for now, this will work.
15832 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
15833 + */
15834 +static struct kmem_cache *qi_cache;
15835 +
15836 +struct caam_alg_entry {
15837 + struct device *dev;
15838 + int class1_alg_type;
15839 + int class2_alg_type;
15840 + bool rfc3686;
15841 + bool geniv;
15842 +};
15843 +
15844 +struct caam_aead_alg {
15845 + struct aead_alg aead;
15846 + struct caam_alg_entry caam;
15847 + bool registered;
15848 +};
15849 +
15850 +/**
15851 + * caam_ctx - per-session context
15852 + * @flc: Flow Contexts array
15853 + * @key: virtual address of the key(s): [authentication key], encryption key
15854 + * @flc_dma: I/O virtual addresses of the Flow Contexts
15855 + * @key_dma: I/O virtual address of the key
15856 + * @dir: DMA direction for mapping key and Flow Contexts
15857 + * @dev: dpseci device
15858 + * @adata: authentication algorithm details
15859 + * @cdata: encryption algorithm details
15860 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
15861 + */
15862 +struct caam_ctx {
15863 + struct caam_flc flc[NUM_OP];
15864 + u8 key[CAAM_MAX_KEY_SIZE];
15865 + dma_addr_t flc_dma[NUM_OP];
15866 + dma_addr_t key_dma;
15867 + enum dma_data_direction dir;
15868 + struct device *dev;
15869 + struct alginfo adata;
15870 + struct alginfo cdata;
15871 + unsigned int authsize;
15872 +};
15873 +
15874 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
15875 + dma_addr_t iova_addr)
15876 +{
15877 + phys_addr_t phys_addr;
15878 +
15879 + phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
15880 + iova_addr;
15881 +
15882 + return phys_to_virt(phys_addr);
15883 +}
15884 +
15885 +/*
15886 + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
15887 + *
15888 + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
15889 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
15890 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
15891 + * hosting 16 SG entries.
15892 + *
15893 + * @flags - flags that would be used for the equivalent kmalloc(..) call
15894 + *
15895 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
15896 + */
15897 +static inline void *qi_cache_zalloc(gfp_t flags)
15898 +{
15899 + return kmem_cache_zalloc(qi_cache, flags);
15900 +}
15901 +
15902 +/*
15903 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
15904 + *
15905 + * @obj - buffer previously allocated by qi_cache_zalloc
15906 + *
15907 + * No checking is done; the call is a passthrough to
15908 + * kmem_cache_free(...)
15909 + */
15910 +static inline void qi_cache_free(void *obj)
15911 +{
15912 + kmem_cache_free(qi_cache, obj);
15913 +}
15914 +
15915 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
15916 +{
15917 + switch (crypto_tfm_alg_type(areq->tfm)) {
15918 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
15919 + case CRYPTO_ALG_TYPE_GIVCIPHER:
15920 + return ablkcipher_request_ctx(ablkcipher_request_cast(areq));
15921 + case CRYPTO_ALG_TYPE_AEAD:
15922 + return aead_request_ctx(container_of(areq, struct aead_request,
15923 + base));
15924 + case CRYPTO_ALG_TYPE_AHASH:
15925 + return ahash_request_ctx(ahash_request_cast(areq));
15926 + default:
15927 + return ERR_PTR(-EINVAL);
15928 + }
15929 +}
15930 +
15931 +static void caam_unmap(struct device *dev, struct scatterlist *src,
15932 + struct scatterlist *dst, int src_nents,
15933 + int dst_nents, dma_addr_t iv_dma, int ivsize,
15934 + enum optype op_type, dma_addr_t qm_sg_dma,
15935 + int qm_sg_bytes)
15936 +{
15937 + if (dst != src) {
15938 + if (src_nents)
15939 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
15940 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
15941 + } else {
15942 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
15943 + }
15944 +
15945 + if (iv_dma)
15946 + dma_unmap_single(dev, iv_dma, ivsize,
15947 + op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
15948 + DMA_TO_DEVICE);
15949 +
15950 + if (qm_sg_bytes)
15951 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
15952 +}
15953 +
15954 +static int aead_set_sh_desc(struct crypto_aead *aead)
15955 +{
15956 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15957 + typeof(*alg), aead);
15958 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15959 + unsigned int ivsize = crypto_aead_ivsize(aead);
15960 + struct device *dev = ctx->dev;
15961 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
15962 + struct caam_flc *flc;
15963 + u32 *desc;
15964 + u32 ctx1_iv_off = 0;
15965 + u32 *nonce = NULL;
15966 + unsigned int data_len[2];
15967 + u32 inl_mask;
15968 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
15969 + OP_ALG_AAI_CTR_MOD128);
15970 + const bool is_rfc3686 = alg->caam.rfc3686;
15971 +
15972 + if (!ctx->cdata.keylen || !ctx->authsize)
15973 + return 0;
15974 +
15975 + /*
15976 + * AES-CTR needs to load IV in CONTEXT1 reg
15977 + * at an offset of 128bits (16bytes)
15978 + * CONTEXT1[255:128] = IV
15979 + */
15980 + if (ctr_mode)
15981 + ctx1_iv_off = 16;
15982 +
15983 + /*
15984 + * RFC3686 specific:
15985 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
15986 + */
15987 + if (is_rfc3686) {
15988 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
15989 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
15990 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
15991 + }
15992 +
15993 + data_len[0] = ctx->adata.keylen_pad;
15994 + data_len[1] = ctx->cdata.keylen;
15995 +
15996 + /* aead_encrypt shared descriptor */
15997 + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
15998 + DESC_QI_AEAD_ENC_LEN) +
15999 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
16000 + DESC_JOB_IO_LEN, data_len, &inl_mask,
16001 + ARRAY_SIZE(data_len)) < 0)
16002 + return -EINVAL;
16003 +
16004 + if (inl_mask & 1)
16005 + ctx->adata.key_virt = ctx->key;
16006 + else
16007 + ctx->adata.key_dma = ctx->key_dma;
16008 +
16009 + if (inl_mask & 2)
16010 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
16011 + else
16012 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16013 +
16014 + ctx->adata.key_inline = !!(inl_mask & 1);
16015 + ctx->cdata.key_inline = !!(inl_mask & 2);
16016 +
16017 + flc = &ctx->flc[ENCRYPT];
16018 + desc = flc->sh_desc;
16019 +
16020 + if (alg->caam.geniv)
16021 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
16022 + ivsize, ctx->authsize, is_rfc3686,
16023 + nonce, ctx1_iv_off, true,
16024 + priv->sec_attr.era);
16025 + else
16026 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
16027 + ivsize, ctx->authsize, is_rfc3686, nonce,
16028 + ctx1_iv_off, true, priv->sec_attr.era);
16029 +
16030 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
16031 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16032 + sizeof(flc->flc) + desc_bytes(desc),
16033 + ctx->dir);
16034 +
16035 + /* aead_decrypt shared descriptor */
16036 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
16037 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
16038 + DESC_JOB_IO_LEN, data_len, &inl_mask,
16039 + ARRAY_SIZE(data_len)) < 0)
16040 + return -EINVAL;
16041 +
16042 + if (inl_mask & 1)
16043 + ctx->adata.key_virt = ctx->key;
16044 + else
16045 + ctx->adata.key_dma = ctx->key_dma;
16046 +
16047 + if (inl_mask & 2)
16048 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
16049 + else
16050 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16051 +
16052 + ctx->adata.key_inline = !!(inl_mask & 1);
16053 + ctx->cdata.key_inline = !!(inl_mask & 2);
16054 +
16055 + flc = &ctx->flc[DECRYPT];
16056 + desc = flc->sh_desc;
16057 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
16058 + ivsize, ctx->authsize, alg->caam.geniv,
16059 + is_rfc3686, nonce, ctx1_iv_off, true,
16060 + priv->sec_attr.era);
16061 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
16062 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16063 + sizeof(flc->flc) + desc_bytes(desc),
16064 + ctx->dir);
16065 +
16066 + return 0;
16067 +}
16068 +
16069 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
16070 +{
16071 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16072 +
16073 + ctx->authsize = authsize;
16074 + aead_set_sh_desc(authenc);
16075 +
16076 + return 0;
16077 +}
16078 +
16079 +struct split_key_sh_result {
16080 + struct completion completion;
16081 + int err;
16082 + struct device *dev;
16083 +};
16084 +
16085 +static void split_key_sh_done(void *cbk_ctx, u32 err)
16086 +{
16087 + struct split_key_sh_result *res = cbk_ctx;
16088 +
16089 +#ifdef DEBUG
16090 + dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
16091 +#endif
16092 +
16093 + if (err)
16094 + caam_qi2_strstatus(res->dev, err);
16095 +
16096 + res->err = err;
16097 + complete(&res->completion);
16098 +}
16099 +
16100 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
16101 + unsigned int keylen)
16102 +{
16103 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16104 + struct device *dev = ctx->dev;
16105 + struct crypto_authenc_keys keys;
16106 +
16107 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
16108 + goto badkey;
16109 +
16110 +#ifdef DEBUG
16111 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
16112 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
16113 + keys.authkeylen);
16114 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16115 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16116 +#endif
16117 +
16118 + ctx->adata.keylen = keys.authkeylen;
16119 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
16120 + OP_ALG_ALGSEL_MASK);
16121 +
16122 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
16123 + goto badkey;
16124 +
16125 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
16126 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
16127 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
16128 + keys.enckeylen, ctx->dir);
16129 +#ifdef DEBUG
16130 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16131 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
16132 + ctx->adata.keylen_pad + keys.enckeylen, 1);
16133 +#endif
16134 +
16135 + ctx->cdata.keylen = keys.enckeylen;
16136 +
16137 + return aead_set_sh_desc(aead);
16138 +badkey:
16139 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
16140 + return -EINVAL;
16141 +}
16142 +
16143 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
16144 + bool encrypt)
16145 +{
16146 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
16147 + struct caam_request *req_ctx = aead_request_ctx(req);
16148 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16149 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16150 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16151 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
16152 + typeof(*alg), aead);
16153 + struct device *dev = ctx->dev;
16154 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16155 + GFP_KERNEL : GFP_ATOMIC;
16156 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
16157 + struct aead_edesc *edesc;
16158 + dma_addr_t qm_sg_dma, iv_dma = 0;
16159 + int ivsize = 0;
16160 + unsigned int authsize = ctx->authsize;
16161 + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
16162 + int in_len, out_len;
16163 + struct dpaa2_sg_entry *sg_table;
16164 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
16165 +
16166 + /* allocate space for base edesc and link tables */
16167 + edesc = qi_cache_zalloc(GFP_DMA | flags);
16168 + if (unlikely(!edesc)) {
16169 + dev_err(dev, "could not allocate extended descriptor\n");
16170 + return ERR_PTR(-ENOMEM);
16171 + }
16172 +
16173 + if (unlikely(req->dst != req->src)) {
16174 + src_nents = sg_nents_for_len(req->src, req->assoclen +
16175 + req->cryptlen);
16176 + if (unlikely(src_nents < 0)) {
16177 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16178 + req->assoclen + req->cryptlen);
16179 + qi_cache_free(edesc);
16180 + return ERR_PTR(src_nents);
16181 + }
16182 +
16183 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
16184 + req->cryptlen +
16185 + (encrypt ? authsize :
16186 + (-authsize)));
16187 + if (unlikely(dst_nents < 0)) {
16188 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16189 + req->assoclen + req->cryptlen +
16190 + (encrypt ? authsize : (-authsize)));
16191 + qi_cache_free(edesc);
16192 + return ERR_PTR(dst_nents);
16193 + }
16194 +
16195 + if (src_nents) {
16196 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16197 + DMA_TO_DEVICE);
16198 + if (unlikely(!mapped_src_nents)) {
16199 + dev_err(dev, "unable to map source\n");
16200 + qi_cache_free(edesc);
16201 + return ERR_PTR(-ENOMEM);
16202 + }
16203 + } else {
16204 + mapped_src_nents = 0;
16205 + }
16206 +
16207 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16208 + DMA_FROM_DEVICE);
16209 + if (unlikely(!mapped_dst_nents)) {
16210 + dev_err(dev, "unable to map destination\n");
16211 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16212 + qi_cache_free(edesc);
16213 + return ERR_PTR(-ENOMEM);
16214 + }
16215 + } else {
16216 + src_nents = sg_nents_for_len(req->src, req->assoclen +
16217 + req->cryptlen +
16218 + (encrypt ? authsize : 0));
16219 + if (unlikely(src_nents < 0)) {
16220 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16221 + req->assoclen + req->cryptlen +
16222 + (encrypt ? authsize : 0));
16223 + qi_cache_free(edesc);
16224 + return ERR_PTR(src_nents);
16225 + }
16226 +
16227 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16228 + DMA_BIDIRECTIONAL);
16229 + if (unlikely(!mapped_src_nents)) {
16230 + dev_err(dev, "unable to map source\n");
16231 + qi_cache_free(edesc);
16232 + return ERR_PTR(-ENOMEM);
16233 + }
16234 + }
16235 +
16236 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
16237 + ivsize = crypto_aead_ivsize(aead);
16238 + iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
16239 + if (dma_mapping_error(dev, iv_dma)) {
16240 + dev_err(dev, "unable to map IV\n");
16241 + caam_unmap(dev, req->src, req->dst, src_nents,
16242 + dst_nents, 0, 0, op_type, 0, 0);
16243 + qi_cache_free(edesc);
16244 + return ERR_PTR(-ENOMEM);
16245 + }
16246 + }
16247 +
16248 + /*
16249 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
16250 + * Input is not contiguous.
16251 + */
16252 + qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
16253 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
16254 + if (unlikely(qm_sg_nents > CAAM_QI_MAX_AEAD_SG)) {
16255 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16256 + qm_sg_nents, CAAM_QI_MAX_AEAD_SG);
16257 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16258 + iv_dma, ivsize, op_type, 0, 0);
16259 + qi_cache_free(edesc);
16260 + return ERR_PTR(-ENOMEM);
16261 + }
16262 + sg_table = &edesc->sgt[0];
16263 + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
16264 +
16265 + edesc->src_nents = src_nents;
16266 + edesc->dst_nents = dst_nents;
16267 + edesc->iv_dma = iv_dma;
16268 +
16269 + edesc->assoclen = cpu_to_caam32(req->assoclen);
16270 + edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
16271 + DMA_TO_DEVICE);
16272 + if (dma_mapping_error(dev, edesc->assoclen_dma)) {
16273 + dev_err(dev, "unable to map assoclen\n");
16274 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16275 + iv_dma, ivsize, op_type, 0, 0);
16276 + qi_cache_free(edesc);
16277 + return ERR_PTR(-ENOMEM);
16278 + }
16279 +
16280 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
16281 + qm_sg_index++;
16282 + if (ivsize) {
16283 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
16284 + qm_sg_index++;
16285 + }
16286 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
16287 + qm_sg_index += mapped_src_nents;
16288 +
16289 + if (mapped_dst_nents > 1)
16290 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16291 + qm_sg_index, 0);
16292 +
16293 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
16294 + if (dma_mapping_error(dev, qm_sg_dma)) {
16295 + dev_err(dev, "unable to map S/G table\n");
16296 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
16297 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16298 + iv_dma, ivsize, op_type, 0, 0);
16299 + qi_cache_free(edesc);
16300 + return ERR_PTR(-ENOMEM);
16301 + }
16302 +
16303 + edesc->qm_sg_dma = qm_sg_dma;
16304 + edesc->qm_sg_bytes = qm_sg_bytes;
16305 +
16306 + out_len = req->assoclen + req->cryptlen +
16307 + (encrypt ? ctx->authsize : (-ctx->authsize));
16308 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
16309 +
16310 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16311 + dpaa2_fl_set_final(in_fle, true);
16312 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16313 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
16314 + dpaa2_fl_set_len(in_fle, in_len);
16315 +
16316 + if (req->dst == req->src) {
16317 + if (mapped_src_nents == 1) {
16318 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16319 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
16320 + } else {
16321 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16322 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
16323 + (1 + !!ivsize) * sizeof(*sg_table));
16324 + }
16325 + } else if (mapped_dst_nents == 1) {
16326 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16327 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16328 + } else {
16329 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16330 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
16331 + sizeof(*sg_table));
16332 + }
16333 +
16334 + dpaa2_fl_set_len(out_fle, out_len);
16335 +
16336 + return edesc;
16337 +}
16338 +
16339 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
16340 + bool encrypt)
16341 +{
16342 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
16343 + unsigned int blocksize = crypto_aead_blocksize(tls);
16344 + unsigned int padsize, authsize;
16345 + struct caam_request *req_ctx = aead_request_ctx(req);
16346 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16347 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16348 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16349 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
16350 + typeof(*alg), aead);
16351 + struct device *dev = ctx->dev;
16352 + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
16353 + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
16354 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
16355 + struct tls_edesc *edesc;
16356 + dma_addr_t qm_sg_dma, iv_dma = 0;
16357 + int ivsize = 0;
16358 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
16359 + int in_len, out_len;
16360 + struct dpaa2_sg_entry *sg_table;
16361 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
16362 + struct scatterlist *dst;
16363 +
16364 + if (encrypt) {
16365 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
16366 + blocksize);
16367 + authsize = ctx->authsize + padsize;
16368 + } else {
16369 + authsize = ctx->authsize;
16370 + }
16371 +
16372 + /* allocate space for base edesc and link tables */
16373 + edesc = qi_cache_zalloc(GFP_DMA | flags);
16374 + if (unlikely(!edesc)) {
16375 + dev_err(dev, "could not allocate extended descriptor\n");
16376 + return ERR_PTR(-ENOMEM);
16377 + }
16378 +
16379 + if (likely(req->src == req->dst)) {
16380 + src_nents = sg_nents_for_len(req->src, req->assoclen +
16381 + req->cryptlen +
16382 + (encrypt ? authsize : 0));
16383 + if (unlikely(src_nents < 0)) {
16384 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16385 + req->assoclen + req->cryptlen +
16386 + (encrypt ? authsize : 0));
16387 + qi_cache_free(edesc);
16388 + return ERR_PTR(src_nents);
16389 + }
16390 +
16391 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16392 + DMA_BIDIRECTIONAL);
16393 + if (unlikely(!mapped_src_nents)) {
16394 + dev_err(dev, "unable to map source\n");
16395 + qi_cache_free(edesc);
16396 + return ERR_PTR(-ENOMEM);
16397 + }
16398 + dst = req->dst;
16399 + } else {
16400 + src_nents = sg_nents_for_len(req->src, req->assoclen +
16401 + req->cryptlen);
16402 + if (unlikely(src_nents < 0)) {
16403 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16404 + req->assoclen + req->cryptlen);
16405 + qi_cache_free(edesc);
16406 + return ERR_PTR(src_nents);
16407 + }
16408 +
16409 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
16410 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
16411 + (encrypt ? authsize : 0));
16412 + if (unlikely(dst_nents < 0)) {
16413 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16414 + req->cryptlen +
16415 + (encrypt ? authsize : 0));
16416 + qi_cache_free(edesc);
16417 + return ERR_PTR(dst_nents);
16418 + }
16419 +
16420 + if (src_nents) {
16421 + mapped_src_nents = dma_map_sg(dev, req->src,
16422 + src_nents, DMA_TO_DEVICE);
16423 + if (unlikely(!mapped_src_nents)) {
16424 + dev_err(dev, "unable to map source\n");
16425 + qi_cache_free(edesc);
16426 + return ERR_PTR(-ENOMEM);
16427 + }
16428 + } else {
16429 + mapped_src_nents = 0;
16430 + }
16431 +
16432 + mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
16433 + DMA_FROM_DEVICE);
16434 + if (unlikely(!mapped_dst_nents)) {
16435 + dev_err(dev, "unable to map destination\n");
16436 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16437 + qi_cache_free(edesc);
16438 + return ERR_PTR(-ENOMEM);
16439 + }
16440 + }
16441 +
16442 + ivsize = crypto_aead_ivsize(tls);
16443 + iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
16444 + if (dma_mapping_error(dev, iv_dma)) {
16445 + dev_err(dev, "unable to map IV\n");
16446 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0,
16447 + op_type, 0, 0);
16448 + qi_cache_free(edesc);
16449 + return ERR_PTR(-ENOMEM);
16450 + }
16451 +
16452 + /*
16453 + * Create S/G table: IV, src, dst.
16454 + * Input is not contiguous.
16455 + */
16456 + qm_sg_ents = 1 + mapped_src_nents +
16457 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
16458 + sg_table = &edesc->sgt[0];
16459 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16460 +
16461 + edesc->src_nents = src_nents;
16462 + edesc->dst_nents = dst_nents;
16463 + edesc->dst = dst;
16464 + edesc->iv_dma = iv_dma;
16465 +
16466 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16467 + qm_sg_index = 1;
16468 +
16469 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
16470 + qm_sg_index += mapped_src_nents;
16471 +
16472 + if (mapped_dst_nents > 1)
16473 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
16474 + qm_sg_index, 0);
16475 +
16476 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
16477 + if (dma_mapping_error(dev, qm_sg_dma)) {
16478 + dev_err(dev, "unable to map S/G table\n");
16479 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
16480 + ivsize, op_type, 0, 0);
16481 + qi_cache_free(edesc);
16482 + return ERR_PTR(-ENOMEM);
16483 + }
16484 +
16485 + edesc->qm_sg_dma = qm_sg_dma;
16486 + edesc->qm_sg_bytes = qm_sg_bytes;
16487 +
16488 + out_len = req->cryptlen + (encrypt ? authsize : 0);
16489 + in_len = ivsize + req->assoclen + req->cryptlen;
16490 +
16491 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16492 + dpaa2_fl_set_final(in_fle, true);
16493 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16494 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
16495 + dpaa2_fl_set_len(in_fle, in_len);
16496 +
16497 + if (req->dst == req->src) {
16498 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16499 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
16500 + (sg_nents_for_len(req->src, req->assoclen) +
16501 + 1) * sizeof(*sg_table));
16502 + } else if (mapped_dst_nents == 1) {
16503 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16504 + dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
16505 + } else {
16506 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16507 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
16508 + sizeof(*sg_table));
16509 + }
16510 +
16511 + dpaa2_fl_set_len(out_fle, out_len);
16512 +
16513 + return edesc;
16514 +}
16515 +
16516 +static int tls_set_sh_desc(struct crypto_aead *tls)
16517 +{
16518 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16519 + unsigned int ivsize = crypto_aead_ivsize(tls);
16520 + unsigned int blocksize = crypto_aead_blocksize(tls);
16521 + struct device *dev = ctx->dev;
16522 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
16523 + struct caam_flc *flc;
16524 + u32 *desc;
16525 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
16526 + unsigned int data_len[2];
16527 + u32 inl_mask;
16528 +
16529 + if (!ctx->cdata.keylen || !ctx->authsize)
16530 + return 0;
16531 +
16532 + /*
16533 + * TLS 1.0 encrypt shared descriptor
16534 + * Job Descriptor and Shared Descriptor
16535 + * must fit into the 64-word Descriptor h/w Buffer
16536 + */
16537 + data_len[0] = ctx->adata.keylen_pad;
16538 + data_len[1] = ctx->cdata.keylen;
16539 +
16540 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
16541 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
16542 + return -EINVAL;
16543 +
16544 + if (inl_mask & 1)
16545 + ctx->adata.key_virt = ctx->key;
16546 + else
16547 + ctx->adata.key_dma = ctx->key_dma;
16548 +
16549 + if (inl_mask & 2)
16550 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
16551 + else
16552 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16553 +
16554 + ctx->adata.key_inline = !!(inl_mask & 1);
16555 + ctx->cdata.key_inline = !!(inl_mask & 2);
16556 +
16557 + flc = &ctx->flc[ENCRYPT];
16558 + desc = flc->sh_desc;
16559 + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
16560 + assoclen, ivsize, ctx->authsize, blocksize,
16561 + priv->sec_attr.era);
16562 + flc->flc[1] = cpu_to_caam32(desc_len(desc));
16563 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16564 + sizeof(flc->flc) + desc_bytes(desc),
16565 + ctx->dir);
16566 +
16567 + /*
16568 + * TLS 1.0 decrypt shared descriptor
16569 + * Keys do not fit inline, regardless of algorithms used
16570 + */
16571 + ctx->adata.key_inline = false;
16572 + ctx->adata.key_dma = ctx->key_dma;
16573 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16574 +
16575 + flc = &ctx->flc[DECRYPT];
16576 + desc = flc->sh_desc;
16577 + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
16578 + ctx->authsize, blocksize, priv->sec_attr.era);
16579 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
16580 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16581 + sizeof(flc->flc) + desc_bytes(desc),
16582 + ctx->dir);
16583 +
16584 + return 0;
16585 +}
16586 +
16587 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
16588 + unsigned int keylen)
16589 +{
16590 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16591 + struct device *dev = ctx->dev;
16592 + struct crypto_authenc_keys keys;
16593 +
16594 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
16595 + goto badkey;
16596 +
16597 +#ifdef DEBUG
16598 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
16599 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
16600 + keys.authkeylen);
16601 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16602 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16603 +#endif
16604 +
16605 + ctx->adata.keylen = keys.authkeylen;
16606 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
16607 + OP_ALG_ALGSEL_MASK);
16608 +
16609 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
16610 + goto badkey;
16611 +
16612 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
16613 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
16614 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
16615 + keys.enckeylen, ctx->dir);
16616 +#ifdef DEBUG
16617 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16618 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
16619 + ctx->adata.keylen_pad + keys.enckeylen, 1);
16620 +#endif
16621 +
16622 + ctx->cdata.keylen = keys.enckeylen;
16623 +
16624 + return tls_set_sh_desc(tls);
16625 +badkey:
16626 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
16627 + return -EINVAL;
16628 +}
16629 +
16630 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
16631 +{
16632 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16633 +
16634 + ctx->authsize = authsize;
16635 + tls_set_sh_desc(tls);
16636 +
16637 + return 0;
16638 +}
16639 +
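+/*
+ * For the GCM, RFC4106 and RFC4543 descriptors below, rem_bytes is what is
+ * left of the 64-word descriptor buffer once the job descriptor I/O overhead
+ * and the key have been reserved; the key is inlined only if the algorithm's
+ * descriptor body still fits in that remainder, otherwise it is referenced
+ * by DMA address.
+ */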
16640 +static int gcm_set_sh_desc(struct crypto_aead *aead)
16641 +{
16642 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16643 + struct device *dev = ctx->dev;
16644 + unsigned int ivsize = crypto_aead_ivsize(aead);
16645 + struct caam_flc *flc;
16646 + u32 *desc;
16647 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16648 + ctx->cdata.keylen;
16649 +
16650 + if (!ctx->cdata.keylen || !ctx->authsize)
16651 + return 0;
16652 +
16653 + /*
16654 + * AES GCM encrypt shared descriptor
16655 + * Job Descriptor and Shared Descriptor
16656 + * must fit into the 64-word Descriptor h/w Buffer
16657 + */
16658 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
16659 + ctx->cdata.key_inline = true;
16660 + ctx->cdata.key_virt = ctx->key;
16661 + } else {
16662 + ctx->cdata.key_inline = false;
16663 + ctx->cdata.key_dma = ctx->key_dma;
16664 + }
16665 +
16666 + flc = &ctx->flc[ENCRYPT];
16667 + desc = flc->sh_desc;
16668 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16669 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
16670 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16671 + sizeof(flc->flc) + desc_bytes(desc),
16672 + ctx->dir);
16673 +
16674 + /*
16675 + * Job Descriptor and Shared Descriptors
16676 + * must all fit into the 64-word Descriptor h/w Buffer
16677 + */
16678 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
16679 + ctx->cdata.key_inline = true;
16680 + ctx->cdata.key_virt = ctx->key;
16681 + } else {
16682 + ctx->cdata.key_inline = false;
16683 + ctx->cdata.key_dma = ctx->key_dma;
16684 + }
16685 +
16686 + flc = &ctx->flc[DECRYPT];
16687 + desc = flc->sh_desc;
16688 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16689 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
16690 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16691 + sizeof(flc->flc) + desc_bytes(desc),
16692 + ctx->dir);
16693 +
16694 + return 0;
16695 +}
16696 +
16697 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
16698 +{
16699 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16700 +
16701 + ctx->authsize = authsize;
16702 + gcm_set_sh_desc(authenc);
16703 +
16704 + return 0;
16705 +}
16706 +
16707 +static int gcm_setkey(struct crypto_aead *aead,
16708 + const u8 *key, unsigned int keylen)
16709 +{
16710 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16711 + struct device *dev = ctx->dev;
16712 +
16713 +#ifdef DEBUG
16714 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16715 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16716 +#endif
16717 +
16718 + memcpy(ctx->key, key, keylen);
16719 + dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
16720 + ctx->cdata.keylen = keylen;
16721 +
16722 + return gcm_set_sh_desc(aead);
16723 +}
16724 +
16725 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
16726 +{
16727 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16728 + struct device *dev = ctx->dev;
16729 + unsigned int ivsize = crypto_aead_ivsize(aead);
16730 + struct caam_flc *flc;
16731 + u32 *desc;
16732 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16733 + ctx->cdata.keylen;
16734 +
16735 + if (!ctx->cdata.keylen || !ctx->authsize)
16736 + return 0;
16737 +
16738 + ctx->cdata.key_virt = ctx->key;
16739 +
16740 + /*
16741 + * RFC4106 encrypt shared descriptor
16742 + * Job Descriptor and Shared Descriptor
16743 + * must fit into the 64-word Descriptor h/w Buffer
16744 + */
16745 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
16746 + ctx->cdata.key_inline = true;
16747 + } else {
16748 + ctx->cdata.key_inline = false;
16749 + ctx->cdata.key_dma = ctx->key_dma;
16750 + }
16751 +
16752 + flc = &ctx->flc[ENCRYPT];
16753 + desc = flc->sh_desc;
16754 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16755 + true);
16756 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
16757 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16758 + sizeof(flc->flc) + desc_bytes(desc),
16759 + ctx->dir);
16760 +
16761 + /*
16762 + * Job Descriptor and Shared Descriptors
16763 + * must all fit into the 64-word Descriptor h/w Buffer
16764 + */
16765 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
16766 + ctx->cdata.key_inline = true;
16767 + } else {
16768 + ctx->cdata.key_inline = false;
16769 + ctx->cdata.key_dma = ctx->key_dma;
16770 + }
16771 +
16772 + flc = &ctx->flc[DECRYPT];
16773 + desc = flc->sh_desc;
16774 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16775 + true);
16776 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
16777 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16778 + sizeof(flc->flc) + desc_bytes(desc),
16779 + ctx->dir);
16780 +
16781 + return 0;
16782 +}
16783 +
16784 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
16785 + unsigned int authsize)
16786 +{
16787 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16788 +
16789 + ctx->authsize = authsize;
16790 + rfc4106_set_sh_desc(authenc);
16791 +
16792 + return 0;
16793 +}
16794 +
16795 +static int rfc4106_setkey(struct crypto_aead *aead,
16796 + const u8 *key, unsigned int keylen)
16797 +{
16798 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16799 + struct device *dev = ctx->dev;
16800 +
16801 + if (keylen < 4)
16802 + return -EINVAL;
16803 +
16804 +#ifdef DEBUG
16805 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16806 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16807 +#endif
16808 +
16809 + memcpy(ctx->key, key, keylen);
16810 + /*
16811 + * The last four bytes of the key material are used as the salt value
16812 + * in the nonce. Update the AES key length.
16813 + */
16814 + ctx->cdata.keylen = keylen - 4;
16815 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
16816 + ctx->dir);
16817 +
16818 + return rfc4106_set_sh_desc(aead);
16819 +}
16820 +
16821 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
16822 +{
16823 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16824 + struct device *dev = ctx->dev;
16825 + unsigned int ivsize = crypto_aead_ivsize(aead);
16826 + struct caam_flc *flc;
16827 + u32 *desc;
16828 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16829 + ctx->cdata.keylen;
16830 +
16831 + if (!ctx->cdata.keylen || !ctx->authsize)
16832 + return 0;
16833 +
16834 + ctx->cdata.key_virt = ctx->key;
16835 +
16836 + /*
16837 + * RFC4543 encrypt shared descriptor
16838 + * Job Descriptor and Shared Descriptor
16839 + * must fit into the 64-word Descriptor h/w Buffer
16840 + */
16841 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
16842 + ctx->cdata.key_inline = true;
16843 + } else {
16844 + ctx->cdata.key_inline = false;
16845 + ctx->cdata.key_dma = ctx->key_dma;
16846 + }
16847 +
16848 + flc = &ctx->flc[ENCRYPT];
16849 + desc = flc->sh_desc;
16850 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16851 + true);
16852 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
16853 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16854 + sizeof(flc->flc) + desc_bytes(desc),
16855 + ctx->dir);
16856 +
16857 + /*
16858 + * Job Descriptor and Shared Descriptors
16859 + * must all fit into the 64-word Descriptor h/w Buffer
16860 + */
16861 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
16862 + ctx->cdata.key_inline = true;
16863 + } else {
16864 + ctx->cdata.key_inline = false;
16865 + ctx->cdata.key_dma = ctx->key_dma;
16866 + }
16867 +
16868 + flc = &ctx->flc[DECRYPT];
16869 + desc = flc->sh_desc;
16870 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16871 + true);
16872 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
16873 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16874 + sizeof(flc->flc) + desc_bytes(desc),
16875 + ctx->dir);
16876 +
16877 + return 0;
16878 +}
16879 +
16880 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
16881 + unsigned int authsize)
16882 +{
16883 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16884 +
16885 + ctx->authsize = authsize;
16886 + rfc4543_set_sh_desc(authenc);
16887 +
16888 + return 0;
16889 +}
16890 +
16891 +static int rfc4543_setkey(struct crypto_aead *aead,
16892 + const u8 *key, unsigned int keylen)
16893 +{
16894 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16895 + struct device *dev = ctx->dev;
16896 +
16897 + if (keylen < 4)
16898 + return -EINVAL;
16899 +
16900 +#ifdef DEBUG
16901 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16902 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16903 +#endif
16904 +
16905 + memcpy(ctx->key, key, keylen);
16906 + /*
16907 + * The last four bytes of the key material are used as the salt value
16908 + * in the nonce. Update the AES key length.
16909 + */
16910 + ctx->cdata.keylen = keylen - 4;
16911 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
16912 + ctx->dir);
16913 +
16914 + return rfc4543_set_sh_desc(aead);
16915 +}
16916 +
16917 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16918 + const u8 *key, unsigned int keylen)
16919 +{
16920 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16921 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
16922 + const char *alg_name = crypto_tfm_alg_name(tfm);
16923 + struct device *dev = ctx->dev;
16924 + struct caam_flc *flc;
16925 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16926 + u32 *desc;
16927 + u32 ctx1_iv_off = 0;
16928 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
16929 + OP_ALG_AAI_CTR_MOD128);
16930 + const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
16931 +
16932 +#ifdef DEBUG
16933 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16934 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16935 +#endif
16936 + /*
16937 + * AES-CTR needs to load IV in CONTEXT1 reg
16938 +	 * at an offset of 128 bits (16 bytes)
16939 + * CONTEXT1[255:128] = IV
16940 + */
16941 + if (ctr_mode)
16942 + ctx1_iv_off = 16;
16943 +
16944 + /*
16945 + * RFC3686 specific:
16946 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
16947 + * | *key = {KEY, NONCE}
16948 + */
16949 + if (is_rfc3686) {
16950 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
16951 + keylen -= CTR_RFC3686_NONCE_SIZE;
16952 + }
16953 +
16954 + ctx->cdata.keylen = keylen;
16955 + ctx->cdata.key_virt = key;
16956 + ctx->cdata.key_inline = true;
16957 +
16958 + /* ablkcipher_encrypt shared descriptor */
16959 + flc = &ctx->flc[ENCRYPT];
16960 + desc = flc->sh_desc;
16961 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
16962 + is_rfc3686, ctx1_iv_off);
16963 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
16964 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16965 + sizeof(flc->flc) + desc_bytes(desc),
16966 + ctx->dir);
16967 +
16968 + /* ablkcipher_decrypt shared descriptor */
16969 + flc = &ctx->flc[DECRYPT];
16970 + desc = flc->sh_desc;
16971 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
16972 + is_rfc3686, ctx1_iv_off);
16973 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
16974 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16975 + sizeof(flc->flc) + desc_bytes(desc),
16976 + ctx->dir);
16977 +
16978 + /* ablkcipher_givencrypt shared descriptor */
16979 + flc = &ctx->flc[GIVENCRYPT];
16980 + desc = flc->sh_desc;
16981 + cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
16982 + ivsize, is_rfc3686, ctx1_iv_off);
16983 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
16984 + dma_sync_single_for_device(dev, ctx->flc_dma[GIVENCRYPT],
16985 + sizeof(flc->flc) + desc_bytes(desc),
16986 + ctx->dir);
16987 +
16988 + return 0;
16989 +}
16990 +
16991 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16992 + const u8 *key, unsigned int keylen)
16993 +{
16994 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16995 + struct device *dev = ctx->dev;
16996 + struct caam_flc *flc;
16997 + u32 *desc;
16998 +
16999 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
17000 + dev_err(dev, "key size mismatch\n");
17001 + crypto_ablkcipher_set_flags(ablkcipher,
17002 + CRYPTO_TFM_RES_BAD_KEY_LEN);
17003 + return -EINVAL;
17004 + }
17005 +
17006 + ctx->cdata.keylen = keylen;
17007 + ctx->cdata.key_virt = key;
17008 + ctx->cdata.key_inline = true;
17009 +
17010 + /* xts_ablkcipher_encrypt shared descriptor */
17011 + flc = &ctx->flc[ENCRYPT];
17012 + desc = flc->sh_desc;
17013 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
17014 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
17015 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
17016 + sizeof(flc->flc) + desc_bytes(desc),
17017 + ctx->dir);
17018 +
17019 + /* xts_ablkcipher_decrypt shared descriptor */
17020 + flc = &ctx->flc[DECRYPT];
17021 + desc = flc->sh_desc;
17022 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
17023 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
17024 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
17025 + sizeof(flc->flc) + desc_bytes(desc),
17026 + ctx->dir);
17027 +
17028 + return 0;
17029 +}
17030 +
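+/*
+ * Build the extended descriptor for an ablkcipher request.  The input frame
+ * list entry carries IV + data, either as a single buffer when the IV is
+ * contiguous with a single-entry source, or through a QI S/G table; the
+ * output entry points at the destination (in-place when src == dst).
+ */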
17031 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
17032 + *req, bool encrypt)
17033 +{
17034 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17035 + struct caam_request *req_ctx = ablkcipher_request_ctx(req);
17036 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
17037 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
17038 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17039 + struct device *dev = ctx->dev;
17040 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
17041 + GFP_KERNEL : GFP_ATOMIC;
17042 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
17043 + struct ablkcipher_edesc *edesc;
17044 + dma_addr_t iv_dma;
17045 + bool in_contig;
17046 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17047 + int dst_sg_idx, qm_sg_ents;
17048 + struct dpaa2_sg_entry *sg_table;
17049 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
17050 +
17051 + src_nents = sg_nents_for_len(req->src, req->nbytes);
17052 + if (unlikely(src_nents < 0)) {
17053 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
17054 + req->nbytes);
17055 + return ERR_PTR(src_nents);
17056 + }
17057 +
17058 + if (unlikely(req->dst != req->src)) {
17059 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
17060 + if (unlikely(dst_nents < 0)) {
17061 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
17062 + req->nbytes);
17063 + return ERR_PTR(dst_nents);
17064 + }
17065 +
17066 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
17067 + DMA_TO_DEVICE);
17068 + if (unlikely(!mapped_src_nents)) {
17069 + dev_err(dev, "unable to map source\n");
17070 + return ERR_PTR(-ENOMEM);
17071 + }
17072 +
17073 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
17074 + DMA_FROM_DEVICE);
17075 + if (unlikely(!mapped_dst_nents)) {
17076 + dev_err(dev, "unable to map destination\n");
17077 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
17078 + return ERR_PTR(-ENOMEM);
17079 + }
17080 + } else {
17081 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
17082 + DMA_BIDIRECTIONAL);
17083 + if (unlikely(!mapped_src_nents)) {
17084 + dev_err(dev, "unable to map source\n");
17085 + return ERR_PTR(-ENOMEM);
17086 + }
17087 + }
17088 +
17089 + iv_dma = dma_map_single(dev, req->info, ivsize, DMA_TO_DEVICE);
17090 + if (dma_mapping_error(dev, iv_dma)) {
17091 + dev_err(dev, "unable to map IV\n");
17092 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
17093 + 0, 0, 0, 0);
17094 + return ERR_PTR(-ENOMEM);
17095 + }
17096 +
17097 + if (mapped_src_nents == 1 &&
17098 + iv_dma + ivsize == sg_dma_address(req->src)) {
17099 + in_contig = true;
17100 + qm_sg_ents = 0;
17101 + } else {
17102 + in_contig = false;
17103 + qm_sg_ents = 1 + mapped_src_nents;
17104 + }
17105 + dst_sg_idx = qm_sg_ents;
17106 +
17107 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
17108 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
17109 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
17110 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
17111 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
17112 + iv_dma, ivsize, op_type, 0, 0);
17113 + return ERR_PTR(-ENOMEM);
17114 + }
17115 +
17116 + /* allocate space for base edesc and link tables */
17117 + edesc = qi_cache_zalloc(GFP_DMA | flags);
17118 + if (unlikely(!edesc)) {
17119 + dev_err(dev, "could not allocate extended descriptor\n");
17120 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
17121 + iv_dma, ivsize, op_type, 0, 0);
17122 + return ERR_PTR(-ENOMEM);
17123 + }
17124 +
17125 + edesc->src_nents = src_nents;
17126 + edesc->dst_nents = dst_nents;
17127 + edesc->iv_dma = iv_dma;
17128 + sg_table = &edesc->sgt[0];
17129 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
17130 +
17131 + if (!in_contig) {
17132 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
17133 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
17134 + }
17135 +
17136 + if (mapped_dst_nents > 1)
17137 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
17138 + dst_sg_idx, 0);
17139 +
17140 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
17141 + DMA_TO_DEVICE);
17142 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
17143 + dev_err(dev, "unable to map S/G table\n");
17144 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
17145 + iv_dma, ivsize, op_type, 0, 0);
17146 + qi_cache_free(edesc);
17147 + return ERR_PTR(-ENOMEM);
17148 + }
17149 +
17150 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
17151 + dpaa2_fl_set_final(in_fle, true);
17152 + dpaa2_fl_set_len(in_fle, req->nbytes + ivsize);
17153 + dpaa2_fl_set_len(out_fle, req->nbytes);
17154 +
17155 + if (!in_contig) {
17156 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
17157 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
17158 + } else {
17159 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
17160 + dpaa2_fl_set_addr(in_fle, iv_dma);
17161 + }
17162 +
17163 + if (req->src == req->dst) {
17164 + if (!in_contig) {
17165 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
17166 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
17167 + sizeof(*sg_table));
17168 + } else {
17169 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
17170 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
17171 + }
17172 + } else if (mapped_dst_nents > 1) {
17173 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
17174 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
17175 + sizeof(*sg_table));
17176 + } else {
17177 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
17178 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
17179 + }
17180 +
17181 + return edesc;
17182 +}
17183 +
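+/*
+ * givencrypt variant of ablkcipher_edesc_alloc(): the IV is generated by the
+ * accelerator, so greq->giv is mapped DMA_FROM_DEVICE and the output frame
+ * list entry covers the IV followed by the ciphertext.
+ */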
17184 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
17185 + struct skcipher_givcrypt_request *greq)
17186 +{
17187 + struct ablkcipher_request *req = &greq->creq;
17188 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17189 + struct caam_request *req_ctx = ablkcipher_request_ctx(req);
17190 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
17191 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
17192 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17193 + struct device *dev = ctx->dev;
17194 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
17195 + GFP_KERNEL : GFP_ATOMIC;
17196 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
17197 + struct ablkcipher_edesc *edesc;
17198 + dma_addr_t iv_dma;
17199 + bool out_contig;
17200 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17201 + struct dpaa2_sg_entry *sg_table;
17202 + int dst_sg_idx, qm_sg_ents;
17203 +
17204 + src_nents = sg_nents_for_len(req->src, req->nbytes);
17205 + if (unlikely(src_nents < 0)) {
17206 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
17207 + req->nbytes);
17208 + return ERR_PTR(src_nents);
17209 + }
17210 +
17211 + if (unlikely(req->dst != req->src)) {
17212 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
17213 + if (unlikely(dst_nents < 0)) {
17214 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
17215 + req->nbytes);
17216 + return ERR_PTR(dst_nents);
17217 + }
17218 +
17219 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
17220 + DMA_TO_DEVICE);
17221 + if (unlikely(!mapped_src_nents)) {
17222 + dev_err(dev, "unable to map source\n");
17223 + return ERR_PTR(-ENOMEM);
17224 + }
17225 +
17226 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
17227 + DMA_FROM_DEVICE);
17228 + if (unlikely(!mapped_dst_nents)) {
17229 + dev_err(dev, "unable to map destination\n");
17230 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
17231 + return ERR_PTR(-ENOMEM);
17232 + }
17233 + } else {
17234 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
17235 + DMA_BIDIRECTIONAL);
17236 + if (unlikely(!mapped_src_nents)) {
17237 + dev_err(dev, "unable to map source\n");
17238 + return ERR_PTR(-ENOMEM);
17239 + }
17240 +
17241 + dst_nents = src_nents;
17242 + mapped_dst_nents = src_nents;
17243 + }
17244 +
17245 + iv_dma = dma_map_single(dev, greq->giv, ivsize, DMA_FROM_DEVICE);
17246 + if (dma_mapping_error(dev, iv_dma)) {
17247 + dev_err(dev, "unable to map IV\n");
17248 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
17249 + 0, 0, 0, 0);
17250 + return ERR_PTR(-ENOMEM);
17251 + }
17252 +
17253 + qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
17254 + dst_sg_idx = qm_sg_ents;
17255 + if (mapped_dst_nents == 1 &&
17256 + iv_dma + ivsize == sg_dma_address(req->dst)) {
17257 + out_contig = true;
17258 + } else {
17259 + out_contig = false;
17260 + qm_sg_ents += 1 + mapped_dst_nents;
17261 + }
17262 +
17263 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
17264 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
17265 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
17266 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
17267 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
17268 + return ERR_PTR(-ENOMEM);
17269 + }
17270 +
17271 + /* allocate space for base edesc and link tables */
17272 + edesc = qi_cache_zalloc(GFP_DMA | flags);
17273 + if (!edesc) {
17274 + dev_err(dev, "could not allocate extended descriptor\n");
17275 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
17276 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
17277 + return ERR_PTR(-ENOMEM);
17278 + }
17279 +
17280 + edesc->src_nents = src_nents;
17281 + edesc->dst_nents = dst_nents;
17282 + edesc->iv_dma = iv_dma;
17283 + sg_table = &edesc->sgt[0];
17284 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
17285 +
17286 + if (mapped_src_nents > 1)
17287 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
17288 +
17289 + if (!out_contig) {
17290 + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
17291 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
17292 + dst_sg_idx + 1, 0);
17293 + }
17294 +
17295 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
17296 + DMA_TO_DEVICE);
17297 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
17298 + dev_err(dev, "unable to map S/G table\n");
17299 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
17300 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
17301 + qi_cache_free(edesc);
17302 + return ERR_PTR(-ENOMEM);
17303 + }
17304 +
17305 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
17306 + dpaa2_fl_set_final(in_fle, true);
17307 + dpaa2_fl_set_len(in_fle, req->nbytes);
17308 + dpaa2_fl_set_len(out_fle, ivsize + req->nbytes);
17309 +
17310 + if (mapped_src_nents > 1) {
17311 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
17312 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
17313 + } else {
17314 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
17315 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
17316 + }
17317 +
17318 + if (!out_contig) {
17319 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
17320 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
17321 + sizeof(*sg_table));
17322 + } else {
17323 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
17324 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
17325 + }
17326 +
17327 + return edesc;
17328 +}
17329 +
17330 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
17331 + struct aead_request *req)
17332 +{
17333 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17334 + int ivsize = crypto_aead_ivsize(aead);
17335 + struct caam_request *caam_req = aead_request_ctx(req);
17336 +
17337 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
17338 + edesc->iv_dma, ivsize, caam_req->op_type,
17339 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
17340 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
17341 +}
17342 +
17343 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
17344 + struct aead_request *req)
17345 +{
17346 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17347 + int ivsize = crypto_aead_ivsize(tls);
17348 + struct caam_request *caam_req = aead_request_ctx(req);
17349 +
17350 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
17351 + edesc->dst_nents, edesc->iv_dma, ivsize, caam_req->op_type,
17352 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
17353 +}
17354 +
17355 +static void ablkcipher_unmap(struct device *dev,
17356 + struct ablkcipher_edesc *edesc,
17357 + struct ablkcipher_request *req)
17358 +{
17359 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17360 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17361 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17362 +
17363 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
17364 + edesc->iv_dma, ivsize, caam_req->op_type,
17365 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
17366 +}
17367 +
17368 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
17369 +{
17370 + struct crypto_async_request *areq = cbk_ctx;
17371 + struct aead_request *req = container_of(areq, struct aead_request,
17372 + base);
17373 + struct caam_request *req_ctx = to_caam_req(areq);
17374 + struct aead_edesc *edesc = req_ctx->edesc;
17375 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17376 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17377 + int ecode = 0;
17378 +
17379 +#ifdef DEBUG
17380 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17381 +#endif
17382 +
17383 + if (unlikely(status)) {
17384 + caam_qi2_strstatus(ctx->dev, status);
17385 + ecode = -EIO;
17386 + }
17387 +
17388 + aead_unmap(ctx->dev, edesc, req);
17389 + qi_cache_free(edesc);
17390 + aead_request_complete(req, ecode);
17391 +}
17392 +
17393 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
17394 +{
17395 + struct crypto_async_request *areq = cbk_ctx;
17396 + struct aead_request *req = container_of(areq, struct aead_request,
17397 + base);
17398 + struct caam_request *req_ctx = to_caam_req(areq);
17399 + struct aead_edesc *edesc = req_ctx->edesc;
17400 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17401 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17402 + int ecode = 0;
17403 +
17404 +#ifdef DEBUG
17405 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17406 +#endif
17407 +
17408 + if (unlikely(status)) {
17409 + caam_qi2_strstatus(ctx->dev, status);
17410 +		/*
17411 +		 * Check whether the hw ICV check passed; if not, return -EBADMSG.
17412 +		 */
17413 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17414 + JRSTA_CCBERR_ERRID_ICVCHK)
17415 + ecode = -EBADMSG;
17416 + else
17417 + ecode = -EIO;
17418 + }
17419 +
17420 + aead_unmap(ctx->dev, edesc, req);
17421 + qi_cache_free(edesc);
17422 + aead_request_complete(req, ecode);
17423 +}
17424 +
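+/*
+ * Requests are handed to the hardware through dpaa2_caam_enqueue();
+ * -EINPROGRESS means the request was accepted and will complete through the
+ * registered callback.  The extended descriptor is kept across -EBUSY only
+ * for CRYPTO_TFM_REQ_MAY_BACKLOG requests; any other return value means the
+ * request was not taken, so it is unmapped and freed immediately.
+ */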
17425 +static int aead_encrypt(struct aead_request *req)
17426 +{
17427 + struct aead_edesc *edesc;
17428 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17429 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17430 + struct caam_request *caam_req = aead_request_ctx(req);
17431 + int ret;
17432 +
17433 + /* allocate extended descriptor */
17434 + edesc = aead_edesc_alloc(req, true);
17435 + if (IS_ERR(edesc))
17436 + return PTR_ERR(edesc);
17437 +
17438 + caam_req->flc = &ctx->flc[ENCRYPT];
17439 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
17440 + caam_req->op_type = ENCRYPT;
17441 + caam_req->cbk = aead_encrypt_done;
17442 + caam_req->ctx = &req->base;
17443 + caam_req->edesc = edesc;
17444 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17445 + if (ret != -EINPROGRESS &&
17446 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17447 + aead_unmap(ctx->dev, edesc, req);
17448 + qi_cache_free(edesc);
17449 + }
17450 +
17451 + return ret;
17452 +}
17453 +
17454 +static int aead_decrypt(struct aead_request *req)
17455 +{
17456 + struct aead_edesc *edesc;
17457 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17458 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17459 + struct caam_request *caam_req = aead_request_ctx(req);
17460 + int ret;
17461 +
17462 + /* allocate extended descriptor */
17463 + edesc = aead_edesc_alloc(req, false);
17464 + if (IS_ERR(edesc))
17465 + return PTR_ERR(edesc);
17466 +
17467 + caam_req->flc = &ctx->flc[DECRYPT];
17468 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
17469 + caam_req->op_type = DECRYPT;
17470 + caam_req->cbk = aead_decrypt_done;
17471 + caam_req->ctx = &req->base;
17472 + caam_req->edesc = edesc;
17473 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17474 + if (ret != -EINPROGRESS &&
17475 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17476 + aead_unmap(ctx->dev, edesc, req);
17477 + qi_cache_free(edesc);
17478 + }
17479 +
17480 + return ret;
17481 +}
17482 +
17483 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
17484 +{
17485 + struct crypto_async_request *areq = cbk_ctx;
17486 + struct aead_request *req = container_of(areq, struct aead_request,
17487 + base);
17488 + struct caam_request *req_ctx = to_caam_req(areq);
17489 + struct tls_edesc *edesc = req_ctx->edesc;
17490 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17491 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17492 + int ecode = 0;
17493 +
17494 +#ifdef DEBUG
17495 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17496 +#endif
17497 +
17498 + if (unlikely(status)) {
17499 + caam_qi2_strstatus(ctx->dev, status);
17500 + ecode = -EIO;
17501 + }
17502 +
17503 + tls_unmap(ctx->dev, edesc, req);
17504 + qi_cache_free(edesc);
17505 + aead_request_complete(req, ecode);
17506 +}
17507 +
17508 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
17509 +{
17510 + struct crypto_async_request *areq = cbk_ctx;
17511 + struct aead_request *req = container_of(areq, struct aead_request,
17512 + base);
17513 + struct caam_request *req_ctx = to_caam_req(areq);
17514 + struct tls_edesc *edesc = req_ctx->edesc;
17515 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17516 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17517 + int ecode = 0;
17518 +
17519 +#ifdef DEBUG
17520 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17521 +#endif
17522 +
17523 + if (unlikely(status)) {
17524 + caam_qi2_strstatus(ctx->dev, status);
17525 +		/*
17526 +		 * Check whether the hw ICV check passed; if not, return -EBADMSG.
17527 +		 */
17528 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17529 + JRSTA_CCBERR_ERRID_ICVCHK)
17530 + ecode = -EBADMSG;
17531 + else
17532 + ecode = -EIO;
17533 + }
17534 +
17535 + tls_unmap(ctx->dev, edesc, req);
17536 + qi_cache_free(edesc);
17537 + aead_request_complete(req, ecode);
17538 +}
17539 +
17540 +static int tls_encrypt(struct aead_request *req)
17541 +{
17542 + struct tls_edesc *edesc;
17543 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17544 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17545 + struct caam_request *caam_req = aead_request_ctx(req);
17546 + int ret;
17547 +
17548 + /* allocate extended descriptor */
17549 + edesc = tls_edesc_alloc(req, true);
17550 + if (IS_ERR(edesc))
17551 + return PTR_ERR(edesc);
17552 +
17553 + caam_req->flc = &ctx->flc[ENCRYPT];
17554 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
17555 + caam_req->op_type = ENCRYPT;
17556 + caam_req->cbk = tls_encrypt_done;
17557 + caam_req->ctx = &req->base;
17558 + caam_req->edesc = edesc;
17559 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17560 + if (ret != -EINPROGRESS &&
17561 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17562 + tls_unmap(ctx->dev, edesc, req);
17563 + qi_cache_free(edesc);
17564 + }
17565 +
17566 + return ret;
17567 +}
17568 +
17569 +static int tls_decrypt(struct aead_request *req)
17570 +{
17571 + struct tls_edesc *edesc;
17572 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17573 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17574 + struct caam_request *caam_req = aead_request_ctx(req);
17575 + int ret;
17576 +
17577 + /* allocate extended descriptor */
17578 + edesc = tls_edesc_alloc(req, false);
17579 + if (IS_ERR(edesc))
17580 + return PTR_ERR(edesc);
17581 +
17582 + caam_req->flc = &ctx->flc[DECRYPT];
17583 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
17584 + caam_req->op_type = DECRYPT;
17585 + caam_req->cbk = tls_decrypt_done;
17586 + caam_req->ctx = &req->base;
17587 + caam_req->edesc = edesc;
17588 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17589 + if (ret != -EINPROGRESS &&
17590 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17591 + tls_unmap(ctx->dev, edesc, req);
17592 + qi_cache_free(edesc);
17593 + }
17594 +
17595 + return ret;
17596 +}
17597 +
17598 +static int ipsec_gcm_encrypt(struct aead_request *req)
17599 +{
17600 + if (req->assoclen < 8)
17601 + return -EINVAL;
17602 +
17603 + return aead_encrypt(req);
17604 +}
17605 +
17606 +static int ipsec_gcm_decrypt(struct aead_request *req)
17607 +{
17608 + if (req->assoclen < 8)
17609 + return -EINVAL;
17610 +
17611 + return aead_decrypt(req);
17612 +}
17613 +
17614 +static void ablkcipher_done(void *cbk_ctx, u32 status)
17615 +{
17616 + struct crypto_async_request *areq = cbk_ctx;
17617 + struct ablkcipher_request *req = ablkcipher_request_cast(areq);
17618 + struct caam_request *req_ctx = to_caam_req(areq);
17619 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17620 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17621 + struct ablkcipher_edesc *edesc = req_ctx->edesc;
17622 + int ecode = 0;
17623 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17624 +
17625 +#ifdef DEBUG
17626 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17627 +#endif
17628 +
17629 + if (unlikely(status)) {
17630 + caam_qi2_strstatus(ctx->dev, status);
17631 + ecode = -EIO;
17632 + }
17633 +
17634 +#ifdef DEBUG
17635 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
17636 + DUMP_PREFIX_ADDRESS, 16, 4, req->info,
17637 + edesc->src_nents > 1 ? 100 : ivsize, 1);
17638 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
17639 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
17640 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
17641 +#endif
17642 +
17643 + ablkcipher_unmap(ctx->dev, edesc, req);
17644 + qi_cache_free(edesc);
17645 +
17646 + /*
17647 + * The crypto API expects us to set the IV (req->info) to the last
17648 + * ciphertext block. This is used e.g. by the CTS mode.
17649 + */
17650 + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
17651 + ivsize, 0);
17652 +
17653 + ablkcipher_request_complete(req, ecode);
17654 +}
17655 +
17656 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
17657 +{
17658 + struct ablkcipher_edesc *edesc;
17659 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17660 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17661 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17662 + int ret;
17663 +
17664 + /* allocate extended descriptor */
17665 + edesc = ablkcipher_edesc_alloc(req, true);
17666 + if (IS_ERR(edesc))
17667 + return PTR_ERR(edesc);
17668 +
17669 + caam_req->flc = &ctx->flc[ENCRYPT];
17670 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
17671 + caam_req->op_type = ENCRYPT;
17672 + caam_req->cbk = ablkcipher_done;
17673 + caam_req->ctx = &req->base;
17674 + caam_req->edesc = edesc;
17675 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17676 + if (ret != -EINPROGRESS &&
17677 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17678 + ablkcipher_unmap(ctx->dev, edesc, req);
17679 + qi_cache_free(edesc);
17680 + }
17681 +
17682 + return ret;
17683 +}
17684 +
17685 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *greq)
17686 +{
17687 + struct ablkcipher_request *req = &greq->creq;
17688 + struct ablkcipher_edesc *edesc;
17689 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17690 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17691 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17692 + int ret;
17693 +
17694 + /* allocate extended descriptor */
17695 + edesc = ablkcipher_giv_edesc_alloc(greq);
17696 + if (IS_ERR(edesc))
17697 + return PTR_ERR(edesc);
17698 +
17699 + caam_req->flc = &ctx->flc[GIVENCRYPT];
17700 + caam_req->flc_dma = ctx->flc_dma[GIVENCRYPT];
17701 + caam_req->op_type = GIVENCRYPT;
17702 + caam_req->cbk = ablkcipher_done;
17703 + caam_req->ctx = &req->base;
17704 + caam_req->edesc = edesc;
17705 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17706 + if (ret != -EINPROGRESS &&
17707 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17708 + ablkcipher_unmap(ctx->dev, edesc, req);
17709 + qi_cache_free(edesc);
17710 + }
17711 +
17712 + return ret;
17713 +}
17714 +
17715 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
17716 +{
17717 + struct ablkcipher_edesc *edesc;
17718 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17719 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17720 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17721 + int ret;
17722 +
17723 + /* allocate extended descriptor */
17724 + edesc = ablkcipher_edesc_alloc(req, false);
17725 + if (IS_ERR(edesc))
17726 + return PTR_ERR(edesc);
17727 +
17728 + caam_req->flc = &ctx->flc[DECRYPT];
17729 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
17730 + caam_req->op_type = DECRYPT;
17731 + caam_req->cbk = ablkcipher_done;
17732 + caam_req->ctx = &req->base;
17733 + caam_req->edesc = edesc;
17734 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17735 + if (ret != -EINPROGRESS &&
17736 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17737 + ablkcipher_unmap(ctx->dev, edesc, req);
17738 + qi_cache_free(edesc);
17739 + }
17740 +
17741 + return ret;
17742 +}
17743 +
17744 +struct caam_crypto_alg {
17745 + struct list_head entry;
17746 + struct crypto_alg crypto_alg;
17747 + struct caam_alg_entry caam;
17748 +};
17749 +
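+/*
+ * A single streaming DMA mapping covers the per-context shared descriptors
+ * (ctx->flc[]) and the key material laid out right after them in struct
+ * caam_ctx; flc_dma[i] and key_dma are derived as offsets into that mapping.
+ */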
17750 +static int caam_cra_init(struct crypto_tfm *tfm, bool uses_dkp)
17751 +{
17752 + struct crypto_alg *alg = tfm->__crt_alg;
17753 + struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
17754 + crypto_alg);
17755 + struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
17756 + dma_addr_t dma_addr;
17757 + int i;
17758 +
17759 + /* copy descriptor header template value */
17760 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
17761 + caam_alg->caam.class1_alg_type;
17762 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG |
17763 + caam_alg->caam.class2_alg_type;
17764 +
17765 + ctx->dev = caam_alg->caam.dev;
17766 + ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
17767 +
17768 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
17769 + offsetof(struct caam_ctx, flc_dma),
17770 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
17771 + if (dma_mapping_error(ctx->dev, dma_addr)) {
17772 + dev_err(ctx->dev, "unable to map key, shared descriptors\n");
17773 + return -ENOMEM;
17774 + }
17775 +
17776 + for (i = 0; i < NUM_OP; i++)
17777 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
17778 + ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
17779 +
17780 + return 0;
17781 +}
17782 +
17783 +static int caam_cra_init_ablkcipher(struct crypto_tfm *tfm)
17784 +{
17785 + struct ablkcipher_tfm *ablkcipher_tfm =
17786 + crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
17787 +
17788 + ablkcipher_tfm->reqsize = sizeof(struct caam_request);
17789 + return caam_cra_init(tfm, false);
17790 +}
17791 +
17792 +static int caam_cra_init_aead(struct crypto_aead *tfm)
17793 +{
17794 + struct aead_alg *alg = crypto_aead_alg(tfm);
17795 +
17796 + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
17797 + return caam_cra_init(crypto_aead_tfm(tfm),
17798 + (alg->setkey == aead_setkey) ||
17799 + (alg->setkey == tls_setkey));
17800 +}
17801 +
17802 +static void caam_exit_common(struct caam_ctx *ctx)
17803 +{
17804 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
17805 + offsetof(struct caam_ctx, flc_dma), ctx->dir,
17806 + DMA_ATTR_SKIP_CPU_SYNC);
17807 +}
17808 +
17809 +static void caam_cra_exit(struct crypto_tfm *tfm)
17810 +{
17811 + caam_exit_common(crypto_tfm_ctx(tfm));
17812 +}
17813 +
17814 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
17815 +{
17816 + caam_exit_common(crypto_aead_ctx(tfm));
17817 +}
17818 +
17819 +#define template_ablkcipher template_u.ablkcipher
17820 +struct caam_alg_template {
17821 + char name[CRYPTO_MAX_ALG_NAME];
17822 + char driver_name[CRYPTO_MAX_ALG_NAME];
17823 + unsigned int blocksize;
17824 + u32 type;
17825 + union {
17826 + struct ablkcipher_alg ablkcipher;
17827 + } template_u;
17828 + u32 class1_alg_type;
17829 + u32 class2_alg_type;
17830 +};
17831 +
17832 +static struct caam_alg_template driver_algs[] = {
17833 + /* ablkcipher descriptor */
17834 + {
17835 + .name = "cbc(aes)",
17836 + .driver_name = "cbc-aes-caam-qi2",
17837 + .blocksize = AES_BLOCK_SIZE,
17838 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17839 + .template_ablkcipher = {
17840 + .setkey = ablkcipher_setkey,
17841 + .encrypt = ablkcipher_encrypt,
17842 + .decrypt = ablkcipher_decrypt,
17843 + .givencrypt = ablkcipher_givencrypt,
17844 + .geniv = "<built-in>",
17845 + .min_keysize = AES_MIN_KEY_SIZE,
17846 + .max_keysize = AES_MAX_KEY_SIZE,
17847 + .ivsize = AES_BLOCK_SIZE,
17848 + },
17849 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17850 + },
17851 + {
17852 + .name = "cbc(des3_ede)",
17853 + .driver_name = "cbc-3des-caam-qi2",
17854 + .blocksize = DES3_EDE_BLOCK_SIZE,
17855 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17856 + .template_ablkcipher = {
17857 + .setkey = ablkcipher_setkey,
17858 + .encrypt = ablkcipher_encrypt,
17859 + .decrypt = ablkcipher_decrypt,
17860 + .givencrypt = ablkcipher_givencrypt,
17861 + .geniv = "<built-in>",
17862 + .min_keysize = DES3_EDE_KEY_SIZE,
17863 + .max_keysize = DES3_EDE_KEY_SIZE,
17864 + .ivsize = DES3_EDE_BLOCK_SIZE,
17865 + },
17866 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17867 + },
17868 + {
17869 + .name = "cbc(des)",
17870 + .driver_name = "cbc-des-caam-qi2",
17871 + .blocksize = DES_BLOCK_SIZE,
17872 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17873 + .template_ablkcipher = {
17874 + .setkey = ablkcipher_setkey,
17875 + .encrypt = ablkcipher_encrypt,
17876 + .decrypt = ablkcipher_decrypt,
17877 + .givencrypt = ablkcipher_givencrypt,
17878 + .geniv = "<built-in>",
17879 + .min_keysize = DES_KEY_SIZE,
17880 + .max_keysize = DES_KEY_SIZE,
17881 + .ivsize = DES_BLOCK_SIZE,
17882 + },
17883 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
17884 + },
17885 + {
17886 + .name = "ctr(aes)",
17887 + .driver_name = "ctr-aes-caam-qi2",
17888 + .blocksize = 1,
17889 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17890 + .template_ablkcipher = {
17891 + .setkey = ablkcipher_setkey,
17892 + .encrypt = ablkcipher_encrypt,
17893 + .decrypt = ablkcipher_decrypt,
17894 + .geniv = "chainiv",
17895 + .min_keysize = AES_MIN_KEY_SIZE,
17896 + .max_keysize = AES_MAX_KEY_SIZE,
17897 + .ivsize = AES_BLOCK_SIZE,
17898 + },
17899 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17900 + },
17901 + {
17902 + .name = "rfc3686(ctr(aes))",
17903 + .driver_name = "rfc3686-ctr-aes-caam-qi2",
17904 + .blocksize = 1,
17905 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17906 + .template_ablkcipher = {
17907 + .setkey = ablkcipher_setkey,
17908 + .encrypt = ablkcipher_encrypt,
17909 + .decrypt = ablkcipher_decrypt,
17910 + .givencrypt = ablkcipher_givencrypt,
17911 + .geniv = "<built-in>",
17912 + .min_keysize = AES_MIN_KEY_SIZE +
17913 + CTR_RFC3686_NONCE_SIZE,
17914 + .max_keysize = AES_MAX_KEY_SIZE +
17915 + CTR_RFC3686_NONCE_SIZE,
17916 + .ivsize = CTR_RFC3686_IV_SIZE,
17917 + },
17918 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17919 + },
17920 + {
17921 + .name = "xts(aes)",
17922 + .driver_name = "xts-aes-caam-qi2",
17923 + .blocksize = AES_BLOCK_SIZE,
17924 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17925 + .template_ablkcipher = {
17926 + .setkey = xts_ablkcipher_setkey,
17927 + .encrypt = ablkcipher_encrypt,
17928 + .decrypt = ablkcipher_decrypt,
17929 + .geniv = "eseqiv",
17930 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
17931 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
17932 + .ivsize = AES_BLOCK_SIZE,
17933 + },
17934 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
17935 + }
17936 +};
17937 +
17938 +static struct caam_aead_alg driver_aeads[] = {
17939 + {
17940 + .aead = {
17941 + .base = {
17942 + .cra_name = "rfc4106(gcm(aes))",
17943 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
17944 + .cra_blocksize = 1,
17945 + },
17946 + .setkey = rfc4106_setkey,
17947 + .setauthsize = rfc4106_setauthsize,
17948 + .encrypt = ipsec_gcm_encrypt,
17949 + .decrypt = ipsec_gcm_decrypt,
17950 + .ivsize = 8,
17951 + .maxauthsize = AES_BLOCK_SIZE,
17952 + },
17953 + .caam = {
17954 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17955 + },
17956 + },
17957 + {
17958 + .aead = {
17959 + .base = {
17960 + .cra_name = "rfc4543(gcm(aes))",
17961 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
17962 + .cra_blocksize = 1,
17963 + },
17964 + .setkey = rfc4543_setkey,
17965 + .setauthsize = rfc4543_setauthsize,
17966 + .encrypt = ipsec_gcm_encrypt,
17967 + .decrypt = ipsec_gcm_decrypt,
17968 + .ivsize = 8,
17969 + .maxauthsize = AES_BLOCK_SIZE,
17970 + },
17971 + .caam = {
17972 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17973 + },
17974 + },
17975 + /* Galois Counter Mode */
17976 + {
17977 + .aead = {
17978 + .base = {
17979 + .cra_name = "gcm(aes)",
17980 + .cra_driver_name = "gcm-aes-caam-qi2",
17981 + .cra_blocksize = 1,
17982 + },
17983 + .setkey = gcm_setkey,
17984 + .setauthsize = gcm_setauthsize,
17985 + .encrypt = aead_encrypt,
17986 + .decrypt = aead_decrypt,
17987 + .ivsize = 12,
17988 + .maxauthsize = AES_BLOCK_SIZE,
17989 + },
17990 + .caam = {
17991 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17992 + }
17993 + },
17994 + /* single-pass ipsec_esp descriptor */
17995 + {
17996 + .aead = {
17997 + .base = {
17998 + .cra_name = "authenc(hmac(md5),cbc(aes))",
17999 + .cra_driver_name = "authenc-hmac-md5-"
18000 + "cbc-aes-caam-qi2",
18001 + .cra_blocksize = AES_BLOCK_SIZE,
18002 + },
18003 + .setkey = aead_setkey,
18004 + .setauthsize = aead_setauthsize,
18005 + .encrypt = aead_encrypt,
18006 + .decrypt = aead_decrypt,
18007 + .ivsize = AES_BLOCK_SIZE,
18008 + .maxauthsize = MD5_DIGEST_SIZE,
18009 + },
18010 + .caam = {
18011 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18012 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18013 + OP_ALG_AAI_HMAC_PRECOMP,
18014 + }
18015 + },
18016 + {
18017 + .aead = {
18018 + .base = {
18019 + .cra_name = "echainiv(authenc(hmac(md5),"
18020 + "cbc(aes)))",
18021 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
18022 + "cbc-aes-caam-qi2",
18023 + .cra_blocksize = AES_BLOCK_SIZE,
18024 + },
18025 + .setkey = aead_setkey,
18026 + .setauthsize = aead_setauthsize,
18027 + .encrypt = aead_encrypt,
18028 + .decrypt = aead_decrypt,
18029 + .ivsize = AES_BLOCK_SIZE,
18030 + .maxauthsize = MD5_DIGEST_SIZE,
18031 + },
18032 + .caam = {
18033 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18034 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18035 + OP_ALG_AAI_HMAC_PRECOMP,
18036 + .geniv = true,
18037 + }
18038 + },
18039 + {
18040 + .aead = {
18041 + .base = {
18042 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
18043 + .cra_driver_name = "authenc-hmac-sha1-"
18044 + "cbc-aes-caam-qi2",
18045 + .cra_blocksize = AES_BLOCK_SIZE,
18046 + },
18047 + .setkey = aead_setkey,
18048 + .setauthsize = aead_setauthsize,
18049 + .encrypt = aead_encrypt,
18050 + .decrypt = aead_decrypt,
18051 + .ivsize = AES_BLOCK_SIZE,
18052 + .maxauthsize = SHA1_DIGEST_SIZE,
18053 + },
18054 + .caam = {
18055 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18056 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18057 + OP_ALG_AAI_HMAC_PRECOMP,
18058 + }
18059 + },
18060 + {
18061 + .aead = {
18062 + .base = {
18063 + .cra_name = "echainiv(authenc(hmac(sha1),"
18064 + "cbc(aes)))",
18065 + .cra_driver_name = "echainiv-authenc-"
18066 + "hmac-sha1-cbc-aes-caam-qi2",
18067 + .cra_blocksize = AES_BLOCK_SIZE,
18068 + },
18069 + .setkey = aead_setkey,
18070 + .setauthsize = aead_setauthsize,
18071 + .encrypt = aead_encrypt,
18072 + .decrypt = aead_decrypt,
18073 + .ivsize = AES_BLOCK_SIZE,
18074 + .maxauthsize = SHA1_DIGEST_SIZE,
18075 + },
18076 + .caam = {
18077 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18078 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18079 + OP_ALG_AAI_HMAC_PRECOMP,
18080 + .geniv = true,
18081 + },
18082 + },
18083 + {
18084 + .aead = {
18085 + .base = {
18086 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
18087 + .cra_driver_name = "authenc-hmac-sha224-"
18088 + "cbc-aes-caam-qi2",
18089 + .cra_blocksize = AES_BLOCK_SIZE,
18090 + },
18091 + .setkey = aead_setkey,
18092 + .setauthsize = aead_setauthsize,
18093 + .encrypt = aead_encrypt,
18094 + .decrypt = aead_decrypt,
18095 + .ivsize = AES_BLOCK_SIZE,
18096 + .maxauthsize = SHA224_DIGEST_SIZE,
18097 + },
18098 + .caam = {
18099 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18100 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18101 + OP_ALG_AAI_HMAC_PRECOMP,
18102 + }
18103 + },
18104 + {
18105 + .aead = {
18106 + .base = {
18107 + .cra_name = "echainiv(authenc(hmac(sha224),"
18108 + "cbc(aes)))",
18109 + .cra_driver_name = "echainiv-authenc-"
18110 + "hmac-sha224-cbc-aes-caam-qi2",
18111 + .cra_blocksize = AES_BLOCK_SIZE,
18112 + },
18113 + .setkey = aead_setkey,
18114 + .setauthsize = aead_setauthsize,
18115 + .encrypt = aead_encrypt,
18116 + .decrypt = aead_decrypt,
18117 + .ivsize = AES_BLOCK_SIZE,
18118 + .maxauthsize = SHA224_DIGEST_SIZE,
18119 + },
18120 + .caam = {
18121 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18122 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18123 + OP_ALG_AAI_HMAC_PRECOMP,
18124 + .geniv = true,
18125 + }
18126 + },
18127 + {
18128 + .aead = {
18129 + .base = {
18130 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
18131 + .cra_driver_name = "authenc-hmac-sha256-"
18132 + "cbc-aes-caam-qi2",
18133 + .cra_blocksize = AES_BLOCK_SIZE,
18134 + },
18135 + .setkey = aead_setkey,
18136 + .setauthsize = aead_setauthsize,
18137 + .encrypt = aead_encrypt,
18138 + .decrypt = aead_decrypt,
18139 + .ivsize = AES_BLOCK_SIZE,
18140 + .maxauthsize = SHA256_DIGEST_SIZE,
18141 + },
18142 + .caam = {
18143 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18144 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18145 + OP_ALG_AAI_HMAC_PRECOMP,
18146 + }
18147 + },
18148 + {
18149 + .aead = {
18150 + .base = {
18151 + .cra_name = "echainiv(authenc(hmac(sha256),"
18152 + "cbc(aes)))",
18153 + .cra_driver_name = "echainiv-authenc-"
18154 + "hmac-sha256-cbc-aes-"
18155 + "caam-qi2",
18156 + .cra_blocksize = AES_BLOCK_SIZE,
18157 + },
18158 + .setkey = aead_setkey,
18159 + .setauthsize = aead_setauthsize,
18160 + .encrypt = aead_encrypt,
18161 + .decrypt = aead_decrypt,
18162 + .ivsize = AES_BLOCK_SIZE,
18163 + .maxauthsize = SHA256_DIGEST_SIZE,
18164 + },
18165 + .caam = {
18166 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18167 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18168 + OP_ALG_AAI_HMAC_PRECOMP,
18169 + .geniv = true,
18170 + }
18171 + },
18172 + {
18173 + .aead = {
18174 + .base = {
18175 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
18176 + .cra_driver_name = "authenc-hmac-sha384-"
18177 + "cbc-aes-caam-qi2",
18178 + .cra_blocksize = AES_BLOCK_SIZE,
18179 + },
18180 + .setkey = aead_setkey,
18181 + .setauthsize = aead_setauthsize,
18182 + .encrypt = aead_encrypt,
18183 + .decrypt = aead_decrypt,
18184 + .ivsize = AES_BLOCK_SIZE,
18185 + .maxauthsize = SHA384_DIGEST_SIZE,
18186 + },
18187 + .caam = {
18188 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18189 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18190 + OP_ALG_AAI_HMAC_PRECOMP,
18191 + }
18192 + },
18193 + {
18194 + .aead = {
18195 + .base = {
18196 + .cra_name = "echainiv(authenc(hmac(sha384),"
18197 + "cbc(aes)))",
18198 + .cra_driver_name = "echainiv-authenc-"
18199 + "hmac-sha384-cbc-aes-"
18200 + "caam-qi2",
18201 + .cra_blocksize = AES_BLOCK_SIZE,
18202 + },
18203 + .setkey = aead_setkey,
18204 + .setauthsize = aead_setauthsize,
18205 + .encrypt = aead_encrypt,
18206 + .decrypt = aead_decrypt,
18207 + .ivsize = AES_BLOCK_SIZE,
18208 + .maxauthsize = SHA384_DIGEST_SIZE,
18209 + },
18210 + .caam = {
18211 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18212 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18213 + OP_ALG_AAI_HMAC_PRECOMP,
18214 + .geniv = true,
18215 + }
18216 + },
18217 + {
18218 + .aead = {
18219 + .base = {
18220 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
18221 + .cra_driver_name = "authenc-hmac-sha512-"
18222 + "cbc-aes-caam-qi2",
18223 + .cra_blocksize = AES_BLOCK_SIZE,
18224 + },
18225 + .setkey = aead_setkey,
18226 + .setauthsize = aead_setauthsize,
18227 + .encrypt = aead_encrypt,
18228 + .decrypt = aead_decrypt,
18229 + .ivsize = AES_BLOCK_SIZE,
18230 + .maxauthsize = SHA512_DIGEST_SIZE,
18231 + },
18232 + .caam = {
18233 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18234 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18235 + OP_ALG_AAI_HMAC_PRECOMP,
18236 + }
18237 + },
18238 + {
18239 + .aead = {
18240 + .base = {
18241 + .cra_name = "echainiv(authenc(hmac(sha512),"
18242 + "cbc(aes)))",
18243 + .cra_driver_name = "echainiv-authenc-"
18244 + "hmac-sha512-cbc-aes-"
18245 + "caam-qi2",
18246 + .cra_blocksize = AES_BLOCK_SIZE,
18247 + },
18248 + .setkey = aead_setkey,
18249 + .setauthsize = aead_setauthsize,
18250 + .encrypt = aead_encrypt,
18251 + .decrypt = aead_decrypt,
18252 + .ivsize = AES_BLOCK_SIZE,
18253 + .maxauthsize = SHA512_DIGEST_SIZE,
18254 + },
18255 + .caam = {
18256 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18257 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18258 + OP_ALG_AAI_HMAC_PRECOMP,
18259 + .geniv = true,
18260 + }
18261 + },
18262 + {
18263 + .aead = {
18264 + .base = {
18265 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
18266 + .cra_driver_name = "authenc-hmac-md5-"
18267 + "cbc-des3_ede-caam-qi2",
18268 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18269 + },
18270 + .setkey = aead_setkey,
18271 + .setauthsize = aead_setauthsize,
18272 + .encrypt = aead_encrypt,
18273 + .decrypt = aead_decrypt,
18274 + .ivsize = DES3_EDE_BLOCK_SIZE,
18275 + .maxauthsize = MD5_DIGEST_SIZE,
18276 + },
18277 + .caam = {
18278 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18279 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18280 + OP_ALG_AAI_HMAC_PRECOMP,
18281 + }
18282 + },
18283 + {
18284 + .aead = {
18285 + .base = {
18286 + .cra_name = "echainiv(authenc(hmac(md5),"
18287 + "cbc(des3_ede)))",
18288 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
18289 + "cbc-des3_ede-caam-qi2",
18290 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18291 + },
18292 + .setkey = aead_setkey,
18293 + .setauthsize = aead_setauthsize,
18294 + .encrypt = aead_encrypt,
18295 + .decrypt = aead_decrypt,
18296 + .ivsize = DES3_EDE_BLOCK_SIZE,
18297 + .maxauthsize = MD5_DIGEST_SIZE,
18298 + },
18299 + .caam = {
18300 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18301 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18302 + OP_ALG_AAI_HMAC_PRECOMP,
18303 + .geniv = true,
18304 + }
18305 + },
18306 + {
18307 + .aead = {
18308 + .base = {
18309 + .cra_name = "authenc(hmac(sha1),"
18310 + "cbc(des3_ede))",
18311 + .cra_driver_name = "authenc-hmac-sha1-"
18312 + "cbc-des3_ede-caam-qi2",
18313 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18314 + },
18315 + .setkey = aead_setkey,
18316 + .setauthsize = aead_setauthsize,
18317 + .encrypt = aead_encrypt,
18318 + .decrypt = aead_decrypt,
18319 + .ivsize = DES3_EDE_BLOCK_SIZE,
18320 + .maxauthsize = SHA1_DIGEST_SIZE,
18321 + },
18322 + .caam = {
18323 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18324 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18325 + OP_ALG_AAI_HMAC_PRECOMP,
18326 + },
18327 + },
18328 + {
18329 + .aead = {
18330 + .base = {
18331 + .cra_name = "echainiv(authenc(hmac(sha1),"
18332 + "cbc(des3_ede)))",
18333 + .cra_driver_name = "echainiv-authenc-"
18334 + "hmac-sha1-"
18335 + "cbc-des3_ede-caam-qi2",
18336 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18337 + },
18338 + .setkey = aead_setkey,
18339 + .setauthsize = aead_setauthsize,
18340 + .encrypt = aead_encrypt,
18341 + .decrypt = aead_decrypt,
18342 + .ivsize = DES3_EDE_BLOCK_SIZE,
18343 + .maxauthsize = SHA1_DIGEST_SIZE,
18344 + },
18345 + .caam = {
18346 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18347 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18348 + OP_ALG_AAI_HMAC_PRECOMP,
18349 + .geniv = true,
18350 + }
18351 + },
18352 + {
18353 + .aead = {
18354 + .base = {
18355 + .cra_name = "authenc(hmac(sha224),"
18356 + "cbc(des3_ede))",
18357 + .cra_driver_name = "authenc-hmac-sha224-"
18358 + "cbc-des3_ede-caam-qi2",
18359 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18360 + },
18361 + .setkey = aead_setkey,
18362 + .setauthsize = aead_setauthsize,
18363 + .encrypt = aead_encrypt,
18364 + .decrypt = aead_decrypt,
18365 + .ivsize = DES3_EDE_BLOCK_SIZE,
18366 + .maxauthsize = SHA224_DIGEST_SIZE,
18367 + },
18368 + .caam = {
18369 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18370 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18371 + OP_ALG_AAI_HMAC_PRECOMP,
18372 + },
18373 + },
18374 + {
18375 + .aead = {
18376 + .base = {
18377 + .cra_name = "echainiv(authenc(hmac(sha224),"
18378 + "cbc(des3_ede)))",
18379 + .cra_driver_name = "echainiv-authenc-"
18380 + "hmac-sha224-"
18381 + "cbc-des3_ede-caam-qi2",
18382 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18383 + },
18384 + .setkey = aead_setkey,
18385 + .setauthsize = aead_setauthsize,
18386 + .encrypt = aead_encrypt,
18387 + .decrypt = aead_decrypt,
18388 + .ivsize = DES3_EDE_BLOCK_SIZE,
18389 + .maxauthsize = SHA224_DIGEST_SIZE,
18390 + },
18391 + .caam = {
18392 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18393 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18394 + OP_ALG_AAI_HMAC_PRECOMP,
18395 + .geniv = true,
18396 + }
18397 + },
18398 + {
18399 + .aead = {
18400 + .base = {
18401 + .cra_name = "authenc(hmac(sha256),"
18402 + "cbc(des3_ede))",
18403 + .cra_driver_name = "authenc-hmac-sha256-"
18404 + "cbc-des3_ede-caam-qi2",
18405 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18406 + },
18407 + .setkey = aead_setkey,
18408 + .setauthsize = aead_setauthsize,
18409 + .encrypt = aead_encrypt,
18410 + .decrypt = aead_decrypt,
18411 + .ivsize = DES3_EDE_BLOCK_SIZE,
18412 + .maxauthsize = SHA256_DIGEST_SIZE,
18413 + },
18414 + .caam = {
18415 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18416 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18417 + OP_ALG_AAI_HMAC_PRECOMP,
18418 + },
18419 + },
18420 + {
18421 + .aead = {
18422 + .base = {
18423 + .cra_name = "echainiv(authenc(hmac(sha256),"
18424 + "cbc(des3_ede)))",
18425 + .cra_driver_name = "echainiv-authenc-"
18426 + "hmac-sha256-"
18427 + "cbc-des3_ede-caam-qi2",
18428 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18429 + },
18430 + .setkey = aead_setkey,
18431 + .setauthsize = aead_setauthsize,
18432 + .encrypt = aead_encrypt,
18433 + .decrypt = aead_decrypt,
18434 + .ivsize = DES3_EDE_BLOCK_SIZE,
18435 + .maxauthsize = SHA256_DIGEST_SIZE,
18436 + },
18437 + .caam = {
18438 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18439 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18440 + OP_ALG_AAI_HMAC_PRECOMP,
18441 + .geniv = true,
18442 + }
18443 + },
18444 + {
18445 + .aead = {
18446 + .base = {
18447 + .cra_name = "authenc(hmac(sha384),"
18448 + "cbc(des3_ede))",
18449 + .cra_driver_name = "authenc-hmac-sha384-"
18450 + "cbc-des3_ede-caam-qi2",
18451 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18452 + },
18453 + .setkey = aead_setkey,
18454 + .setauthsize = aead_setauthsize,
18455 + .encrypt = aead_encrypt,
18456 + .decrypt = aead_decrypt,
18457 + .ivsize = DES3_EDE_BLOCK_SIZE,
18458 + .maxauthsize = SHA384_DIGEST_SIZE,
18459 + },
18460 + .caam = {
18461 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18462 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18463 + OP_ALG_AAI_HMAC_PRECOMP,
18464 + },
18465 + },
18466 + {
18467 + .aead = {
18468 + .base = {
18469 + .cra_name = "echainiv(authenc(hmac(sha384),"
18470 + "cbc(des3_ede)))",
18471 + .cra_driver_name = "echainiv-authenc-"
18472 + "hmac-sha384-"
18473 + "cbc-des3_ede-caam-qi2",
18474 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18475 + },
18476 + .setkey = aead_setkey,
18477 + .setauthsize = aead_setauthsize,
18478 + .encrypt = aead_encrypt,
18479 + .decrypt = aead_decrypt,
18480 + .ivsize = DES3_EDE_BLOCK_SIZE,
18481 + .maxauthsize = SHA384_DIGEST_SIZE,
18482 + },
18483 + .caam = {
18484 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18485 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18486 + OP_ALG_AAI_HMAC_PRECOMP,
18487 + .geniv = true,
18488 + }
18489 + },
18490 + {
18491 + .aead = {
18492 + .base = {
18493 + .cra_name = "authenc(hmac(sha512),"
18494 + "cbc(des3_ede))",
18495 + .cra_driver_name = "authenc-hmac-sha512-"
18496 + "cbc-des3_ede-caam-qi2",
18497 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18498 + },
18499 + .setkey = aead_setkey,
18500 + .setauthsize = aead_setauthsize,
18501 + .encrypt = aead_encrypt,
18502 + .decrypt = aead_decrypt,
18503 + .ivsize = DES3_EDE_BLOCK_SIZE,
18504 + .maxauthsize = SHA512_DIGEST_SIZE,
18505 + },
18506 + .caam = {
18507 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18508 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18509 + OP_ALG_AAI_HMAC_PRECOMP,
18510 + },
18511 + },
18512 + {
18513 + .aead = {
18514 + .base = {
18515 + .cra_name = "echainiv(authenc(hmac(sha512),"
18516 + "cbc(des3_ede)))",
18517 + .cra_driver_name = "echainiv-authenc-"
18518 + "hmac-sha512-"
18519 + "cbc-des3_ede-caam-qi2",
18520 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18521 + },
18522 + .setkey = aead_setkey,
18523 + .setauthsize = aead_setauthsize,
18524 + .encrypt = aead_encrypt,
18525 + .decrypt = aead_decrypt,
18526 + .ivsize = DES3_EDE_BLOCK_SIZE,
18527 + .maxauthsize = SHA512_DIGEST_SIZE,
18528 + },
18529 + .caam = {
18530 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18531 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18532 + OP_ALG_AAI_HMAC_PRECOMP,
18533 + .geniv = true,
18534 + }
18535 + },
18536 + {
18537 + .aead = {
18538 + .base = {
18539 + .cra_name = "authenc(hmac(md5),cbc(des))",
18540 + .cra_driver_name = "authenc-hmac-md5-"
18541 + "cbc-des-caam-qi2",
18542 + .cra_blocksize = DES_BLOCK_SIZE,
18543 + },
18544 + .setkey = aead_setkey,
18545 + .setauthsize = aead_setauthsize,
18546 + .encrypt = aead_encrypt,
18547 + .decrypt = aead_decrypt,
18548 + .ivsize = DES_BLOCK_SIZE,
18549 + .maxauthsize = MD5_DIGEST_SIZE,
18550 + },
18551 + .caam = {
18552 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18553 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18554 + OP_ALG_AAI_HMAC_PRECOMP,
18555 + },
18556 + },
18557 + {
18558 + .aead = {
18559 + .base = {
18560 + .cra_name = "echainiv(authenc(hmac(md5),"
18561 + "cbc(des)))",
18562 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
18563 + "cbc-des-caam-qi2",
18564 + .cra_blocksize = DES_BLOCK_SIZE,
18565 + },
18566 + .setkey = aead_setkey,
18567 + .setauthsize = aead_setauthsize,
18568 + .encrypt = aead_encrypt,
18569 + .decrypt = aead_decrypt,
18570 + .ivsize = DES_BLOCK_SIZE,
18571 + .maxauthsize = MD5_DIGEST_SIZE,
18572 + },
18573 + .caam = {
18574 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18575 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18576 + OP_ALG_AAI_HMAC_PRECOMP,
18577 + .geniv = true,
18578 + }
18579 + },
18580 + {
18581 + .aead = {
18582 + .base = {
18583 + .cra_name = "authenc(hmac(sha1),cbc(des))",
18584 + .cra_driver_name = "authenc-hmac-sha1-"
18585 + "cbc-des-caam-qi2",
18586 + .cra_blocksize = DES_BLOCK_SIZE,
18587 + },
18588 + .setkey = aead_setkey,
18589 + .setauthsize = aead_setauthsize,
18590 + .encrypt = aead_encrypt,
18591 + .decrypt = aead_decrypt,
18592 + .ivsize = DES_BLOCK_SIZE,
18593 + .maxauthsize = SHA1_DIGEST_SIZE,
18594 + },
18595 + .caam = {
18596 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18597 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18598 + OP_ALG_AAI_HMAC_PRECOMP,
18599 + },
18600 + },
18601 + {
18602 + .aead = {
18603 + .base = {
18604 + .cra_name = "echainiv(authenc(hmac(sha1),"
18605 + "cbc(des)))",
18606 + .cra_driver_name = "echainiv-authenc-"
18607 + "hmac-sha1-cbc-des-caam-qi2",
18608 + .cra_blocksize = DES_BLOCK_SIZE,
18609 + },
18610 + .setkey = aead_setkey,
18611 + .setauthsize = aead_setauthsize,
18612 + .encrypt = aead_encrypt,
18613 + .decrypt = aead_decrypt,
18614 + .ivsize = DES_BLOCK_SIZE,
18615 + .maxauthsize = SHA1_DIGEST_SIZE,
18616 + },
18617 + .caam = {
18618 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18619 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18620 + OP_ALG_AAI_HMAC_PRECOMP,
18621 + .geniv = true,
18622 + }
18623 + },
18624 + {
18625 + .aead = {
18626 + .base = {
18627 + .cra_name = "authenc(hmac(sha224),cbc(des))",
18628 + .cra_driver_name = "authenc-hmac-sha224-"
18629 + "cbc-des-caam-qi2",
18630 + .cra_blocksize = DES_BLOCK_SIZE,
18631 + },
18632 + .setkey = aead_setkey,
18633 + .setauthsize = aead_setauthsize,
18634 + .encrypt = aead_encrypt,
18635 + .decrypt = aead_decrypt,
18636 + .ivsize = DES_BLOCK_SIZE,
18637 + .maxauthsize = SHA224_DIGEST_SIZE,
18638 + },
18639 + .caam = {
18640 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18641 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18642 + OP_ALG_AAI_HMAC_PRECOMP,
18643 + },
18644 + },
18645 + {
18646 + .aead = {
18647 + .base = {
18648 + .cra_name = "echainiv(authenc(hmac(sha224),"
18649 + "cbc(des)))",
18650 + .cra_driver_name = "echainiv-authenc-"
18651 + "hmac-sha224-cbc-des-"
18652 + "caam-qi2",
18653 + .cra_blocksize = DES_BLOCK_SIZE,
18654 + },
18655 + .setkey = aead_setkey,
18656 + .setauthsize = aead_setauthsize,
18657 + .encrypt = aead_encrypt,
18658 + .decrypt = aead_decrypt,
18659 + .ivsize = DES_BLOCK_SIZE,
18660 + .maxauthsize = SHA224_DIGEST_SIZE,
18661 + },
18662 + .caam = {
18663 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18664 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18665 + OP_ALG_AAI_HMAC_PRECOMP,
18666 + .geniv = true,
18667 + }
18668 + },
18669 + {
18670 + .aead = {
18671 + .base = {
18672 + .cra_name = "authenc(hmac(sha256),cbc(des))",
18673 + .cra_driver_name = "authenc-hmac-sha256-"
18674 + "cbc-des-caam-qi2",
18675 + .cra_blocksize = DES_BLOCK_SIZE,
18676 + },
18677 + .setkey = aead_setkey,
18678 + .setauthsize = aead_setauthsize,
18679 + .encrypt = aead_encrypt,
18680 + .decrypt = aead_decrypt,
18681 + .ivsize = DES_BLOCK_SIZE,
18682 + .maxauthsize = SHA256_DIGEST_SIZE,
18683 + },
18684 + .caam = {
18685 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18686 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18687 + OP_ALG_AAI_HMAC_PRECOMP,
18688 + },
18689 + },
18690 + {
18691 + .aead = {
18692 + .base = {
18693 + .cra_name = "echainiv(authenc(hmac(sha256),"
18694 + "cbc(des)))",
18695 + .cra_driver_name = "echainiv-authenc-"
18696 +						   "hmac-sha256-cbc-des-"
18697 + "caam-qi2",
18698 + .cra_blocksize = DES_BLOCK_SIZE,
18699 + },
18700 + .setkey = aead_setkey,
18701 + .setauthsize = aead_setauthsize,
18702 + .encrypt = aead_encrypt,
18703 + .decrypt = aead_decrypt,
18704 + .ivsize = DES_BLOCK_SIZE,
18705 + .maxauthsize = SHA256_DIGEST_SIZE,
18706 + },
18707 + .caam = {
18708 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18709 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18710 + OP_ALG_AAI_HMAC_PRECOMP,
18711 + .geniv = true,
18712 + },
18713 + },
18714 + {
18715 + .aead = {
18716 + .base = {
18717 + .cra_name = "authenc(hmac(sha384),cbc(des))",
18718 + .cra_driver_name = "authenc-hmac-sha384-"
18719 + "cbc-des-caam-qi2",
18720 + .cra_blocksize = DES_BLOCK_SIZE,
18721 + },
18722 + .setkey = aead_setkey,
18723 + .setauthsize = aead_setauthsize,
18724 + .encrypt = aead_encrypt,
18725 + .decrypt = aead_decrypt,
18726 + .ivsize = DES_BLOCK_SIZE,
18727 + .maxauthsize = SHA384_DIGEST_SIZE,
18728 + },
18729 + .caam = {
18730 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18731 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18732 + OP_ALG_AAI_HMAC_PRECOMP,
18733 + },
18734 + },
18735 + {
18736 + .aead = {
18737 + .base = {
18738 + .cra_name = "echainiv(authenc(hmac(sha384),"
18739 + "cbc(des)))",
18740 + .cra_driver_name = "echainiv-authenc-"
18741 + "hmac-sha384-cbc-des-"
18742 + "caam-qi2",
18743 + .cra_blocksize = DES_BLOCK_SIZE,
18744 + },
18745 + .setkey = aead_setkey,
18746 + .setauthsize = aead_setauthsize,
18747 + .encrypt = aead_encrypt,
18748 + .decrypt = aead_decrypt,
18749 + .ivsize = DES_BLOCK_SIZE,
18750 + .maxauthsize = SHA384_DIGEST_SIZE,
18751 + },
18752 + .caam = {
18753 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18754 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18755 + OP_ALG_AAI_HMAC_PRECOMP,
18756 + .geniv = true,
18757 + }
18758 + },
18759 + {
18760 + .aead = {
18761 + .base = {
18762 + .cra_name = "authenc(hmac(sha512),cbc(des))",
18763 + .cra_driver_name = "authenc-hmac-sha512-"
18764 + "cbc-des-caam-qi2",
18765 + .cra_blocksize = DES_BLOCK_SIZE,
18766 + },
18767 + .setkey = aead_setkey,
18768 + .setauthsize = aead_setauthsize,
18769 + .encrypt = aead_encrypt,
18770 + .decrypt = aead_decrypt,
18771 + .ivsize = DES_BLOCK_SIZE,
18772 + .maxauthsize = SHA512_DIGEST_SIZE,
18773 + },
18774 + .caam = {
18775 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18776 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18777 + OP_ALG_AAI_HMAC_PRECOMP,
18778 + }
18779 + },
18780 + {
18781 + .aead = {
18782 + .base = {
18783 + .cra_name = "echainiv(authenc(hmac(sha512),"
18784 + "cbc(des)))",
18785 + .cra_driver_name = "echainiv-authenc-"
18786 + "hmac-sha512-cbc-des-"
18787 + "caam-qi2",
18788 + .cra_blocksize = DES_BLOCK_SIZE,
18789 + },
18790 + .setkey = aead_setkey,
18791 + .setauthsize = aead_setauthsize,
18792 + .encrypt = aead_encrypt,
18793 + .decrypt = aead_decrypt,
18794 + .ivsize = DES_BLOCK_SIZE,
18795 + .maxauthsize = SHA512_DIGEST_SIZE,
18796 + },
18797 + .caam = {
18798 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18799 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18800 + OP_ALG_AAI_HMAC_PRECOMP,
18801 + .geniv = true,
18802 + }
18803 + },
18804 + {
18805 + .aead = {
18806 + .base = {
18807 + .cra_name = "authenc(hmac(md5),"
18808 + "rfc3686(ctr(aes)))",
18809 + .cra_driver_name = "authenc-hmac-md5-"
18810 + "rfc3686-ctr-aes-caam-qi2",
18811 + .cra_blocksize = 1,
18812 + },
18813 + .setkey = aead_setkey,
18814 + .setauthsize = aead_setauthsize,
18815 + .encrypt = aead_encrypt,
18816 + .decrypt = aead_decrypt,
18817 + .ivsize = CTR_RFC3686_IV_SIZE,
18818 + .maxauthsize = MD5_DIGEST_SIZE,
18819 + },
18820 + .caam = {
18821 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18822 + OP_ALG_AAI_CTR_MOD128,
18823 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18824 + OP_ALG_AAI_HMAC_PRECOMP,
18825 + .rfc3686 = true,
18826 + },
18827 + },
18828 + {
18829 + .aead = {
18830 + .base = {
18831 + .cra_name = "seqiv(authenc("
18832 + "hmac(md5),rfc3686(ctr(aes))))",
18833 + .cra_driver_name = "seqiv-authenc-hmac-md5-"
18834 + "rfc3686-ctr-aes-caam-qi2",
18835 + .cra_blocksize = 1,
18836 + },
18837 + .setkey = aead_setkey,
18838 + .setauthsize = aead_setauthsize,
18839 + .encrypt = aead_encrypt,
18840 + .decrypt = aead_decrypt,
18841 + .ivsize = CTR_RFC3686_IV_SIZE,
18842 + .maxauthsize = MD5_DIGEST_SIZE,
18843 + },
18844 + .caam = {
18845 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18846 + OP_ALG_AAI_CTR_MOD128,
18847 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18848 + OP_ALG_AAI_HMAC_PRECOMP,
18849 + .rfc3686 = true,
18850 + .geniv = true,
18851 + },
18852 + },
18853 + {
18854 + .aead = {
18855 + .base = {
18856 + .cra_name = "authenc(hmac(sha1),"
18857 + "rfc3686(ctr(aes)))",
18858 + .cra_driver_name = "authenc-hmac-sha1-"
18859 + "rfc3686-ctr-aes-caam-qi2",
18860 + .cra_blocksize = 1,
18861 + },
18862 + .setkey = aead_setkey,
18863 + .setauthsize = aead_setauthsize,
18864 + .encrypt = aead_encrypt,
18865 + .decrypt = aead_decrypt,
18866 + .ivsize = CTR_RFC3686_IV_SIZE,
18867 + .maxauthsize = SHA1_DIGEST_SIZE,
18868 + },
18869 + .caam = {
18870 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18871 + OP_ALG_AAI_CTR_MOD128,
18872 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18873 + OP_ALG_AAI_HMAC_PRECOMP,
18874 + .rfc3686 = true,
18875 + },
18876 + },
18877 + {
18878 + .aead = {
18879 + .base = {
18880 + .cra_name = "seqiv(authenc("
18881 + "hmac(sha1),rfc3686(ctr(aes))))",
18882 + .cra_driver_name = "seqiv-authenc-hmac-sha1-"
18883 + "rfc3686-ctr-aes-caam-qi2",
18884 + .cra_blocksize = 1,
18885 + },
18886 + .setkey = aead_setkey,
18887 + .setauthsize = aead_setauthsize,
18888 + .encrypt = aead_encrypt,
18889 + .decrypt = aead_decrypt,
18890 + .ivsize = CTR_RFC3686_IV_SIZE,
18891 + .maxauthsize = SHA1_DIGEST_SIZE,
18892 + },
18893 + .caam = {
18894 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18895 + OP_ALG_AAI_CTR_MOD128,
18896 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18897 + OP_ALG_AAI_HMAC_PRECOMP,
18898 + .rfc3686 = true,
18899 + .geniv = true,
18900 + },
18901 + },
18902 + {
18903 + .aead = {
18904 + .base = {
18905 + .cra_name = "authenc(hmac(sha224),"
18906 + "rfc3686(ctr(aes)))",
18907 + .cra_driver_name = "authenc-hmac-sha224-"
18908 + "rfc3686-ctr-aes-caam-qi2",
18909 + .cra_blocksize = 1,
18910 + },
18911 + .setkey = aead_setkey,
18912 + .setauthsize = aead_setauthsize,
18913 + .encrypt = aead_encrypt,
18914 + .decrypt = aead_decrypt,
18915 + .ivsize = CTR_RFC3686_IV_SIZE,
18916 + .maxauthsize = SHA224_DIGEST_SIZE,
18917 + },
18918 + .caam = {
18919 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18920 + OP_ALG_AAI_CTR_MOD128,
18921 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18922 + OP_ALG_AAI_HMAC_PRECOMP,
18923 + .rfc3686 = true,
18924 + },
18925 + },
18926 + {
18927 + .aead = {
18928 + .base = {
18929 + .cra_name = "seqiv(authenc("
18930 + "hmac(sha224),rfc3686(ctr(aes))))",
18931 + .cra_driver_name = "seqiv-authenc-hmac-sha224-"
18932 + "rfc3686-ctr-aes-caam-qi2",
18933 + .cra_blocksize = 1,
18934 + },
18935 + .setkey = aead_setkey,
18936 + .setauthsize = aead_setauthsize,
18937 + .encrypt = aead_encrypt,
18938 + .decrypt = aead_decrypt,
18939 + .ivsize = CTR_RFC3686_IV_SIZE,
18940 + .maxauthsize = SHA224_DIGEST_SIZE,
18941 + },
18942 + .caam = {
18943 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18944 + OP_ALG_AAI_CTR_MOD128,
18945 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18946 + OP_ALG_AAI_HMAC_PRECOMP,
18947 + .rfc3686 = true,
18948 + .geniv = true,
18949 + },
18950 + },
18951 + {
18952 + .aead = {
18953 + .base = {
18954 + .cra_name = "authenc(hmac(sha256),"
18955 + "rfc3686(ctr(aes)))",
18956 + .cra_driver_name = "authenc-hmac-sha256-"
18957 + "rfc3686-ctr-aes-caam-qi2",
18958 + .cra_blocksize = 1,
18959 + },
18960 + .setkey = aead_setkey,
18961 + .setauthsize = aead_setauthsize,
18962 + .encrypt = aead_encrypt,
18963 + .decrypt = aead_decrypt,
18964 + .ivsize = CTR_RFC3686_IV_SIZE,
18965 + .maxauthsize = SHA256_DIGEST_SIZE,
18966 + },
18967 + .caam = {
18968 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18969 + OP_ALG_AAI_CTR_MOD128,
18970 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18971 + OP_ALG_AAI_HMAC_PRECOMP,
18972 + .rfc3686 = true,
18973 + },
18974 + },
18975 + {
18976 + .aead = {
18977 + .base = {
18978 + .cra_name = "seqiv(authenc(hmac(sha256),"
18979 + "rfc3686(ctr(aes))))",
18980 + .cra_driver_name = "seqiv-authenc-hmac-sha256-"
18981 + "rfc3686-ctr-aes-caam-qi2",
18982 + .cra_blocksize = 1,
18983 + },
18984 + .setkey = aead_setkey,
18985 + .setauthsize = aead_setauthsize,
18986 + .encrypt = aead_encrypt,
18987 + .decrypt = aead_decrypt,
18988 + .ivsize = CTR_RFC3686_IV_SIZE,
18989 + .maxauthsize = SHA256_DIGEST_SIZE,
18990 + },
18991 + .caam = {
18992 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18993 + OP_ALG_AAI_CTR_MOD128,
18994 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18995 + OP_ALG_AAI_HMAC_PRECOMP,
18996 + .rfc3686 = true,
18997 + .geniv = true,
18998 + },
18999 + },
19000 + {
19001 + .aead = {
19002 + .base = {
19003 + .cra_name = "authenc(hmac(sha384),"
19004 + "rfc3686(ctr(aes)))",
19005 + .cra_driver_name = "authenc-hmac-sha384-"
19006 + "rfc3686-ctr-aes-caam-qi2",
19007 + .cra_blocksize = 1,
19008 + },
19009 + .setkey = aead_setkey,
19010 + .setauthsize = aead_setauthsize,
19011 + .encrypt = aead_encrypt,
19012 + .decrypt = aead_decrypt,
19013 + .ivsize = CTR_RFC3686_IV_SIZE,
19014 + .maxauthsize = SHA384_DIGEST_SIZE,
19015 + },
19016 + .caam = {
19017 + .class1_alg_type = OP_ALG_ALGSEL_AES |
19018 + OP_ALG_AAI_CTR_MOD128,
19019 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
19020 + OP_ALG_AAI_HMAC_PRECOMP,
19021 + .rfc3686 = true,
19022 + },
19023 + },
19024 + {
19025 + .aead = {
19026 + .base = {
19027 + .cra_name = "seqiv(authenc(hmac(sha384),"
19028 + "rfc3686(ctr(aes))))",
19029 + .cra_driver_name = "seqiv-authenc-hmac-sha384-"
19030 + "rfc3686-ctr-aes-caam-qi2",
19031 + .cra_blocksize = 1,
19032 + },
19033 + .setkey = aead_setkey,
19034 + .setauthsize = aead_setauthsize,
19035 + .encrypt = aead_encrypt,
19036 + .decrypt = aead_decrypt,
19037 + .ivsize = CTR_RFC3686_IV_SIZE,
19038 + .maxauthsize = SHA384_DIGEST_SIZE,
19039 + },
19040 + .caam = {
19041 + .class1_alg_type = OP_ALG_ALGSEL_AES |
19042 + OP_ALG_AAI_CTR_MOD128,
19043 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
19044 + OP_ALG_AAI_HMAC_PRECOMP,
19045 + .rfc3686 = true,
19046 + .geniv = true,
19047 + },
19048 + },
19049 + {
19050 + .aead = {
19051 + .base = {
19052 + .cra_name = "authenc(hmac(sha512),"
19053 + "rfc3686(ctr(aes)))",
19054 + .cra_driver_name = "authenc-hmac-sha512-"
19055 + "rfc3686-ctr-aes-caam-qi2",
19056 + .cra_blocksize = 1,
19057 + },
19058 + .setkey = aead_setkey,
19059 + .setauthsize = aead_setauthsize,
19060 + .encrypt = aead_encrypt,
19061 + .decrypt = aead_decrypt,
19062 + .ivsize = CTR_RFC3686_IV_SIZE,
19063 + .maxauthsize = SHA512_DIGEST_SIZE,
19064 + },
19065 + .caam = {
19066 + .class1_alg_type = OP_ALG_ALGSEL_AES |
19067 + OP_ALG_AAI_CTR_MOD128,
19068 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
19069 + OP_ALG_AAI_HMAC_PRECOMP,
19070 + .rfc3686 = true,
19071 + },
19072 + },
19073 + {
19074 + .aead = {
19075 + .base = {
19076 + .cra_name = "seqiv(authenc(hmac(sha512),"
19077 + "rfc3686(ctr(aes))))",
19078 + .cra_driver_name = "seqiv-authenc-hmac-sha512-"
19079 + "rfc3686-ctr-aes-caam-qi2",
19080 + .cra_blocksize = 1,
19081 + },
19082 + .setkey = aead_setkey,
19083 + .setauthsize = aead_setauthsize,
19084 + .encrypt = aead_encrypt,
19085 + .decrypt = aead_decrypt,
19086 + .ivsize = CTR_RFC3686_IV_SIZE,
19087 + .maxauthsize = SHA512_DIGEST_SIZE,
19088 + },
19089 + .caam = {
19090 + .class1_alg_type = OP_ALG_ALGSEL_AES |
19091 + OP_ALG_AAI_CTR_MOD128,
19092 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
19093 + OP_ALG_AAI_HMAC_PRECOMP,
19094 + .rfc3686 = true,
19095 + .geniv = true,
19096 + },
19097 + },
19098 + {
19099 + .aead = {
19100 + .base = {
19101 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
19102 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
19103 + .cra_blocksize = AES_BLOCK_SIZE,
19104 + },
19105 + .setkey = tls_setkey,
19106 + .setauthsize = tls_setauthsize,
19107 + .encrypt = tls_encrypt,
19108 + .decrypt = tls_decrypt,
19109 + .ivsize = AES_BLOCK_SIZE,
19110 + .maxauthsize = SHA1_DIGEST_SIZE,
19111 + },
19112 + .caam = {
19113 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
19114 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
19115 + OP_ALG_AAI_HMAC_PRECOMP,
19116 + },
19117 + },
19118 +};
19119 +
19120 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
19121 + *template)
19122 +{
19123 + struct caam_crypto_alg *t_alg;
19124 + struct crypto_alg *alg;
19125 +
19126 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
19127 + if (!t_alg)
19128 + return ERR_PTR(-ENOMEM);
19129 +
19130 + alg = &t_alg->crypto_alg;
19131 +
19132 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
19133 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
19134 + template->driver_name);
19135 + alg->cra_module = THIS_MODULE;
19136 + alg->cra_exit = caam_cra_exit;
19137 + alg->cra_priority = CAAM_CRA_PRIORITY;
19138 + alg->cra_blocksize = template->blocksize;
19139 + alg->cra_alignmask = 0;
19140 + alg->cra_ctxsize = sizeof(struct caam_ctx);
19141 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
19142 + template->type;
19143 + switch (template->type) {
19144 + case CRYPTO_ALG_TYPE_GIVCIPHER:
19145 + alg->cra_init = caam_cra_init_ablkcipher;
19146 + alg->cra_type = &crypto_givcipher_type;
19147 + alg->cra_ablkcipher = template->template_ablkcipher;
19148 + break;
19149 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
19150 + alg->cra_init = caam_cra_init_ablkcipher;
19151 + alg->cra_type = &crypto_ablkcipher_type;
19152 + alg->cra_ablkcipher = template->template_ablkcipher;
19153 + break;
19154 + }
19155 +
19156 + t_alg->caam.class1_alg_type = template->class1_alg_type;
19157 + t_alg->caam.class2_alg_type = template->class2_alg_type;
19158 +
19159 + return t_alg;
19160 +}
19161 +
19162 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
19163 +{
19164 + struct aead_alg *alg = &t_alg->aead;
19165 +
19166 + alg->base.cra_module = THIS_MODULE;
19167 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
19168 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
19169 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
19170 +
19171 + alg->init = caam_cra_init_aead;
19172 + alg->exit = caam_cra_exit_aead;
19173 +}
19174 +
19175 +/* max hash key is max split key size */
19176 +#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
19177 +
19178 +#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
19179 +#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
19180 +
19181 +#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
19182 + CAAM_MAX_HASH_KEY_SIZE)
19183 +#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
19184 +
19185 +/* caam context sizes for hashes: running digest + 8 */
19186 +#define HASH_MSG_LEN 8
19187 +#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
19188 +
19189 +enum hash_optype {
19190 + UPDATE = 0,
19191 + UPDATE_FIRST,
19192 + FINALIZE,
19193 + DIGEST,
19194 + HASH_NUM_OP
19195 +};
19196 +
19197 +/**
19198 + * caam_hash_ctx - ahash per-session context
19199 + * @flc: Flow Contexts array
19200 + * @flc_dma: I/O virtual addresses of the Flow Contexts
19201 + * @key: virtual address of the authentication key
19202 + * @dev: dpseci device
19203 + * @ctx_len: size of Context Register
19204 + * @adata: hashing algorithm details
19205 + */
19206 +struct caam_hash_ctx {
19207 + struct caam_flc flc[HASH_NUM_OP];
19208 + dma_addr_t flc_dma[HASH_NUM_OP];
19209 + u8 key[CAAM_MAX_HASH_KEY_SIZE];
19210 + struct device *dev;
19211 + int ctx_len;
19212 + struct alginfo adata;
19213 +};
19214 +
19215 +/* ahash state */
19216 +struct caam_hash_state {
19217 + struct caam_request caam_req;
19218 + dma_addr_t buf_dma;
19219 + dma_addr_t ctx_dma;
19220 + u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
19221 + int buflen_0;
19222 + u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
19223 + int buflen_1;
19224 + u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
19225 + int (*update)(struct ahash_request *req);
19226 + int (*final)(struct ahash_request *req);
19227 + int (*finup)(struct ahash_request *req);
19228 + int current_buf;
19229 +};
19230 +
19231 +struct caam_export_state {
19232 + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
19233 + u8 caam_ctx[MAX_CTX_LEN];
19234 + int buflen;
19235 + int (*update)(struct ahash_request *req);
19236 + int (*final)(struct ahash_request *req);
19237 + int (*finup)(struct ahash_request *req);
19238 +};
19239 +
19240 +static inline void switch_buf(struct caam_hash_state *state)
19241 +{
19242 + state->current_buf ^= 1;
19243 +}
19244 +
19245 +static inline u8 *current_buf(struct caam_hash_state *state)
19246 +{
19247 + return state->current_buf ? state->buf_1 : state->buf_0;
19248 +}
19249 +
19250 +static inline u8 *alt_buf(struct caam_hash_state *state)
19251 +{
19252 + return state->current_buf ? state->buf_0 : state->buf_1;
19253 +}
19254 +
19255 +static inline int *current_buflen(struct caam_hash_state *state)
19256 +{
19257 + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
19258 +}
19259 +
19260 +static inline int *alt_buflen(struct caam_hash_state *state)
19261 +{
19262 + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
19263 +}
19264 +
19265 +/* Map current buffer in state (if length > 0) and put it in link table */
19266 +static inline int buf_map_to_qm_sg(struct device *dev,
19267 + struct dpaa2_sg_entry *qm_sg,
19268 + struct caam_hash_state *state)
19269 +{
19270 + int buflen = *current_buflen(state);
19271 +
19272 + if (!buflen)
19273 + return 0;
19274 +
19275 + state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
19276 + DMA_TO_DEVICE);
19277 + if (dma_mapping_error(dev, state->buf_dma)) {
19278 + dev_err(dev, "unable to map buf\n");
19279 + state->buf_dma = 0;
19280 + return -ENOMEM;
19281 + }
19282 +
19283 + dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
19284 +
19285 + return 0;
19286 +}
19287 +
19288 +/* Map state->caam_ctx, and add it to link table */
19289 +static inline int ctx_map_to_qm_sg(struct device *dev,
19290 + struct caam_hash_state *state, int ctx_len,
19291 + struct dpaa2_sg_entry *qm_sg, u32 flag)
19292 +{
19293 + state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
19294 + if (dma_mapping_error(dev, state->ctx_dma)) {
19295 + dev_err(dev, "unable to map ctx\n");
19296 + state->ctx_dma = 0;
19297 + return -ENOMEM;
19298 + }
19299 +
19300 + dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
19301 +
19302 + return 0;
19303 +}
19304 +
19305 +static int ahash_set_sh_desc(struct crypto_ahash *ahash)
19306 +{
19307 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19308 + int digestsize = crypto_ahash_digestsize(ahash);
19309 + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
19310 + struct caam_flc *flc;
19311 + u32 *desc;
19312 +
19313 + ctx->adata.key_virt = ctx->key;
19314 + ctx->adata.key_inline = true;
19315 +
19316 + /* ahash_update shared descriptor */
19317 + flc = &ctx->flc[UPDATE];
19318 + desc = flc->sh_desc;
19319 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
19320 + ctx->ctx_len, true, priv->sec_attr.era);
19321 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
19322 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
19323 + desc_bytes(desc), DMA_BIDIRECTIONAL);
19324 +#ifdef DEBUG
19325 + print_hex_dump(KERN_ERR,
19326 + "ahash update shdesc@" __stringify(__LINE__)": ",
19327 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
19328 +#endif
19329 +
19330 + /* ahash_update_first shared descriptor */
19331 + flc = &ctx->flc[UPDATE_FIRST];
19332 + desc = flc->sh_desc;
19333 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
19334 + ctx->ctx_len, false, priv->sec_attr.era);
19335 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
19336 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
19337 + desc_bytes(desc), DMA_BIDIRECTIONAL);
19338 +#ifdef DEBUG
19339 + print_hex_dump(KERN_ERR,
19340 + "ahash update first shdesc@" __stringify(__LINE__)": ",
19341 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
19342 +#endif
19343 +
19344 + /* ahash_final shared descriptor */
19345 + flc = &ctx->flc[FINALIZE];
19346 + desc = flc->sh_desc;
19347 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
19348 + ctx->ctx_len, true, priv->sec_attr.era);
19349 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
19350 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
19351 + desc_bytes(desc), DMA_BIDIRECTIONAL);
19352 +#ifdef DEBUG
19353 + print_hex_dump(KERN_ERR,
19354 + "ahash final shdesc@" __stringify(__LINE__)": ",
19355 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
19356 +#endif
19357 +
19358 + /* ahash_digest shared descriptor */
19359 + flc = &ctx->flc[DIGEST];
19360 + desc = flc->sh_desc;
19361 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
19362 + ctx->ctx_len, false, priv->sec_attr.era);
19363 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
19364 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
19365 + desc_bytes(desc), DMA_BIDIRECTIONAL);
19366 +#ifdef DEBUG
19367 + print_hex_dump(KERN_ERR,
19368 + "ahash digest shdesc@" __stringify(__LINE__)": ",
19369 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
19370 +#endif
19371 +
19372 + return 0;
19373 +}
19374 +
19375 +/* Digest the key when it is too large (longer than the block size) */
19376 +static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
19377 + u32 *keylen, u8 *key_out, u32 digestsize)
19378 +{
19379 + struct caam_request *req_ctx;
19380 + u32 *desc;
19381 + struct split_key_sh_result result;
19382 + dma_addr_t src_dma, dst_dma;
19383 + struct caam_flc *flc;
19384 + dma_addr_t flc_dma;
19385 + int ret = -ENOMEM;
19386 + struct dpaa2_fl_entry *in_fle, *out_fle;
19387 +
19388 + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
19389 + if (!req_ctx)
19390 + return -ENOMEM;
19391 +
19392 + in_fle = &req_ctx->fd_flt[1];
19393 + out_fle = &req_ctx->fd_flt[0];
19394 +
19395 + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
19396 + if (!flc)
19397 + goto err_flc;
19398 +
19399 + src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
19400 + DMA_TO_DEVICE);
19401 + if (dma_mapping_error(ctx->dev, src_dma)) {
19402 + dev_err(ctx->dev, "unable to map key input memory\n");
19403 + goto err_src_dma;
19404 + }
19405 + dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
19406 + DMA_FROM_DEVICE);
19407 + if (dma_mapping_error(ctx->dev, dst_dma)) {
19408 + dev_err(ctx->dev, "unable to map key output memory\n");
19409 + goto err_dst_dma;
19410 + }
19411 +
19412 + desc = flc->sh_desc;
19413 +
19414 + init_sh_desc(desc, 0);
19415 +
19416 + /* descriptor to perform unkeyed hash on key_in */
19417 + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
19418 + OP_ALG_AS_INITFINAL);
19419 + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
19420 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
19421 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
19422 + LDST_SRCDST_BYTE_CONTEXT);
19423 +
19424 + flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
19425 + flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
19426 + desc_bytes(desc), DMA_TO_DEVICE);
19427 + if (dma_mapping_error(ctx->dev, flc_dma)) {
19428 + dev_err(ctx->dev, "unable to map shared descriptor\n");
19429 + goto err_flc_dma;
19430 + }
19431 +
19432 + dpaa2_fl_set_final(in_fle, true);
19433 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
19434 + dpaa2_fl_set_addr(in_fle, src_dma);
19435 + dpaa2_fl_set_len(in_fle, *keylen);
19436 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19437 + dpaa2_fl_set_addr(out_fle, dst_dma);
19438 + dpaa2_fl_set_len(out_fle, digestsize);
19439 +
19440 +#ifdef DEBUG
19441 + print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
19442 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
19443 + print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
19444 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
19445 +#endif
19446 +
19447 + result.err = 0;
19448 + init_completion(&result.completion);
19449 + result.dev = ctx->dev;
19450 +
19451 + req_ctx->flc = flc;
19452 + req_ctx->flc_dma = flc_dma;
19453 + req_ctx->cbk = split_key_sh_done;
19454 + req_ctx->ctx = &result;
19455 +
19456 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19457 + if (ret == -EINPROGRESS) {
19458 + /* in progress */
19459 + wait_for_completion(&result.completion);
19460 + ret = result.err;
19461 +#ifdef DEBUG
19462 + print_hex_dump(KERN_ERR,
19463 + "digested key@" __stringify(__LINE__)": ",
19464 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize,
19465 + 1);
19466 +#endif
19467 + }
19468 +
19469 + dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
19470 + DMA_TO_DEVICE);
19471 +err_flc_dma:
19472 + dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
19473 +err_dst_dma:
19474 + dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
19475 +err_src_dma:
19476 + kfree(flc);
19477 +err_flc:
19478 + kfree(req_ctx);
19479 +
19480 + *keylen = digestsize;
19481 +
19482 + return ret;
19483 +}
19484 +
19485 +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
19486 + unsigned int keylen)
19487 +{
19488 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19489 + unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
19490 + unsigned int digestsize = crypto_ahash_digestsize(ahash);
19491 + int ret;
19492 + u8 *hashed_key = NULL;
19493 +
19494 +#ifdef DEBUG
19495 + dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
19496 +#endif
19497 +
19498 + if (keylen > blocksize) {
19499 + hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
19500 + GFP_KERNEL | GFP_DMA);
19501 + if (!hashed_key)
19502 + return -ENOMEM;
19503 + ret = hash_digest_key(ctx, key, &keylen, hashed_key,
19504 + digestsize);
19505 + if (ret)
19506 + goto bad_free_key;
19507 + key = hashed_key;
19508 + }
19509 +
19510 + ctx->adata.keylen = keylen;
19511 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
19512 + OP_ALG_ALGSEL_MASK);
19513 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
19514 + goto bad_free_key;
19515 +
19516 + memcpy(ctx->key, key, keylen);
19517 +
19518 + kfree(hashed_key);
19519 + return ahash_set_sh_desc(ahash);
19520 +bad_free_key:
19521 + kfree(hashed_key);
19522 + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
19523 + return -EINVAL;
19524 +}
19525 +
19526 +static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
19527 + struct ahash_request *req, int dst_len)
19528 +{
19529 + struct caam_hash_state *state = ahash_request_ctx(req);
19530 +
19531 + if (edesc->src_nents)
19532 + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
19533 + if (edesc->dst_dma)
19534 + dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
19535 +
19536 + if (edesc->qm_sg_bytes)
19537 + dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
19538 + DMA_TO_DEVICE);
19539 +
19540 + if (state->buf_dma) {
19541 + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
19542 + DMA_TO_DEVICE);
19543 + state->buf_dma = 0;
19544 + }
19545 +}
19546 +
19547 +static inline void ahash_unmap_ctx(struct device *dev,
19548 + struct ahash_edesc *edesc,
19549 + struct ahash_request *req, int dst_len,
19550 + u32 flag)
19551 +{
19552 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19553 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19554 + struct caam_hash_state *state = ahash_request_ctx(req);
19555 +
19556 + if (state->ctx_dma) {
19557 + dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
19558 + state->ctx_dma = 0;
19559 + }
19560 + ahash_unmap(dev, edesc, req, dst_len);
19561 +}
19562 +
19563 +static void ahash_done(void *cbk_ctx, u32 status)
19564 +{
19565 + struct crypto_async_request *areq = cbk_ctx;
19566 + struct ahash_request *req = ahash_request_cast(areq);
19567 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19568 + struct caam_hash_state *state = ahash_request_ctx(req);
19569 + struct ahash_edesc *edesc = state->caam_req.edesc;
19570 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19571 + int digestsize = crypto_ahash_digestsize(ahash);
19572 + int ecode = 0;
19573 +
19574 +#ifdef DEBUG
19575 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19576 +#endif
19577 +
19578 + if (unlikely(status)) {
19579 + caam_qi2_strstatus(ctx->dev, status);
19580 + ecode = -EIO;
19581 + }
19582 +
19583 + ahash_unmap(ctx->dev, edesc, req, digestsize);
19584 + qi_cache_free(edesc);
19585 +
19586 +#ifdef DEBUG
19587 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19588 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19589 + ctx->ctx_len, 1);
19590 + if (req->result)
19591 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19592 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19593 + digestsize, 1);
19594 +#endif
19595 +
19596 + req->base.complete(&req->base, ecode);
19597 +}
19598 +
19599 +static void ahash_done_bi(void *cbk_ctx, u32 status)
19600 +{
19601 + struct crypto_async_request *areq = cbk_ctx;
19602 + struct ahash_request *req = ahash_request_cast(areq);
19603 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19604 + struct caam_hash_state *state = ahash_request_ctx(req);
19605 + struct ahash_edesc *edesc = state->caam_req.edesc;
19606 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19607 + int ecode = 0;
19608 +#ifdef DEBUG
19609 + int digestsize = crypto_ahash_digestsize(ahash);
19610 +
19611 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19612 +#endif
19613 +
19614 + if (unlikely(status)) {
19615 + caam_qi2_strstatus(ctx->dev, status);
19616 + ecode = -EIO;
19617 + }
19618 +
19619 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
19620 + switch_buf(state);
19621 + qi_cache_free(edesc);
19622 +
19623 +#ifdef DEBUG
19624 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19625 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19626 + ctx->ctx_len, 1);
19627 + if (req->result)
19628 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19629 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19630 + digestsize, 1);
19631 +#endif
19632 +
19633 + req->base.complete(&req->base, ecode);
19634 +}
19635 +
19636 +static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
19637 +{
19638 + struct crypto_async_request *areq = cbk_ctx;
19639 + struct ahash_request *req = ahash_request_cast(areq);
19640 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19641 + struct caam_hash_state *state = ahash_request_ctx(req);
19642 + struct ahash_edesc *edesc = state->caam_req.edesc;
19643 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19644 + int digestsize = crypto_ahash_digestsize(ahash);
19645 + int ecode = 0;
19646 +
19647 +#ifdef DEBUG
19648 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19649 +#endif
19650 +
19651 + if (unlikely(status)) {
19652 + caam_qi2_strstatus(ctx->dev, status);
19653 + ecode = -EIO;
19654 + }
19655 +
19656 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
19657 + qi_cache_free(edesc);
19658 +
19659 +#ifdef DEBUG
19660 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19661 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19662 + ctx->ctx_len, 1);
19663 + if (req->result)
19664 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19665 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19666 + digestsize, 1);
19667 +#endif
19668 +
19669 + req->base.complete(&req->base, ecode);
19670 +}
19671 +
19672 +static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
19673 +{
19674 + struct crypto_async_request *areq = cbk_ctx;
19675 + struct ahash_request *req = ahash_request_cast(areq);
19676 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19677 + struct caam_hash_state *state = ahash_request_ctx(req);
19678 + struct ahash_edesc *edesc = state->caam_req.edesc;
19679 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19680 + int ecode = 0;
19681 +#ifdef DEBUG
19682 + int digestsize = crypto_ahash_digestsize(ahash);
19683 +
19684 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19685 +#endif
19686 +
19687 + if (unlikely(status)) {
19688 + caam_qi2_strstatus(ctx->dev, status);
19689 + ecode = -EIO;
19690 + }
19691 +
19692 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
19693 + switch_buf(state);
19694 + qi_cache_free(edesc);
19695 +
19696 +#ifdef DEBUG
19697 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19698 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19699 + ctx->ctx_len, 1);
19700 + if (req->result)
19701 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19702 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19703 + digestsize, 1);
19704 +#endif
19705 +
19706 + req->base.complete(&req->base, ecode);
19707 +}
19708 +
19709 +static int ahash_update_ctx(struct ahash_request *req)
19710 +{
19711 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19712 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19713 + struct caam_hash_state *state = ahash_request_ctx(req);
19714 + struct caam_request *req_ctx = &state->caam_req;
19715 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19716 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19717 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19718 + GFP_KERNEL : GFP_ATOMIC;
19719 + u8 *buf = current_buf(state);
19720 + int *buflen = current_buflen(state);
19721 + u8 *next_buf = alt_buf(state);
19722 + int *next_buflen = alt_buflen(state), last_buflen;
19723 + int in_len = *buflen + req->nbytes, to_hash;
19724 + int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
19725 + struct ahash_edesc *edesc;
19726 + int ret = 0;
19727 +
19728 + last_buflen = *next_buflen;
19729 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
19730 + to_hash = in_len - *next_buflen;
19731 +
19732 + if (to_hash) {
19733 + struct dpaa2_sg_entry *sg_table;
19734 +
19735 + src_nents = sg_nents_for_len(req->src,
19736 + req->nbytes - (*next_buflen));
19737 + if (src_nents < 0) {
19738 + dev_err(ctx->dev, "Invalid number of src SG.\n");
19739 + return src_nents;
19740 + }
19741 +
19742 + if (src_nents) {
19743 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19744 + DMA_TO_DEVICE);
19745 + if (!mapped_nents) {
19746 + dev_err(ctx->dev, "unable to DMA map source\n");
19747 + return -ENOMEM;
19748 + }
19749 + } else {
19750 + mapped_nents = 0;
19751 + }
19752 +
19753 + /* allocate space for base edesc and link tables */
19754 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19755 + if (!edesc) {
19756 + dma_unmap_sg(ctx->dev, req->src, src_nents,
19757 + DMA_TO_DEVICE);
19758 + return -ENOMEM;
19759 + }
19760 +
19761 + edesc->src_nents = src_nents;
19762 + qm_sg_src_index = 1 + (*buflen ? 1 : 0);
19763 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
19764 + sizeof(*sg_table);
19765 + sg_table = &edesc->sgt[0];
19766 +
19767 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
19768 + DMA_BIDIRECTIONAL);
19769 + if (ret)
19770 + goto unmap_ctx;
19771 +
19772 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
19773 + if (ret)
19774 + goto unmap_ctx;
19775 +
19776 + if (mapped_nents) {
19777 + sg_to_qm_sg_last(req->src, mapped_nents,
19778 + sg_table + qm_sg_src_index, 0);
19779 + if (*next_buflen)
19780 + scatterwalk_map_and_copy(next_buf, req->src,
19781 + to_hash - *buflen,
19782 + *next_buflen, 0);
19783 + } else {
19784 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
19785 + true);
19786 + }
19787 +
19788 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
19789 + qm_sg_bytes, DMA_TO_DEVICE);
19790 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19791 + dev_err(ctx->dev, "unable to map S/G table\n");
19792 + ret = -ENOMEM;
19793 + goto unmap_ctx;
19794 + }
19795 + edesc->qm_sg_bytes = qm_sg_bytes;
19796 +
19797 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19798 + dpaa2_fl_set_final(in_fle, true);
19799 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19800 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19801 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
19802 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19803 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
19804 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
19805 +
19806 + req_ctx->flc = &ctx->flc[UPDATE];
19807 + req_ctx->flc_dma = ctx->flc_dma[UPDATE];
19808 + req_ctx->cbk = ahash_done_bi;
19809 + req_ctx->ctx = &req->base;
19810 + req_ctx->edesc = edesc;
19811 +
19812 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19813 + if (ret != -EINPROGRESS &&
19814 + !(ret == -EBUSY &&
19815 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19816 + goto unmap_ctx;
19817 + } else if (*next_buflen) {
19818 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
19819 + req->nbytes, 0);
19820 + *buflen = *next_buflen;
19821 + *next_buflen = last_buflen;
19822 + }
19823 +#ifdef DEBUG
19824 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
19825 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
19826 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
19827 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
19828 + *next_buflen, 1);
19829 +#endif
19830 +
19831 + return ret;
19832 +unmap_ctx:
19833 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
19834 + qi_cache_free(edesc);
19835 + return ret;
19836 +}
19837 +
19838 +static int ahash_final_ctx(struct ahash_request *req)
19839 +{
19840 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19841 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19842 + struct caam_hash_state *state = ahash_request_ctx(req);
19843 + struct caam_request *req_ctx = &state->caam_req;
19844 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19845 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19846 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19847 + GFP_KERNEL : GFP_ATOMIC;
19848 + int buflen = *current_buflen(state);
19849 + int qm_sg_bytes, qm_sg_src_index;
19850 + int digestsize = crypto_ahash_digestsize(ahash);
19851 + struct ahash_edesc *edesc;
19852 + struct dpaa2_sg_entry *sg_table;
19853 + int ret;
19854 +
19855 + /* allocate space for base edesc and link tables */
19856 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19857 + if (!edesc)
19858 + return -ENOMEM;
19859 +
19860 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
19861 + qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
19862 + sg_table = &edesc->sgt[0];
19863 +
19864 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
19865 + DMA_TO_DEVICE);
19866 + if (ret)
19867 + goto unmap_ctx;
19868 +
19869 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
19870 + if (ret)
19871 + goto unmap_ctx;
19872 +
19873 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
19874 +
19875 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
19876 + DMA_TO_DEVICE);
19877 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19878 + dev_err(ctx->dev, "unable to map S/G table\n");
19879 + ret = -ENOMEM;
19880 + goto unmap_ctx;
19881 + }
19882 + edesc->qm_sg_bytes = qm_sg_bytes;
19883 +
19884 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19885 + DMA_FROM_DEVICE);
19886 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19887 + dev_err(ctx->dev, "unable to map dst\n");
19888 + edesc->dst_dma = 0;
19889 + ret = -ENOMEM;
19890 + goto unmap_ctx;
19891 + }
19892 +
19893 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19894 + dpaa2_fl_set_final(in_fle, true);
19895 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19896 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19897 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
19898 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19899 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19900 + dpaa2_fl_set_len(out_fle, digestsize);
19901 +
19902 + req_ctx->flc = &ctx->flc[FINALIZE];
19903 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
19904 + req_ctx->cbk = ahash_done_ctx_src;
19905 + req_ctx->ctx = &req->base;
19906 + req_ctx->edesc = edesc;
19907 +
19908 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19909 + if (ret == -EINPROGRESS ||
19910 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19911 + return ret;
19912 +
19913 +unmap_ctx:
19914 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
19915 + qi_cache_free(edesc);
19916 + return ret;
19917 +}
19918 +
19919 +static int ahash_finup_ctx(struct ahash_request *req)
19920 +{
19921 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19922 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19923 + struct caam_hash_state *state = ahash_request_ctx(req);
19924 + struct caam_request *req_ctx = &state->caam_req;
19925 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19926 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19927 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19928 + GFP_KERNEL : GFP_ATOMIC;
19929 + int buflen = *current_buflen(state);
19930 + int qm_sg_bytes, qm_sg_src_index;
19931 + int src_nents, mapped_nents;
19932 + int digestsize = crypto_ahash_digestsize(ahash);
19933 + struct ahash_edesc *edesc;
19934 + struct dpaa2_sg_entry *sg_table;
19935 + int ret;
19936 +
19937 + src_nents = sg_nents_for_len(req->src, req->nbytes);
19938 + if (src_nents < 0) {
19939 + dev_err(ctx->dev, "Invalid number of src SG.\n");
19940 + return src_nents;
19941 + }
19942 +
19943 + if (src_nents) {
19944 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19945 + DMA_TO_DEVICE);
19946 + if (!mapped_nents) {
19947 + dev_err(ctx->dev, "unable to DMA map source\n");
19948 + return -ENOMEM;
19949 + }
19950 + } else {
19951 + mapped_nents = 0;
19952 + }
19953 +
19954 + /* allocate space for base edesc and link tables */
19955 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19956 + if (!edesc) {
19957 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
19958 + return -ENOMEM;
19959 + }
19960 +
19961 + edesc->src_nents = src_nents;
19962 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
19963 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
19964 + sg_table = &edesc->sgt[0];
19965 +
19966 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
19967 + DMA_TO_DEVICE);
19968 + if (ret)
19969 + goto unmap_ctx;
19970 +
19971 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
19972 + if (ret)
19973 + goto unmap_ctx;
19974 +
19975 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
19976 +
19977 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
19978 + DMA_TO_DEVICE);
19979 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19980 + dev_err(ctx->dev, "unable to map S/G table\n");
19981 + ret = -ENOMEM;
19982 + goto unmap_ctx;
19983 + }
19984 + edesc->qm_sg_bytes = qm_sg_bytes;
19985 +
19986 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19987 + DMA_FROM_DEVICE);
19988 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19989 + dev_err(ctx->dev, "unable to map dst\n");
19990 + edesc->dst_dma = 0;
19991 + ret = -ENOMEM;
19992 + goto unmap_ctx;
19993 + }
19994 +
19995 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19996 + dpaa2_fl_set_final(in_fle, true);
19997 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19998 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19999 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
20000 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
20001 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
20002 + dpaa2_fl_set_len(out_fle, digestsize);
20003 +
20004 + req_ctx->flc = &ctx->flc[FINALIZE];
20005 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
20006 + req_ctx->cbk = ahash_done_ctx_src;
20007 + req_ctx->ctx = &req->base;
20008 + req_ctx->edesc = edesc;
20009 +
20010 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
20011 + if (ret == -EINPROGRESS ||
20012 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
20013 + return ret;
20014 +
20015 +unmap_ctx:
20016 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
20017 + qi_cache_free(edesc);
20018 + return ret;
20019 +}
20020 +
20021 +static int ahash_digest(struct ahash_request *req)
20022 +{
20023 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20024 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20025 + struct caam_hash_state *state = ahash_request_ctx(req);
20026 + struct caam_request *req_ctx = &state->caam_req;
20027 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
20028 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
20029 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20030 + GFP_KERNEL : GFP_ATOMIC;
20031 + int digestsize = crypto_ahash_digestsize(ahash);
20032 + int src_nents, mapped_nents;
20033 + struct ahash_edesc *edesc;
20034 + int ret = -ENOMEM;
20035 +
20036 + state->buf_dma = 0;
20037 +
20038 + src_nents = sg_nents_for_len(req->src, req->nbytes);
20039 + if (src_nents < 0) {
20040 + dev_err(ctx->dev, "Invalid number of src SG.\n");
20041 + return src_nents;
20042 + }
20043 +
20044 + if (src_nents) {
20045 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
20046 + DMA_TO_DEVICE);
20047 + if (!mapped_nents) {
20048 + dev_err(ctx->dev, "unable to map source for DMA\n");
20049 + return ret;
20050 + }
20051 + } else {
20052 + mapped_nents = 0;
20053 + }
20054 +
20055 + /* allocate space for base edesc and link tables */
20056 + edesc = qi_cache_zalloc(GFP_DMA | flags);
20057 + if (!edesc) {
20058 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
20059 + return ret;
20060 + }
20061 +
20062 + edesc->src_nents = src_nents;
20063 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
20064 +
20065 + if (mapped_nents > 1) {
20066 + int qm_sg_bytes;
20067 + struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
20068 +
20069 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
20070 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
20071 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
20072 + qm_sg_bytes, DMA_TO_DEVICE);
20073 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
20074 + dev_err(ctx->dev, "unable to map S/G table\n");
20075 + goto unmap;
20076 + }
20077 + edesc->qm_sg_bytes = qm_sg_bytes;
20078 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
20079 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
20080 + } else {
20081 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
20082 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
20083 + }
20084 +
20085 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
20086 + DMA_FROM_DEVICE);
20087 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
20088 + dev_err(ctx->dev, "unable to map dst\n");
20089 + edesc->dst_dma = 0;
20090 + goto unmap;
20091 + }
20092 +
20093 + dpaa2_fl_set_final(in_fle, true);
20094 + dpaa2_fl_set_len(in_fle, req->nbytes);
20095 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
20096 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
20097 + dpaa2_fl_set_len(out_fle, digestsize);
20098 +
20099 + req_ctx->flc = &ctx->flc[DIGEST];
20100 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
20101 + req_ctx->cbk = ahash_done;
20102 + req_ctx->ctx = &req->base;
20103 + req_ctx->edesc = edesc;
20104 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
20105 + if (ret == -EINPROGRESS ||
20106 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
20107 + return ret;
20108 +
20109 +unmap:
20110 + ahash_unmap(ctx->dev, edesc, req, digestsize);
20111 + qi_cache_free(edesc);
20112 + return ret;
20113 +}
20114 +
20115 +static int ahash_final_no_ctx(struct ahash_request *req)
20116 +{
20117 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20118 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20119 + struct caam_hash_state *state = ahash_request_ctx(req);
20120 + struct caam_request *req_ctx = &state->caam_req;
20121 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
20122 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
20123 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20124 + GFP_KERNEL : GFP_ATOMIC;
20125 + u8 *buf = current_buf(state);
20126 + int buflen = *current_buflen(state);
20127 + int digestsize = crypto_ahash_digestsize(ahash);
20128 + struct ahash_edesc *edesc;
20129 + int ret = -ENOMEM;
20130 +
20131 + /* allocate space for base edesc and link tables */
20132 + edesc = qi_cache_zalloc(GFP_DMA | flags);
20133 + if (!edesc)
20134 + return ret;
20135 +
20136 + state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
20137 + if (dma_mapping_error(ctx->dev, state->buf_dma)) {
20138 + dev_err(ctx->dev, "unable to map src\n");
20139 + goto unmap;
20140 + }
20141 +
20142 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
20143 + DMA_FROM_DEVICE);
20144 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
20145 + dev_err(ctx->dev, "unable to map dst\n");
20146 + edesc->dst_dma = 0;
20147 + goto unmap;
20148 + }
20149 +
20150 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
20151 + dpaa2_fl_set_final(in_fle, true);
20152 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
20153 + dpaa2_fl_set_addr(in_fle, state->buf_dma);
20154 + dpaa2_fl_set_len(in_fle, buflen);
20155 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
20156 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
20157 + dpaa2_fl_set_len(out_fle, digestsize);
20158 +
20159 + req_ctx->flc = &ctx->flc[DIGEST];
20160 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
20161 + req_ctx->cbk = ahash_done;
20162 + req_ctx->ctx = &req->base;
20163 + req_ctx->edesc = edesc;
20164 +
20165 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
20166 + if (ret == -EINPROGRESS ||
20167 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
20168 + return ret;
20169 +
20170 +unmap:
20171 + ahash_unmap(ctx->dev, edesc, req, digestsize);
20172 + qi_cache_free(edesc);
20173 + return ret;
20174 +}
20175 +
20176 +static int ahash_update_no_ctx(struct ahash_request *req)
20177 +{
20178 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20179 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20180 + struct caam_hash_state *state = ahash_request_ctx(req);
20181 + struct caam_request *req_ctx = &state->caam_req;
20182 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
20183 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
20184 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20185 + GFP_KERNEL : GFP_ATOMIC;
20186 + u8 *buf = current_buf(state);
20187 + int *buflen = current_buflen(state);
20188 + u8 *next_buf = alt_buf(state);
20189 + int *next_buflen = alt_buflen(state);
20190 + int in_len = *buflen + req->nbytes, to_hash;
20191 + int qm_sg_bytes, src_nents, mapped_nents;
20192 + struct ahash_edesc *edesc;
20193 + int ret = 0;
20194 +
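+ /* Editorial note (not in original patch): hash only whole blocks now; the remainder is buffered for the next update/final. */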
20195 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
20196 + to_hash = in_len - *next_buflen;
20197 +
20198 + if (to_hash) {
20199 + struct dpaa2_sg_entry *sg_table;
20200 +
20201 + src_nents = sg_nents_for_len(req->src,
20202 + req->nbytes - *next_buflen);
20203 + if (src_nents < 0) {
20204 + dev_err(ctx->dev, "Invalid number of src SG.\n");
20205 + return src_nents;
20206 + }
20207 +
20208 + if (src_nents) {
20209 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
20210 + DMA_TO_DEVICE);
20211 + if (!mapped_nents) {
20212 + dev_err(ctx->dev, "unable to DMA map source\n");
20213 + return -ENOMEM;
20214 + }
20215 + } else {
20216 + mapped_nents = 0;
20217 + }
20218 +
20219 + /* allocate space for base edesc and link tables */
20220 + edesc = qi_cache_zalloc(GFP_DMA | flags);
20221 + if (!edesc) {
20222 + dma_unmap_sg(ctx->dev, req->src, src_nents,
20223 + DMA_TO_DEVICE);
20224 + return -ENOMEM;
20225 + }
20226 +
20227 + edesc->src_nents = src_nents;
20228 + qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
20229 + sg_table = &edesc->sgt[0];
20230 +
20231 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
20232 + if (ret)
20233 + goto unmap_ctx;
20234 +
20235 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
20236 +
20237 + if (*next_buflen)
20238 + scatterwalk_map_and_copy(next_buf, req->src,
20239 + to_hash - *buflen,
20240 + *next_buflen, 0);
20241 +
20242 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
20243 + qm_sg_bytes, DMA_TO_DEVICE);
20244 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
20245 + dev_err(ctx->dev, "unable to map S/G table\n");
20246 + ret = -ENOMEM;
20247 + goto unmap_ctx;
20248 + }
20249 + edesc->qm_sg_bytes = qm_sg_bytes;
20250 +
20251 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
20252 + ctx->ctx_len, DMA_FROM_DEVICE);
20253 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
20254 + dev_err(ctx->dev, "unable to map ctx\n");
20255 + state->ctx_dma = 0;
20256 + ret = -ENOMEM;
20257 + goto unmap_ctx;
20258 + }
20259 +
20260 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
20261 + dpaa2_fl_set_final(in_fle, true);
20262 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
20263 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
20264 + dpaa2_fl_set_len(in_fle, to_hash);
20265 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
20266 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
20267 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
20268 +
20269 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
20270 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
20271 + req_ctx->cbk = ahash_done_ctx_dst;
20272 + req_ctx->ctx = &req->base;
20273 + req_ctx->edesc = edesc;
20274 +
20275 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
20276 + if (ret != -EINPROGRESS &&
20277 + !(ret == -EBUSY &&
20278 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
20279 + goto unmap_ctx;
20280 +
20281 + state->update = ahash_update_ctx;
20282 + state->finup = ahash_finup_ctx;
20283 + state->final = ahash_final_ctx;
20284 + } else if (*next_buflen) {
20285 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
20286 + req->nbytes, 0);
20287 + *buflen = *next_buflen;
20288 + *next_buflen = 0;
20289 + }
20290 +#ifdef DEBUG
20291 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
20292 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
20293 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
20294 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
20295 + *next_buflen, 1);
20296 +#endif
20297 +
20298 + return ret;
20299 +unmap_ctx:
20300 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
20301 + qi_cache_free(edesc);
20302 + return ret;
20303 +}
20304 +
20305 +static int ahash_finup_no_ctx(struct ahash_request *req)
20306 +{
20307 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20308 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20309 + struct caam_hash_state *state = ahash_request_ctx(req);
20310 + struct caam_request *req_ctx = &state->caam_req;
20311 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
20312 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
20313 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20314 + GFP_KERNEL : GFP_ATOMIC;
20315 + int buflen = *current_buflen(state);
20316 + int qm_sg_bytes, src_nents, mapped_nents;
20317 + int digestsize = crypto_ahash_digestsize(ahash);
20318 + struct ahash_edesc *edesc;
20319 + struct dpaa2_sg_entry *sg_table;
20320 + int ret;
20321 +
20322 + src_nents = sg_nents_for_len(req->src, req->nbytes);
20323 + if (src_nents < 0) {
20324 + dev_err(ctx->dev, "Invalid number of src SG.\n");
20325 + return src_nents;
20326 + }
20327 +
20328 + if (src_nents) {
20329 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
20330 + DMA_TO_DEVICE);
20331 + if (!mapped_nents) {
20332 + dev_err(ctx->dev, "unable to DMA map source\n");
20333 + return -ENOMEM;
20334 + }
20335 + } else {
20336 + mapped_nents = 0;
20337 + }
20338 +
20339 + /* allocate space for base edesc and link tables */
20340 + edesc = qi_cache_zalloc(GFP_DMA | flags);
20341 + if (!edesc) {
20342 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
20343 + return -ENOMEM;
20344 + }
20345 +
20346 + edesc->src_nents = src_nents;
20347 + qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
20348 + sg_table = &edesc->sgt[0];
20349 +
20350 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
20351 + if (ret)
20352 + goto unmap;
20353 +
20354 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
20355 +
20356 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
20357 + DMA_TO_DEVICE);
20358 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
20359 + dev_err(ctx->dev, "unable to map S/G table\n");
20360 + ret = -ENOMEM;
20361 + goto unmap;
20362 + }
20363 + edesc->qm_sg_bytes = qm_sg_bytes;
20364 +
20365 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
20366 + DMA_FROM_DEVICE);
20367 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
20368 + dev_err(ctx->dev, "unable to map dst\n");
20369 + edesc->dst_dma = 0;
20370 + ret = -ENOMEM;
20371 + goto unmap;
20372 + }
20373 +
20374 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
20375 + dpaa2_fl_set_final(in_fle, true);
20376 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
20377 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
20378 + dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
20379 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
20380 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
20381 + dpaa2_fl_set_len(out_fle, digestsize);
20382 +
20383 + req_ctx->flc = &ctx->flc[DIGEST];
20384 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
20385 + req_ctx->cbk = ahash_done;
20386 + req_ctx->ctx = &req->base;
20387 + req_ctx->edesc = edesc;
20388 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
20389 + if (ret != -EINPROGRESS &&
20390 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
20391 + goto unmap;
20392 +
20393 + return ret;
20394 +unmap:
20395 + ahash_unmap(ctx->dev, edesc, req, digestsize);
20396 + qi_cache_free(edesc);
20397 + return -ENOMEM;
20398 +}
20399 +
20400 +static int ahash_update_first(struct ahash_request *req)
20401 +{
20402 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
20403 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
20404 + struct caam_hash_state *state = ahash_request_ctx(req);
20405 + struct caam_request *req_ctx = &state->caam_req;
20406 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
20407 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
20408 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
20409 + GFP_KERNEL : GFP_ATOMIC;
20410 + u8 *next_buf = alt_buf(state);
20411 + int *next_buflen = alt_buflen(state);
20412 + int to_hash;
20413 + int src_nents, mapped_nents;
20414 + struct ahash_edesc *edesc;
20415 + int ret = 0;
20416 +
20417 + *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
20418 + 1);
20419 + to_hash = req->nbytes - *next_buflen;
20420 +
20421 + if (to_hash) {
20422 + struct dpaa2_sg_entry *sg_table;
20423 +
20424 + src_nents = sg_nents_for_len(req->src,
20425 + req->nbytes - (*next_buflen));
20426 + if (src_nents < 0) {
20427 + dev_err(ctx->dev, "Invalid number of src SG.\n");
20428 + return src_nents;
20429 + }
20430 +
20431 + if (src_nents) {
20432 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
20433 + DMA_TO_DEVICE);
20434 + if (!mapped_nents) {
20435 + dev_err(ctx->dev, "unable to map source for DMA\n");
20436 + return -ENOMEM;
20437 + }
20438 + } else {
20439 + mapped_nents = 0;
20440 + }
20441 +
20442 + /* allocate space for base edesc and link tables */
20443 + edesc = qi_cache_zalloc(GFP_DMA | flags);
20444 + if (!edesc) {
20445 + dma_unmap_sg(ctx->dev, req->src, src_nents,
20446 + DMA_TO_DEVICE);
20447 + return -ENOMEM;
20448 + }
20449 +
20450 + edesc->src_nents = src_nents;
20451 + sg_table = &edesc->sgt[0];
20452 +
20453 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
20454 + dpaa2_fl_set_final(in_fle, true);
20455 + dpaa2_fl_set_len(in_fle, to_hash);
20456 +
20457 + if (mapped_nents > 1) {
20458 + int qm_sg_bytes;
20459 +
20460 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
20461 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
20462 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
20463 + qm_sg_bytes,
20464 + DMA_TO_DEVICE);
20465 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
20466 + dev_err(ctx->dev, "unable to map S/G table\n");
20467 + ret = -ENOMEM;
20468 + goto unmap_ctx;
20469 + }
20470 + edesc->qm_sg_bytes = qm_sg_bytes;
20471 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
20472 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
20473 + } else {
20474 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
20475 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
20476 + }
20477 +
20478 + if (*next_buflen)
20479 + scatterwalk_map_and_copy(next_buf, req->src, to_hash,
20480 + *next_buflen, 0);
20481 +
20482 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
20483 + ctx->ctx_len, DMA_FROM_DEVICE);
20484 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
20485 + dev_err(ctx->dev, "unable to map ctx\n");
20486 + state->ctx_dma = 0;
20487 + ret = -ENOMEM;
20488 + goto unmap_ctx;
20489 + }
20490 +
20491 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
20492 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
20493 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
20494 +
20495 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
20496 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
20497 + req_ctx->cbk = ahash_done_ctx_dst;
20498 + req_ctx->ctx = &req->base;
20499 + req_ctx->edesc = edesc;
20500 +
20501 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
20502 + if (ret != -EINPROGRESS &&
20503 + !(ret == -EBUSY && req->base.flags &
20504 + CRYPTO_TFM_REQ_MAY_BACKLOG))
20505 + goto unmap_ctx;
20506 +
20507 + state->update = ahash_update_ctx;
20508 + state->finup = ahash_finup_ctx;
20509 + state->final = ahash_final_ctx;
20510 + } else if (*next_buflen) {
20511 + state->update = ahash_update_no_ctx;
20512 + state->finup = ahash_finup_no_ctx;
20513 + state->final = ahash_final_no_ctx;
20514 + scatterwalk_map_and_copy(next_buf, req->src, 0,
20515 + req->nbytes, 0);
20516 + switch_buf(state);
20517 + }
20518 +#ifdef DEBUG
20519 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
20520 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
20521 +#endif
20522 +
20523 + return ret;
20524 +unmap_ctx:
20525 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
20526 + qi_cache_free(edesc);
20527 + return ret;
20528 +}
20529 +
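+/* Editorial note (not in original patch): with no prior update there is no running context, so finup reduces to a one-shot digest. */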
20530 +static int ahash_finup_first(struct ahash_request *req)
20531 +{
20532 + return ahash_digest(req);
20533 +}
20534 +
20535 +static int ahash_init(struct ahash_request *req)
20536 +{
20537 + struct caam_hash_state *state = ahash_request_ctx(req);
20538 +
20539 + state->update = ahash_update_first;
20540 + state->finup = ahash_finup_first;
20541 + state->final = ahash_final_no_ctx;
20542 +
20543 + state->ctx_dma = 0;
20544 + state->current_buf = 0;
20545 + state->buf_dma = 0;
20546 + state->buflen_0 = 0;
20547 + state->buflen_1 = 0;
20548 +
20549 + return 0;
20550 +}
20551 +
20552 +static int ahash_update(struct ahash_request *req)
20553 +{
20554 + struct caam_hash_state *state = ahash_request_ctx(req);
20555 +
20556 + return state->update(req);
20557 +}
20558 +
20559 +static int ahash_finup(struct ahash_request *req)
20560 +{
20561 + struct caam_hash_state *state = ahash_request_ctx(req);
20562 +
20563 + return state->finup(req);
20564 +}
20565 +
20566 +static int ahash_final(struct ahash_request *req)
20567 +{
20568 + struct caam_hash_state *state = ahash_request_ctx(req);
20569 +
20570 + return state->final(req);
20571 +}
20572 +
20573 +static int ahash_export(struct ahash_request *req, void *out)
20574 +{
20575 + struct caam_hash_state *state = ahash_request_ctx(req);
20576 + struct caam_export_state *export = out;
20577 + int len;
20578 + u8 *buf;
20579 +
20580 + if (state->current_buf) {
20581 + buf = state->buf_1;
20582 + len = state->buflen_1;
20583 + } else {
20584 + buf = state->buf_0;
20585 + len = state->buflen_0;
20586 + }
20587 +
20588 + memcpy(export->buf, buf, len);
20589 + memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
20590 + export->buflen = len;
20591 + export->update = state->update;
20592 + export->final = state->final;
20593 + export->finup = state->finup;
20594 +
20595 + return 0;
20596 +}
20597 +
20598 +static int ahash_import(struct ahash_request *req, const void *in)
20599 +{
20600 + struct caam_hash_state *state = ahash_request_ctx(req);
20601 + const struct caam_export_state *export = in;
20602 +
20603 + memset(state, 0, sizeof(*state));
20604 + memcpy(state->buf_0, export->buf, export->buflen);
20605 + memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
20606 + state->buflen_0 = export->buflen;
20607 + state->update = export->update;
20608 + state->final = export->final;
20609 + state->finup = export->finup;
20610 +
20611 + return 0;
20612 +}
20613 +
20614 +struct caam_hash_template {
20615 + char name[CRYPTO_MAX_ALG_NAME];
20616 + char driver_name[CRYPTO_MAX_ALG_NAME];
20617 + char hmac_name[CRYPTO_MAX_ALG_NAME];
20618 + char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
20619 + unsigned int blocksize;
20620 + struct ahash_alg template_ahash;
20621 + u32 alg_type;
20622 +};
20623 +
20624 +/* ahash descriptors */
20625 +static struct caam_hash_template driver_hash[] = {
20626 + {
20627 + .name = "sha1",
20628 + .driver_name = "sha1-caam-qi2",
20629 + .hmac_name = "hmac(sha1)",
20630 + .hmac_driver_name = "hmac-sha1-caam-qi2",
20631 + .blocksize = SHA1_BLOCK_SIZE,
20632 + .template_ahash = {
20633 + .init = ahash_init,
20634 + .update = ahash_update,
20635 + .final = ahash_final,
20636 + .finup = ahash_finup,
20637 + .digest = ahash_digest,
20638 + .export = ahash_export,
20639 + .import = ahash_import,
20640 + .setkey = ahash_setkey,
20641 + .halg = {
20642 + .digestsize = SHA1_DIGEST_SIZE,
20643 + .statesize = sizeof(struct caam_export_state),
20644 + },
20645 + },
20646 + .alg_type = OP_ALG_ALGSEL_SHA1,
20647 + }, {
20648 + .name = "sha224",
20649 + .driver_name = "sha224-caam-qi2",
20650 + .hmac_name = "hmac(sha224)",
20651 + .hmac_driver_name = "hmac-sha224-caam-qi2",
20652 + .blocksize = SHA224_BLOCK_SIZE,
20653 + .template_ahash = {
20654 + .init = ahash_init,
20655 + .update = ahash_update,
20656 + .final = ahash_final,
20657 + .finup = ahash_finup,
20658 + .digest = ahash_digest,
20659 + .export = ahash_export,
20660 + .import = ahash_import,
20661 + .setkey = ahash_setkey,
20662 + .halg = {
20663 + .digestsize = SHA224_DIGEST_SIZE,
20664 + .statesize = sizeof(struct caam_export_state),
20665 + },
20666 + },
20667 + .alg_type = OP_ALG_ALGSEL_SHA224,
20668 + }, {
20669 + .name = "sha256",
20670 + .driver_name = "sha256-caam-qi2",
20671 + .hmac_name = "hmac(sha256)",
20672 + .hmac_driver_name = "hmac-sha256-caam-qi2",
20673 + .blocksize = SHA256_BLOCK_SIZE,
20674 + .template_ahash = {
20675 + .init = ahash_init,
20676 + .update = ahash_update,
20677 + .final = ahash_final,
20678 + .finup = ahash_finup,
20679 + .digest = ahash_digest,
20680 + .export = ahash_export,
20681 + .import = ahash_import,
20682 + .setkey = ahash_setkey,
20683 + .halg = {
20684 + .digestsize = SHA256_DIGEST_SIZE,
20685 + .statesize = sizeof(struct caam_export_state),
20686 + },
20687 + },
20688 + .alg_type = OP_ALG_ALGSEL_SHA256,
20689 + }, {
20690 + .name = "sha384",
20691 + .driver_name = "sha384-caam-qi2",
20692 + .hmac_name = "hmac(sha384)",
20693 + .hmac_driver_name = "hmac-sha384-caam-qi2",
20694 + .blocksize = SHA384_BLOCK_SIZE,
20695 + .template_ahash = {
20696 + .init = ahash_init,
20697 + .update = ahash_update,
20698 + .final = ahash_final,
20699 + .finup = ahash_finup,
20700 + .digest = ahash_digest,
20701 + .export = ahash_export,
20702 + .import = ahash_import,
20703 + .setkey = ahash_setkey,
20704 + .halg = {
20705 + .digestsize = SHA384_DIGEST_SIZE,
20706 + .statesize = sizeof(struct caam_export_state),
20707 + },
20708 + },
20709 + .alg_type = OP_ALG_ALGSEL_SHA384,
20710 + }, {
20711 + .name = "sha512",
20712 + .driver_name = "sha512-caam-qi2",
20713 + .hmac_name = "hmac(sha512)",
20714 + .hmac_driver_name = "hmac-sha512-caam-qi2",
20715 + .blocksize = SHA512_BLOCK_SIZE,
20716 + .template_ahash = {
20717 + .init = ahash_init,
20718 + .update = ahash_update,
20719 + .final = ahash_final,
20720 + .finup = ahash_finup,
20721 + .digest = ahash_digest,
20722 + .export = ahash_export,
20723 + .import = ahash_import,
20724 + .setkey = ahash_setkey,
20725 + .halg = {
20726 + .digestsize = SHA512_DIGEST_SIZE,
20727 + .statesize = sizeof(struct caam_export_state),
20728 + },
20729 + },
20730 + .alg_type = OP_ALG_ALGSEL_SHA512,
20731 + }, {
20732 + .name = "md5",
20733 + .driver_name = "md5-caam-qi2",
20734 + .hmac_name = "hmac(md5)",
20735 + .hmac_driver_name = "hmac-md5-caam-qi2",
20736 + .blocksize = MD5_BLOCK_WORDS * 4,
20737 + .template_ahash = {
20738 + .init = ahash_init,
20739 + .update = ahash_update,
20740 + .final = ahash_final,
20741 + .finup = ahash_finup,
20742 + .digest = ahash_digest,
20743 + .export = ahash_export,
20744 + .import = ahash_import,
20745 + .setkey = ahash_setkey,
20746 + .halg = {
20747 + .digestsize = MD5_DIGEST_SIZE,
20748 + .statesize = sizeof(struct caam_export_state),
20749 + },
20750 + },
20751 + .alg_type = OP_ALG_ALGSEL_MD5,
20752 + }
20753 +};
20754 +
20755 +struct caam_hash_alg {
20756 + struct list_head entry;
20757 + struct device *dev;
20758 + int alg_type;
20759 + struct ahash_alg ahash_alg;
20760 +};
20761 +
20762 +static int caam_hash_cra_init(struct crypto_tfm *tfm)
20763 +{
20764 + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
20765 + struct crypto_alg *base = tfm->__crt_alg;
20766 + struct hash_alg_common *halg =
20767 + container_of(base, struct hash_alg_common, base);
20768 + struct ahash_alg *alg =
20769 + container_of(halg, struct ahash_alg, halg);
20770 + struct caam_hash_alg *caam_hash =
20771 + container_of(alg, struct caam_hash_alg, ahash_alg);
20772 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
20773 + /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
20774 + static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
20775 + HASH_MSG_LEN + SHA1_DIGEST_SIZE,
20776 + HASH_MSG_LEN + 32,
20777 + HASH_MSG_LEN + SHA256_DIGEST_SIZE,
20778 + HASH_MSG_LEN + 64,
20779 + HASH_MSG_LEN + SHA512_DIGEST_SIZE };
20780 + dma_addr_t dma_addr;
20781 + int i;
20782 +
20783 + ctx->dev = caam_hash->dev;
20784 +
20785 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
20786 + DMA_BIDIRECTIONAL,
20787 + DMA_ATTR_SKIP_CPU_SYNC);
20788 + if (dma_mapping_error(ctx->dev, dma_addr)) {
20789 + dev_err(ctx->dev, "unable to map shared descriptors\n");
20790 + return -ENOMEM;
20791 + }
20792 +
20793 + for (i = 0; i < HASH_NUM_OP; i++)
20794 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
20795 +
20796 + /* copy descriptor header template value */
20797 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20798 +
20799 + ctx->ctx_len = runninglen[(ctx->adata.algtype &
20800 + OP_ALG_ALGSEL_SUBMASK) >>
20801 + OP_ALG_ALGSEL_SHIFT];
20802 +
20803 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
20804 + sizeof(struct caam_hash_state));
20805 +
20806 + return ahash_set_sh_desc(ahash);
20807 +}
20808 +
20809 +static void caam_hash_cra_exit(struct crypto_tfm *tfm)
20810 +{
20811 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
20812 +
20813 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
20814 + DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
20815 +}
20816 +
20817 +static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
20818 + struct caam_hash_template *template, bool keyed)
20819 +{
20820 + struct caam_hash_alg *t_alg;
20821 + struct ahash_alg *halg;
20822 + struct crypto_alg *alg;
20823 +
20824 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
20825 + if (!t_alg)
20826 + return ERR_PTR(-ENOMEM);
20827 +
20828 + t_alg->ahash_alg = template->template_ahash;
20829 + halg = &t_alg->ahash_alg;
20830 + alg = &halg->halg.base;
20831 +
20832 + if (keyed) {
20833 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
20834 + template->hmac_name);
20835 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
20836 + template->hmac_driver_name);
20837 + } else {
20838 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
20839 + template->name);
20840 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
20841 + template->driver_name);
20842 + t_alg->ahash_alg.setkey = NULL;
20843 + }
20844 + alg->cra_module = THIS_MODULE;
20845 + alg->cra_init = caam_hash_cra_init;
20846 + alg->cra_exit = caam_hash_cra_exit;
20847 + alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
20848 + alg->cra_priority = CAAM_CRA_PRIORITY;
20849 + alg->cra_blocksize = template->blocksize;
20850 + alg->cra_alignmask = 0;
20851 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
20852 + alg->cra_type = &crypto_ahash_type;
20853 +
20854 + t_alg->alg_type = template->alg_type;
20855 + t_alg->dev = dev;
20856 +
20857 + return t_alg;
20858 +}
20859 +
20860 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
20861 +{
20862 + struct dpaa2_caam_priv_per_cpu *ppriv;
20863 +
20864 + ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
20865 + napi_schedule_irqoff(&ppriv->napi);
20866 +}
20867 +
20868 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
20869 +{
20870 + struct device *dev = priv->dev;
20871 + struct dpaa2_io_notification_ctx *nctx;
20872 + struct dpaa2_caam_priv_per_cpu *ppriv;
20873 + int err, i = 0, cpu;
20874 +
20875 + for_each_online_cpu(cpu) {
20876 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20877 + ppriv->priv = priv;
20878 + nctx = &ppriv->nctx;
20879 + nctx->is_cdan = 0;
20880 + nctx->id = ppriv->rsp_fqid;
20881 + nctx->desired_cpu = cpu;
20882 + nctx->cb = dpaa2_caam_fqdan_cb;
20883 +
20884 + /* Register notification callbacks */
20885 + err = dpaa2_io_service_register(NULL, nctx);
20886 + if (unlikely(err)) {
20887 + dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
20888 + nctx->cb = NULL;
20889 + /*
20890 + * If no affine DPIO for this core, there's probably
20891 + * none available for next cores either. Signal we want
20892 + * to retry later, in case the DPIO devices weren't
20893 + * probed yet.
20894 + */
20895 + err = -EPROBE_DEFER;
20896 + goto err;
20897 + }
20898 +
20899 + ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
20900 + dev);
20901 + if (unlikely(!ppriv->store)) {
20902 + dev_err(dev, "dpaa2_io_store_create() failed\n");
+ err = -ENOMEM;
20903 + goto err;
20904 + }
20905 +
20906 + if (++i == priv->num_pairs)
20907 + break;
20908 + }
20909 +
20910 + return 0;
20911 +
20912 +err:
20913 + for_each_online_cpu(cpu) {
20914 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20915 + if (!ppriv->nctx.cb)
20916 + break;
20917 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
20918 + }
20919 +
20920 + for_each_online_cpu(cpu) {
20921 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20922 + if (!ppriv->store)
20923 + break;
20924 + dpaa2_io_store_destroy(ppriv->store);
20925 + }
20926 +
20927 + return err;
20928 +}
20929 +
20930 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
20931 +{
20932 + struct dpaa2_caam_priv_per_cpu *ppriv;
20933 + int i = 0, cpu;
20934 +
20935 + for_each_online_cpu(cpu) {
20936 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20937 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
20938 + dpaa2_io_store_destroy(ppriv->store);
20939 +
20940 + if (++i == priv->num_pairs)
20941 + return;
20942 + }
20943 +}
20944 +
20945 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
20946 +{
20947 + struct dpseci_rx_queue_cfg rx_queue_cfg;
20948 + struct device *dev = priv->dev;
20949 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20950 + struct dpaa2_caam_priv_per_cpu *ppriv;
20951 + int err = 0, i = 0, cpu;
20952 +
20953 + /* Configure Rx queues */
20954 + for_each_online_cpu(cpu) {
20955 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20956 +
20957 + rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
20958 + DPSECI_QUEUE_OPT_USER_CTX;
20959 + rx_queue_cfg.order_preservation_en = 0;
20960 + rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
20961 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
20962 + /*
20963 + * Rx priority (WQ) doesn't really matter, since we use
20964 + * pull mode, i.e. volatile dequeues from specific FQs
20965 + */
20966 + rx_queue_cfg.dest_cfg.priority = 0;
20967 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
20968 +
20969 + err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
20970 + &rx_queue_cfg);
20971 + if (err) {
20972 + dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
20973 + err);
20974 + return err;
20975 + }
20976 +
20977 + if (++i == priv->num_pairs)
20978 + break;
20979 + }
20980 +
20981 + return err;
20982 +}
20983 +
20984 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
20985 +{
20986 + struct device *dev = priv->dev;
20987 +
20988 + if (!priv->cscn_mem)
20989 + return;
20990 +
20991 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
20992 + kfree(priv->cscn_mem);
20993 +}
20994 +
20995 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
20996 +{
20997 + struct device *dev = priv->dev;
20998 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20999 +
21000 + dpaa2_dpseci_congestion_free(priv);
21001 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
21002 +}
21003 +
21004 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
21005 + const struct dpaa2_fd *fd)
21006 +{
21007 + struct caam_request *req;
21008 + u32 fd_err;
21009 +
21010 + if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
21011 + dev_err(priv->dev, "Only Frame List FD format is supported!\n");
21012 + return;
21013 + }
21014 +
21015 + fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
21016 + if (unlikely(fd_err))
21017 + dev_err(priv->dev, "FD error: %08x\n", fd_err);
21018 +
21019 + /*
21020 + * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
21021 + * in FD[ERR] or FD[FRC].
21022 + */
21023 + req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
21024 + dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
21025 + DMA_BIDIRECTIONAL);
21026 + req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
21027 +}
21028 +
21029 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
21030 +{
21031 + int err;
21032 +
21033 + /* Retry while portal is busy */
21034 + do {
21035 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
21036 + ppriv->store);
21037 + } while (err == -EBUSY);
21038 +
21039 + if (unlikely(err))
21040 + dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err);
21041 +
21042 + return err;
21043 +}
21044 +
21045 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
21046 +{
21047 + struct dpaa2_dq *dq;
21048 + int cleaned = 0, is_last;
21049 +
21050 + do {
21051 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
21052 + if (unlikely(!dq)) {
21053 + if (unlikely(!is_last)) {
21054 + dev_dbg(ppriv->priv->dev,
21055 + "FQ %d returned no valid frames\n",
21056 + ppriv->rsp_fqid);
21057 + /*
21058 + * MUST retry until we get some sort of
21059 + * valid response token (be it "empty dequeue"
21060 + * or a valid frame).
21061 + */
21062 + continue;
21063 + }
21064 + break;
21065 + }
21066 +
21067 + /* Process FD */
21068 + dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
21069 + cleaned++;
21070 + } while (!is_last);
21071 +
21072 + return cleaned;
21073 +}
21074 +
21075 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
21076 +{
21077 + struct dpaa2_caam_priv_per_cpu *ppriv;
21078 + struct dpaa2_caam_priv *priv;
21079 + int err, cleaned = 0, store_cleaned;
21080 +
21081 + ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
21082 + priv = ppriv->priv;
21083 +
21084 + if (unlikely(dpaa2_caam_pull_fq(ppriv)))
21085 + return 0;
21086 +
21087 + do {
21088 + store_cleaned = dpaa2_caam_store_consume(ppriv);
21089 + cleaned += store_cleaned;
21090 +
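+ /* Editorial note (not in original patch): stop early so one more pull (up to DPAA2_CAAM_STORE_SIZE frames) cannot exceed the NAPI budget. */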
21091 + if (store_cleaned == 0 ||
21092 + cleaned > budget - DPAA2_CAAM_STORE_SIZE)
21093 + break;
21094 +
21095 + /* Try to dequeue some more */
21096 + err = dpaa2_caam_pull_fq(ppriv);
21097 + if (unlikely(err))
21098 + break;
21099 + } while (1);
21100 +
21101 + if (cleaned < budget) {
21102 + napi_complete_done(napi, cleaned);
21103 + err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
21104 + if (unlikely(err))
21105 + dev_err(priv->dev, "Notification rearm failed: %d\n",
21106 + err);
21107 + }
21108 +
21109 + return cleaned;
21110 +}
21111 +
21112 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
21113 + u16 token)
21114 +{
21115 + struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
21116 + struct device *dev = priv->dev;
21117 + int err;
21118 +
21119 + /*
21120 + * Congestion group feature supported starting with DPSECI API v5.1
21121 + * and only when object has been created with this capability.
21122 + */
21123 + if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
21124 + !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
21125 + return 0;
21126 +
21127 + priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
21128 + GFP_KERNEL | GFP_DMA);
21129 + if (!priv->cscn_mem)
21130 + return -ENOMEM;
21131 +
21132 + priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
21133 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
21134 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
21135 + if (dma_mapping_error(dev, priv->cscn_dma)) {
21136 + dev_err(dev, "Error mapping CSCN memory area\n");
21137 + err = -ENOMEM;
21138 + goto err_dma_map;
21139 + }
21140 +
21141 + cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
21142 + cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
21143 + cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
21144 + cong_notif_cfg.message_ctx = (u64)priv;
21145 + cong_notif_cfg.message_iova = priv->cscn_dma;
21146 + cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
21147 + DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
21148 + DPSECI_CGN_MODE_COHERENT_WRITE;
21149 +
21150 + err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
21151 + &cong_notif_cfg);
21152 + if (err) {
21153 + dev_err(dev, "dpseci_set_congestion_notification failed\n");
21154 + goto err_set_cong;
21155 + }
21156 +
21157 + return 0;
21158 +
21159 +err_set_cong:
21160 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
21161 +err_dma_map:
21162 + kfree(priv->cscn_mem);
21163 +
21164 + return err;
21165 +}
21166 +
21167 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
21168 +{
21169 + struct device *dev = &ls_dev->dev;
21170 + struct dpaa2_caam_priv *priv;
21171 + struct dpaa2_caam_priv_per_cpu *ppriv;
21172 + int err, cpu;
21173 + u8 i;
21174 +
21175 + priv = dev_get_drvdata(dev);
21176 +
21177 + priv->dev = dev;
21178 + priv->dpsec_id = ls_dev->obj_desc.id;
21179 +
21180 + /* Get a handle for the DPSECI this interface is associated with */
21181 + err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
21182 + if (err) {
21183 + dev_err(dev, "dpsec_open() failed: %d\n", err);
21184 + goto err_open;
21185 + }
21186 +
21187 + dev_info(dev, "Opened dpseci object successfully\n");
21188 +
21189 + err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
21190 + &priv->minor_ver);
21191 + if (err) {
21192 + dev_err(dev, "dpseci_get_api_version() failed\n");
21193 + goto err_get_vers;
21194 + }
21195 +
21196 + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
21197 + &priv->dpseci_attr);
21198 + if (err) {
21199 + dev_err(dev, "dpseci_get_attributes() failed\n");
21200 + goto err_get_vers;
21201 + }
21202 +
21203 + err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
21204 + &priv->sec_attr);
21205 + if (err) {
21206 + dev_err(dev, "dpseci_get_sec_attr() failed\n");
21207 + goto err_get_vers;
21208 + }
21209 +
21210 + err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
21211 + if (err) {
21212 + dev_err(dev, "setup_congestion() failed\n");
21213 + goto err_get_vers;
21214 + }
21215 +
21216 + priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
21217 + priv->dpseci_attr.num_tx_queues);
21218 + if (priv->num_pairs > num_online_cpus()) {
21219 + dev_warn(dev, "%d queues won't be used\n",
21220 + priv->num_pairs - num_online_cpus());
21221 + priv->num_pairs = num_online_cpus();
21222 + }
21223 +
21224 + for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
21225 + err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
21226 + &priv->rx_queue_attr[i]);
21227 + if (err) {
21228 + dev_err(dev, "dpseci_get_rx_queue() failed\n");
21229 + goto err_get_rx_queue;
21230 + }
21231 + }
21232 +
21233 + for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
21234 + err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
21235 + &priv->tx_queue_attr[i]);
21236 + if (err) {
21237 + dev_err(dev, "dpseci_get_tx_queue() failed\n");
21238 + goto err_get_rx_queue;
21239 + }
21240 + }
21241 +
21242 + i = 0;
21243 + for_each_online_cpu(cpu) {
21244 + dev_info(dev, "prio %d: rx queue %d, tx queue %d\n", i,
21245 + priv->rx_queue_attr[i].fqid,
21246 + priv->tx_queue_attr[i].fqid);
21247 +
21248 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
21249 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
21250 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
21251 + ppriv->prio = i;
21252 +
21253 + ppriv->net_dev.dev = *dev;
21254 + INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
21255 + netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
21256 + DPAA2_CAAM_NAPI_WEIGHT);
21257 + if (++i == priv->num_pairs)
21258 + break;
21259 + }
21260 +
21261 + return 0;
21262 +
21263 +err_get_rx_queue:
21264 + dpaa2_dpseci_congestion_free(priv);
21265 +err_get_vers:
21266 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
21267 +err_open:
21268 + return err;
21269 +}
21270 +
21271 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
21272 +{
21273 + struct device *dev = priv->dev;
21274 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
21275 + struct dpaa2_caam_priv_per_cpu *ppriv;
21276 + int err, i;
21277 +
21278 + for (i = 0; i < priv->num_pairs; i++) {
21279 + ppriv = per_cpu_ptr(priv->ppriv, i);
21280 + napi_enable(&ppriv->napi);
21281 + }
21282 +
21283 + err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
21284 + if (err) {
21285 + dev_err(dev, "dpseci_enable() failed\n");
21286 + return err;
21287 + }
21288 +
21289 + dev_info(dev, "DPSECI version %d.%d\n",
21290 + priv->major_ver,
21291 + priv->minor_ver);
21292 +
21293 + return 0;
21294 +}
21295 +
21296 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
21297 +{
21298 + struct device *dev = priv->dev;
21299 + struct dpaa2_caam_priv_per_cpu *ppriv;
21300 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
21301 + int i, err = 0, enabled;
21302 +
21303 + err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
21304 + if (err) {
21305 + dev_err(dev, "dpseci_disable() failed\n");
21306 + return err;
21307 + }
21308 +
21309 + err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
21310 + if (err) {
21311 + dev_err(dev, "dpseci_is_enabled() failed\n");
21312 + return err;
21313 + }
21314 +
21315 + dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
21316 +
21317 + for (i = 0; i < priv->num_pairs; i++) {
21318 + ppriv = per_cpu_ptr(priv->ppriv, i);
21319 + napi_disable(&ppriv->napi);
21320 + netif_napi_del(&ppriv->napi);
21321 + }
21322 +
21323 + return 0;
21324 +}
21325 +
21326 +static struct list_head alg_list;
21327 +static struct list_head hash_list;
21328 +
21329 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
21330 +{
21331 + struct device *dev;
21332 + struct dpaa2_caam_priv *priv;
21333 + int i, err = 0;
21334 + bool registered = false;
21335 +
21336 + /*
21337 + * There is no way to get CAAM endianness - there is no direct register
21338 + * space access and MC f/w does not provide this attribute.
21339 + * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
21340 + * property.
21341 + */
21342 + caam_little_end = true;
21343 +
21344 + caam_imx = false;
21345 +
21346 + dev = &dpseci_dev->dev;
21347 +
21348 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
21349 + if (!priv)
21350 + return -ENOMEM;
21351 +
21352 + dev_set_drvdata(dev, priv);
21353 +
21354 + priv->domain = iommu_get_domain_for_dev(dev);
21355 +
21356 + qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
21357 + 0, SLAB_CACHE_DMA, NULL);
21358 + if (!qi_cache) {
21359 + dev_err(dev, "Can't allocate SEC cache\n");
21360 + err = -ENOMEM;
21361 + goto err_qicache;
21362 + }
21363 +
21364 + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
21365 + if (err) {
21366 + dev_err(dev, "dma_set_mask_and_coherent() failed\n");
21367 + goto err_dma_mask;
21368 + }
21369 +
21370 + /* Obtain a MC portal */
21371 + err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
21372 + if (err) {
21373 + if (err == -ENXIO)
21374 + err = -EPROBE_DEFER;
21375 + else
21376 + dev_err(dev, "MC portal allocation failed\n");
21377 +
21378 + goto err_dma_mask;
21379 + }
21380 +
21381 + priv->ppriv = alloc_percpu(*priv->ppriv);
21382 + if (!priv->ppriv) {
21383 + dev_err(dev, "alloc_percpu() failed\n");
+ err = -ENOMEM;
21384 + goto err_alloc_ppriv;
21385 + }
21386 +
21387 + /* DPSECI initialization */
21388 + err = dpaa2_dpseci_setup(dpseci_dev);
21389 + if (err < 0) {
21390 + dev_err(dev, "dpaa2_dpseci_setup() failed\n");
21391 + goto err_dpseci_setup;
21392 + }
21393 +
21394 + /* DPIO */
21395 + err = dpaa2_dpseci_dpio_setup(priv);
21396 + if (err) {
21397 + dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
21398 + goto err_dpio_setup;
21399 + }
21400 +
21401 + /* DPSECI binding to DPIO */
21402 + err = dpaa2_dpseci_bind(priv);
21403 + if (err) {
21404 + dev_err(dev, "dpaa2_dpseci_bind() failed\n");
21405 + goto err_bind;
21406 + }
21407 +
21408 + /* DPSECI enable */
21409 + err = dpaa2_dpseci_enable(priv);
21410 + if (err) {
21411 + dev_err(dev, "dpaa2_dpseci_enable() failed\n");
21412 + goto err_bind;
21413 + }
21414 +
21415 + /* register crypto algorithms the device supports */
21416 + INIT_LIST_HEAD(&alg_list);
21417 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
21418 + struct caam_crypto_alg *t_alg;
21419 + struct caam_alg_template *alg = driver_algs + i;
21420 + u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
21421 +
21422 + /* Skip DES algorithms if not supported by device */
21423 + if (!priv->sec_attr.des_acc_num &&
21424 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
21425 + (alg_sel == OP_ALG_ALGSEL_DES)))
21426 + continue;
21427 +
21428 + /* Skip AES algorithms if not supported by device */
21429 + if (!priv->sec_attr.aes_acc_num &&
21430 + (alg_sel == OP_ALG_ALGSEL_AES))
21431 + continue;
21432 +
21433 + t_alg = caam_alg_alloc(alg);
21434 + if (IS_ERR(t_alg)) {
21435 + err = PTR_ERR(t_alg);
21436 + dev_warn(dev, "%s alg allocation failed: %d\n",
21437 + alg->driver_name, err);
21438 + continue;
21439 + }
21440 + t_alg->caam.dev = dev;
21441 +
21442 + err = crypto_register_alg(&t_alg->crypto_alg);
21443 + if (err) {
21444 + dev_warn(dev, "%s alg registration failed: %d\n",
21445 + t_alg->crypto_alg.cra_driver_name, err);
21446 + kfree(t_alg);
21447 + continue;
21448 + }
21449 +
21450 + list_add_tail(&t_alg->entry, &alg_list);
21451 + registered = true;
21452 + }
21453 +
21454 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
21455 + struct caam_aead_alg *t_alg = driver_aeads + i;
21456 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
21457 + OP_ALG_ALGSEL_MASK;
21458 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
21459 + OP_ALG_ALGSEL_MASK;
21460 +
21461 + /* Skip DES algorithms if not supported by device */
21462 + if (!priv->sec_attr.des_acc_num &&
21463 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
21464 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
21465 + continue;
21466 +
21467 + /* Skip AES algorithms if not supported by device */
21468 + if (!priv->sec_attr.aes_acc_num &&
21469 + (c1_alg_sel == OP_ALG_ALGSEL_AES))
21470 + continue;
21471 +
21472 + /*
21473 + * Skip algorithms requiring message digests
21474 + * if MD not supported by device.
21475 + */
21476 + if (!priv->sec_attr.md_acc_num && c2_alg_sel)
21477 + continue;
21478 +
21479 + t_alg->caam.dev = dev;
21480 + caam_aead_alg_init(t_alg);
21481 +
21482 + err = crypto_register_aead(&t_alg->aead);
21483 + if (err) {
21484 + dev_warn(dev, "%s alg registration failed: %d\n",
21485 + t_alg->aead.base.cra_driver_name, err);
21486 + continue;
21487 + }
21488 +
21489 + t_alg->registered = true;
21490 + registered = true;
21491 + }
21492 + if (registered)
21493 + dev_info(dev, "algorithms registered in /proc/crypto\n");
21494 +
21495 + /* register hash algorithms the device supports */
21496 + INIT_LIST_HEAD(&hash_list);
21497 +
21498 + /*
21499 + * Skip registration of any hashing algorithms if MD block
21500 + * is not present.
21501 + */
21502 + if (!priv->sec_attr.md_acc_num)
21503 + return 0;
21504 +
21505 + for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
21506 + struct caam_hash_alg *t_alg;
21507 + struct caam_hash_template *alg = driver_hash + i;
21508 +
21509 + /* register hmac version */
21510 + t_alg = caam_hash_alloc(dev, alg, true);
21511 + if (IS_ERR(t_alg)) {
21512 + err = PTR_ERR(t_alg);
21513 + dev_warn(dev, "%s hash alg allocation failed: %d\n",
21514 + alg->driver_name, err);
21515 + continue;
21516 + }
21517 +
21518 + err = crypto_register_ahash(&t_alg->ahash_alg);
21519 + if (err) {
21520 + dev_warn(dev, "%s alg registration failed: %d\n",
21521 + t_alg->ahash_alg.halg.base.cra_driver_name,
21522 + err);
21523 + kfree(t_alg);
21524 + } else {
21525 + list_add_tail(&t_alg->entry, &hash_list);
21526 + }
21527 +
21528 + /* register unkeyed version */
21529 + t_alg = caam_hash_alloc(dev, alg, false);
21530 + if (IS_ERR(t_alg)) {
21531 + err = PTR_ERR(t_alg);
21532 + dev_warn(dev, "%s alg allocation failed: %d\n",
21533 + alg->driver_name, err);
21534 + continue;
21535 + }
21536 +
21537 + err = crypto_register_ahash(&t_alg->ahash_alg);
21538 + if (err) {
21539 + dev_warn(dev, "%s alg registration failed: %d\n",
21540 + t_alg->ahash_alg.halg.base.cra_driver_name,
21541 + err);
21542 + kfree(t_alg);
21543 + } else {
21544 + list_add_tail(&t_alg->entry, &hash_list);
21545 + }
21546 + }
21547 + if (!list_empty(&hash_list))
21548 + dev_info(dev, "hash algorithms registered in /proc/crypto\n");
21549 +
21550 + return err;
21551 +
21552 +err_bind:
21553 + dpaa2_dpseci_dpio_free(priv);
21554 +err_dpio_setup:
21555 + dpaa2_dpseci_free(priv);
21556 +err_dpseci_setup:
21557 + free_percpu(priv->ppriv);
21558 +err_alloc_ppriv:
21559 + fsl_mc_portal_free(priv->mc_io);
21560 +err_dma_mask:
21561 + kmem_cache_destroy(qi_cache);
21562 +err_qicache:
21563 + dev_set_drvdata(dev, NULL);
21564 +
21565 + return err;
21566 +}
21567 +
21568 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
21569 +{
21570 + struct device *dev;
21571 + struct dpaa2_caam_priv *priv;
21572 + int i;
21573 +
21574 + dev = &ls_dev->dev;
21575 + priv = dev_get_drvdata(dev);
21576 +
21577 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
21578 + struct caam_aead_alg *t_alg = driver_aeads + i;
21579 +
21580 + if (t_alg->registered)
21581 + crypto_unregister_aead(&t_alg->aead);
21582 + }
21583 +
21584 + if (alg_list.next) {
21585 + struct caam_crypto_alg *t_alg, *n;
21586 +
21587 + list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
21588 + crypto_unregister_alg(&t_alg->crypto_alg);
21589 + list_del(&t_alg->entry);
21590 + kfree(t_alg);
21591 + }
21592 + }
21593 +
21594 + if (hash_list.next) {
21595 + struct caam_hash_alg *t_hash_alg, *p;
21596 +
21597 + list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
21598 + crypto_unregister_ahash(&t_hash_alg->ahash_alg);
21599 + list_del(&t_hash_alg->entry);
21600 + kfree(t_hash_alg);
21601 + }
21602 + }
21603 +
21604 + dpaa2_dpseci_disable(priv);
21605 + dpaa2_dpseci_dpio_free(priv);
21606 + dpaa2_dpseci_free(priv);
21607 + free_percpu(priv->ppriv);
21608 + fsl_mc_portal_free(priv->mc_io);
21609 + dev_set_drvdata(dev, NULL);
21610 + kmem_cache_destroy(qi_cache);
21611 +
21612 + return 0;
21613 +}
21614 +
21615 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
21616 +{
21617 + struct dpaa2_fd fd;
21618 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
21619 + int err = 0, i, id;
21620 +
21621 + if (IS_ERR(req))
21622 + return PTR_ERR(req);
21623 +
21624 + if (priv->cscn_mem) {
21625 + dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
21626 + DPAA2_CSCN_SIZE,
21627 + DMA_FROM_DEVICE);
21628 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
21629 + dev_dbg_ratelimited(dev, "Dropping request\n");
21630 + return -EBUSY;
21631 + }
21632 + }
21633 +
21634 + dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
21635 +
21636 + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
21637 + DMA_BIDIRECTIONAL);
21638 + if (dma_mapping_error(dev, req->fd_flt_dma)) {
21639 + dev_err(dev, "DMA mapping error for QI enqueue request\n");
21640 + goto err_out;
21641 + }
21642 +
21643 + memset(&fd, 0, sizeof(fd));
21644 + dpaa2_fd_set_format(&fd, dpaa2_fd_list);
21645 + dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
21646 + dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
21647 + dpaa2_fd_set_flc(&fd, req->flc_dma);
21648 +
21649 + /*
21650 + * There is no guarantee that preemption is disabled here,
21651 + * thus take action.
21652 + */
21653 + preempt_disable();
21654 + id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
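+ /* Editorial note (not in original patch): retry the enqueue a bounded number of times while the QMan portal reports -EBUSY. */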
21655 + for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
21656 + err = dpaa2_io_service_enqueue_fq(NULL,
21657 + priv->tx_queue_attr[id].fqid,
21658 + &fd);
21659 + if (err != -EBUSY)
21660 + break;
21661 + }
21662 + preempt_enable();
21663 +
21664 + if (unlikely(err < 0)) {
21665 + dev_err(dev, "Error enqueuing frame: %d\n", err);
21666 + goto err_out;
21667 + }
21668 +
21669 + return -EINPROGRESS;
21670 +
21671 +err_out:
21672 + dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
21673 + DMA_BIDIRECTIONAL);
21674 + return -EIO;
21675 +}
21676 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
21677 +
21678 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
21679 + {
21680 + .vendor = FSL_MC_VENDOR_FREESCALE,
21681 + .obj_type = "dpseci",
21682 + },
21683 + { .vendor = 0x0 }
21684 +};
21685 +
21686 +static struct fsl_mc_driver dpaa2_caam_driver = {
21687 + .driver = {
21688 + .name = KBUILD_MODNAME,
21689 + .owner = THIS_MODULE,
21690 + },
21691 + .probe = dpaa2_caam_probe,
21692 + .remove = dpaa2_caam_remove,
21693 + .match_id_table = dpaa2_caam_match_id_table
21694 +};
21695 +
21696 +MODULE_LICENSE("Dual BSD/GPL");
21697 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
21698 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
21699 +
21700 +module_fsl_mc_driver(dpaa2_caam_driver);
21701 --- /dev/null
21702 +++ b/drivers/crypto/caam/caamalg_qi2.h
21703 @@ -0,0 +1,283 @@
21704 +/*
21705 + * Copyright 2015-2016 Freescale Semiconductor Inc.
21706 + * Copyright 2017 NXP
21707 + *
21708 + * Redistribution and use in source and binary forms, with or without
21709 + * modification, are permitted provided that the following conditions are met:
21710 + * * Redistributions of source code must retain the above copyright
21711 + * notice, this list of conditions and the following disclaimer.
21712 + * * Redistributions in binary form must reproduce the above copyright
21713 + * notice, this list of conditions and the following disclaimer in the
21714 + * documentation and/or other materials provided with the distribution.
21715 + * * Neither the names of the above-listed copyright holders nor the
21716 + * names of any contributors may be used to endorse or promote products
21717 + * derived from this software without specific prior written permission.
21718 + *
21719 + *
21720 + * ALTERNATIVELY, this software may be distributed under the terms of the
21721 + * GNU General Public License ("GPL") as published by the Free Software
21722 + * Foundation, either version 2 of that License or (at your option) any
21723 + * later version.
21724 + *
21725 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21726 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21727 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21728 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
21729 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21730 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21731 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21732 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21733 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21734 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21735 + * POSSIBILITY OF SUCH DAMAGE.
21736 + */
21737 +
21738 +#ifndef _CAAMALG_QI2_H_
21739 +#define _CAAMALG_QI2_H_
21740 +
21741 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
21742 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
21743 +#include <linux/threads.h>
21744 +#include "dpseci.h"
21745 +#include "desc_constr.h"
21746 +
21747 +#define DPAA2_CAAM_STORE_SIZE 16
21748 +/* NAPI weight *must* be a multiple of the store size. */
21749 +#define DPAA2_CAAM_NAPI_WEIGHT 64
21750 +
21751 +/* The congestion entrance threshold was chosen so that on LS2088
21752 + * we support the maximum throughput for the available memory
21753 + */
21754 +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
21755 +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
21756 +
21757 +/**
21758 + * dpaa2_caam_priv - driver private data
21759 + * @dpsec_id: DPSECI object unique ID
21760 + * @major_ver: DPSECI major version
21761 + * @minor_ver: DPSECI minor version
21762 + * @dpseci_attr: DPSECI attributes
21763 + * @sec_attr: SEC engine attributes
21764 + * @rx_queue_attr: array of Rx queue attributes
21765 + * @tx_queue_attr: array of Tx queue attributes
21766 + * @cscn_mem: pointer to memory region containing the
21767 + *	dpaa2_cscn struct; its size is larger than
21768 + * sizeof(struct dpaa2_cscn) to accommodate alignment
21769 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
21770 + * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
21771 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
21772 + * @dev: device associated with the DPSECI object
21773 + * @mc_io: pointer to MC portal's I/O object
21774 + * @domain: IOMMU domain
21775 + * @ppriv: per-CPU pointers to private data
21776 + */
21777 +struct dpaa2_caam_priv {
21778 + int dpsec_id;
21779 +
21780 + u16 major_ver;
21781 + u16 minor_ver;
21782 +
21783 + struct dpseci_attr dpseci_attr;
21784 + struct dpseci_sec_attr sec_attr;
21785 + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_PRIO_NUM];
21786 + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_PRIO_NUM];
21787 + int num_pairs;
21788 +
21789 + /* congestion */
21790 + void *cscn_mem;
21791 + void *cscn_mem_aligned;
21792 + dma_addr_t cscn_dma;
21793 +
21794 + struct device *dev;
21795 + struct fsl_mc_io *mc_io;
21796 + struct iommu_domain *domain;
21797 +
21798 + struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
21799 +};
21800 +
21801 +/**
21802 + * dpaa2_caam_priv_per_cpu - per CPU private data
21803 + * @napi: napi structure
21804 + * @net_dev: netdev used by napi
21805 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
21806 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
21807 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
21808 + * @nctx: notification context of response FQ
21809 + * @store: where dequeued frames are stored
21810 + * @priv: backpointer to dpaa2_caam_priv
21811 + */
21812 +struct dpaa2_caam_priv_per_cpu {
21813 + struct napi_struct napi;
21814 + struct net_device net_dev;
21815 + int req_fqid;
21816 + int rsp_fqid;
21817 + int prio;
21818 + struct dpaa2_io_notification_ctx nctx;
21819 + struct dpaa2_io_store *store;
21820 + struct dpaa2_caam_priv *priv;
21821 +};
21822 +
21823 +/*
21824 + * The CAAM QI hardware constructs a job descriptor which points
21825 + * to the shared descriptor (as pointed to by context_a of the FQ to CAAM).
21826 + * When the job descriptor is executed by deco, the whole job
21827 + * descriptor together with the shared descriptor gets loaded into the
21828 + * deco buffer, which is 64 words long (each 32-bit).
21829 + *
21830 + * The job descriptor constructed by QI hardware has layout:
21831 + *
21832 + * HEADER (1 word)
21833 + * Shdesc ptr (1 or 2 words)
21834 + * SEQ_OUT_PTR (1 word)
21835 + * Out ptr (1 or 2 words)
21836 + * Out length (1 word)
21837 + * SEQ_IN_PTR (1 word)
21838 + * In ptr (1 or 2 words)
21839 + * In length (1 word)
21840 + *
21841 + * The shdesc ptr is used to fetch shared descriptor contents
21842 + * into deco buffer.
21843 + *
21844 + * Apart from the shdesc contents, the total number of words that
21845 + * get loaded into the deco buffer is '8' or '11'. The remaining words
21846 + * in the deco buffer can be used for storing the shared descriptor.
21847 + */
21848 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
21849 +
21850 +/* Length of a single buffer in the QI driver memory cache */
21851 +#define CAAM_QI_MEMCACHE_SIZE 512
21852 +
21853 +/*
21854 + * aead_edesc - s/w-extended aead descriptor
21855 + * @src_nents: number of segments in input scatterlist
21856 + * @dst_nents: number of segments in output scatterlist
21857 + * @iv_dma: dma address of iv for checking continuity and link table
21858 + * @qm_sg_bytes: length of dma mapped h/w link table
21859 + * @qm_sg_dma: bus physical mapped address of h/w link table
21860 + * @assoclen: associated data length, in CAAM endianness
21861 + * @assoclen_dma: bus physical mapped address of req->assoclen
21862 + * @sgt: the h/w link table
21863 + */
21864 +struct aead_edesc {
21865 + int src_nents;
21866 + int dst_nents;
21867 + dma_addr_t iv_dma;
21868 + int qm_sg_bytes;
21869 + dma_addr_t qm_sg_dma;
21870 + unsigned int assoclen;
21871 + dma_addr_t assoclen_dma;
21872 +#define CAAM_QI_MAX_AEAD_SG \
21873 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
21874 + sizeof(struct dpaa2_sg_entry))
21875 + struct dpaa2_sg_entry sgt[0];
21876 +};
21877 +
21878 +/*
21879 + * tls_edesc - s/w-extended tls descriptor
21880 + * @src_nents: number of segments in input scatterlist
21881 + * @dst_nents: number of segments in output scatterlist
21882 + * @iv_dma: dma address of iv for checking continuity and link table
21883 + * @qm_sg_bytes: length of dma mapped h/w link table
21884 + * @qm_sg_dma: bus physical mapped address of h/w link table
21885 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
21886 + * @dst: pointer to output scatterlist, useful for unmapping
21887 + * @sgt: the h/w link table
21888 + */
21889 +struct tls_edesc {
21890 + int src_nents;
21891 + int dst_nents;
21892 + dma_addr_t iv_dma;
21893 + int qm_sg_bytes;
21894 + dma_addr_t qm_sg_dma;
21895 + struct scatterlist tmp[2];
21896 + struct scatterlist *dst;
21897 + struct dpaa2_sg_entry sgt[0];
21898 +};
21899 +
21900 +/*
21901 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
21902 + * @src_nents: number of segments in input scatterlist
21903 + * @dst_nents: number of segments in output scatterlist
21904 + * @iv_dma: dma address of iv for checking continuity and link table
21905 + * @qm_sg_bytes: length of dma mapped qm_sg space
21906 + * @qm_sg_dma: I/O virtual address of h/w link table
21907 + * @sgt: the h/w link table
21908 + */
21909 +struct ablkcipher_edesc {
21910 + int src_nents;
21911 + int dst_nents;
21912 + dma_addr_t iv_dma;
21913 + int qm_sg_bytes;
21914 + dma_addr_t qm_sg_dma;
21915 +#define CAAM_QI_MAX_ABLKCIPHER_SG \
21916 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
21917 + sizeof(struct dpaa2_sg_entry))
21918 + struct dpaa2_sg_entry sgt[0];
21919 +};
21920 +
21921 +/*
21922 + * ahash_edesc - s/w-extended ahash descriptor
21923 + * @dst_dma: I/O virtual address of req->result
21924 + * @qm_sg_dma: I/O virtual address of h/w link table
21925 + * @src_nents: number of segments in input scatterlist
21926 + * @qm_sg_bytes: length of dma mapped qm_sg space
21927 + * @sgt: pointer to h/w link table
21928 + */
21929 +struct ahash_edesc {
21930 + dma_addr_t dst_dma;
21931 + dma_addr_t qm_sg_dma;
21932 + int src_nents;
21933 + int qm_sg_bytes;
21934 + struct dpaa2_sg_entry sgt[0];
21935 +};
21936 +
21937 +/**
21938 + * caam_flc - Flow Context (FLC)
21939 + * @flc: Flow Context options
21940 + * @sh_desc: Shared Descriptor
21941 + */
21942 +struct caam_flc {
21943 + u32 flc[16];
21944 + u32 sh_desc[MAX_SDLEN];
21945 +} ____cacheline_aligned;
21946 +
21947 +enum optype {
21948 + ENCRYPT = 0,
21949 + DECRYPT,
21950 + GIVENCRYPT,
21951 + NUM_OP
21952 +};
21953 +
21954 +/**
21955 + * caam_request - the request structure the application should fill when
21956 + * submitting a job to the driver.
21957 + * @fd_flt: Frame list table defining input and output
21958 + * fd_flt[0] - FLE pointing to output buffer
21959 + * fd_flt[1] - FLE pointing to input buffer
21960 + * @fd_flt_dma: DMA address for the frame list table
21961 + * @flc: Flow Context
21962 + * @flc_dma: I/O virtual address of Flow Context
21963 + * @op_type: operation type
21964 + * @cbk: Callback function to invoke when job is completed
21965 + * @ctx: arbitrary context attached to the request by the application
21966 + * @edesc: extended descriptor; points to one of {ablkcipher,aead}_edesc
21967 + */
21968 +struct caam_request {
21969 + struct dpaa2_fl_entry fd_flt[2];
21970 + dma_addr_t fd_flt_dma;
21971 + struct caam_flc *flc;
21972 + dma_addr_t flc_dma;
21973 + enum optype op_type;
21974 + void (*cbk)(void *ctx, u32 err);
21975 + void *ctx;
21976 + void *edesc;
21977 +};
21978 +
21979 +/**
21980 + * dpaa2_caam_enqueue() - enqueue a crypto request
21981 + * @dev: device associated with the DPSECI object
21982 + * @req: pointer to caam_request
21983 + */
21984 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
21985 +
21986 +#endif /* _CAAMALG_QI2_H_ */
21987 --- a/drivers/crypto/caam/caamhash.c
21988 +++ b/drivers/crypto/caam/caamhash.c
21989 @@ -62,6 +62,7 @@
21990 #include "error.h"
21991 #include "sg_sw_sec4.h"
21992 #include "key_gen.h"
21993 +#include "caamhash_desc.h"
21994
21995 #define CAAM_CRA_PRIORITY 3000
21996
21997 @@ -71,14 +72,6 @@
21998 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
21999 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
22000
22001 -/* length of descriptors text */
22002 -#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
22003 -#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
22004 -#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
22005 -#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
22006 -#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
22007 -#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
22008 -
22009 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
22010 CAAM_MAX_HASH_KEY_SIZE)
22011 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
22012 @@ -103,20 +96,15 @@ struct caam_hash_ctx {
22013 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
22014 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
22015 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
22016 - u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
22017 dma_addr_t sh_desc_update_dma ____cacheline_aligned;
22018 dma_addr_t sh_desc_update_first_dma;
22019 dma_addr_t sh_desc_fin_dma;
22020 dma_addr_t sh_desc_digest_dma;
22021 - dma_addr_t sh_desc_finup_dma;
22022 + enum dma_data_direction dir;
22023 struct device *jrdev;
22024 - u32 alg_type;
22025 - u32 alg_op;
22026 u8 key[CAAM_MAX_HASH_KEY_SIZE];
22027 - dma_addr_t key_dma;
22028 int ctx_len;
22029 - unsigned int split_key_len;
22030 - unsigned int split_key_pad_len;
22031 + struct alginfo adata;
22032 };
22033
22034 /* ahash state */
22035 @@ -143,6 +131,31 @@ struct caam_export_state {
22036 int (*finup)(struct ahash_request *req);
22037 };
22038
22039 +static inline void switch_buf(struct caam_hash_state *state)
22040 +{
22041 + state->current_buf ^= 1;
22042 +}
22043 +
22044 +static inline u8 *current_buf(struct caam_hash_state *state)
22045 +{
22046 + return state->current_buf ? state->buf_1 : state->buf_0;
22047 +}
22048 +
22049 +static inline u8 *alt_buf(struct caam_hash_state *state)
22050 +{
22051 + return state->current_buf ? state->buf_0 : state->buf_1;
22052 +}
22053 +
22054 +static inline int *current_buflen(struct caam_hash_state *state)
22055 +{
22056 + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
22057 +}
22058 +
22059 +static inline int *alt_buflen(struct caam_hash_state *state)
22060 +{
22061 + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
22062 +}
22063 +
22064 /* Common job descriptor seq in/out ptr routines */
22065
22066 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
22067 @@ -175,40 +188,31 @@ static inline dma_addr_t map_seq_out_ptr
22068 return dst_dma;
22069 }
22070
22071 -/* Map current buffer in state and put it in link table */
22072 -static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
22073 - struct sec4_sg_entry *sec4_sg,
22074 - u8 *buf, int buflen)
22075 +/* Map current buffer in state (if length > 0) and put it in link table */
22076 +static inline int buf_map_to_sec4_sg(struct device *jrdev,
22077 + struct sec4_sg_entry *sec4_sg,
22078 + struct caam_hash_state *state)
22079 {
22080 - dma_addr_t buf_dma;
22081 + int buflen = *current_buflen(state);
22082
22083 - buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
22084 - dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
22085 + if (!buflen)
22086 + return 0;
22087
22088 - return buf_dma;
22089 -}
22090 + state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
22091 + DMA_TO_DEVICE);
22092 + if (dma_mapping_error(jrdev, state->buf_dma)) {
22093 + dev_err(jrdev, "unable to map buf\n");
22094 + state->buf_dma = 0;
22095 + return -ENOMEM;
22096 + }
22097
22098 -/*
22099 - * Only put buffer in link table if it contains data, which is possible,
22100 - * since a buffer has previously been used, and needs to be unmapped,
22101 - */
22102 -static inline dma_addr_t
22103 -try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
22104 - u8 *buf, dma_addr_t buf_dma, int buflen,
22105 - int last_buflen)
22106 -{
22107 - if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
22108 - dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
22109 - if (buflen)
22110 - buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
22111 - else
22112 - buf_dma = 0;
22113 + dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
22114
22115 - return buf_dma;
22116 + return 0;
22117 }
22118
22119 /* Map state->caam_ctx, and add it to link table */
22120 -static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
22121 +static inline int ctx_map_to_sec4_sg(struct device *jrdev,
22122 struct caam_hash_state *state, int ctx_len,
22123 struct sec4_sg_entry *sec4_sg, u32 flag)
22124 {
22125 @@ -224,124 +228,22 @@ static inline int ctx_map_to_sec4_sg(u32
22126 return 0;
22127 }
22128
22129 -/* Common shared descriptor commands */
22130 -static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
22131 -{
22132 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
22133 - ctx->split_key_len, CLASS_2 |
22134 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
22135 -}
22136 -
22137 -/* Append key if it has been set */
22138 -static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
22139 -{
22140 - u32 *key_jump_cmd;
22141 -
22142 - init_sh_desc(desc, HDR_SHARE_SERIAL);
22143 -
22144 - if (ctx->split_key_len) {
22145 - /* Skip if already shared */
22146 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
22147 - JUMP_COND_SHRD);
22148 -
22149 - append_key_ahash(desc, ctx);
22150 -
22151 - set_jump_tgt_here(desc, key_jump_cmd);
22152 - }
22153 -
22154 - /* Propagate errors from shared to job descriptor */
22155 - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
22156 -}
22157 -
22158 -/*
22159 - * For ahash read data from seqin following state->caam_ctx,
22160 - * and write resulting class2 context to seqout, which may be state->caam_ctx
22161 - * or req->result
22162 - */
22163 -static inline void ahash_append_load_str(u32 *desc, int digestsize)
22164 -{
22165 - /* Calculate remaining bytes to read */
22166 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
22167 -
22168 - /* Read remaining bytes */
22169 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
22170 - FIFOLD_TYPE_MSG | KEY_VLF);
22171 -
22172 - /* Store class2 context bytes */
22173 - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
22174 - LDST_SRCDST_BYTE_CONTEXT);
22175 -}
22176 -
22177 -/*
22178 - * For ahash update, final and finup, import context, read and write to seqout
22179 - */
22180 -static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
22181 - int digestsize,
22182 - struct caam_hash_ctx *ctx)
22183 -{
22184 - init_sh_desc_key_ahash(desc, ctx);
22185 -
22186 - /* Import context from software */
22187 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
22188 - LDST_CLASS_2_CCB | ctx->ctx_len);
22189 -
22190 - /* Class 2 operation */
22191 - append_operation(desc, op | state | OP_ALG_ENCRYPT);
22192 -
22193 - /*
22194 - * Load from buf and/or src and write to req->result or state->context
22195 - */
22196 - ahash_append_load_str(desc, digestsize);
22197 -}
22198 -
22199 -/* For ahash firsts and digest, read and write to seqout */
22200 -static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
22201 - int digestsize, struct caam_hash_ctx *ctx)
22202 -{
22203 - init_sh_desc_key_ahash(desc, ctx);
22204 -
22205 - /* Class 2 operation */
22206 - append_operation(desc, op | state | OP_ALG_ENCRYPT);
22207 -
22208 - /*
22209 - * Load from buf and/or src and write to req->result or state->context
22210 - */
22211 - ahash_append_load_str(desc, digestsize);
22212 -}
22213 -
22214 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
22215 {
22216 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22217 int digestsize = crypto_ahash_digestsize(ahash);
22218 struct device *jrdev = ctx->jrdev;
22219 - u32 have_key = 0;
22220 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
22221 u32 *desc;
22222
22223 - if (ctx->split_key_len)
22224 - have_key = OP_ALG_AAI_HMAC_PRECOMP;
22225 + ctx->adata.key_virt = ctx->key;
22226
22227 /* ahash_update shared descriptor */
22228 desc = ctx->sh_desc_update;
22229 -
22230 - init_sh_desc(desc, HDR_SHARE_SERIAL);
22231 -
22232 - /* Import context from software */
22233 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
22234 - LDST_CLASS_2_CCB | ctx->ctx_len);
22235 -
22236 - /* Class 2 operation */
22237 - append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
22238 - OP_ALG_ENCRYPT);
22239 -
22240 - /* Load data and write to result or context */
22241 - ahash_append_load_str(desc, ctx->ctx_len);
22242 -
22243 - ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
22244 - DMA_TO_DEVICE);
22245 - if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
22246 - dev_err(jrdev, "unable to map shared descriptor\n");
22247 - return -ENOMEM;
22248 - }
22249 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
22250 + ctx->ctx_len, true, ctrlpriv->era);
22251 + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
22252 + desc_bytes(desc), ctx->dir);
22253 #ifdef DEBUG
22254 print_hex_dump(KERN_ERR,
22255 "ahash update shdesc@"__stringify(__LINE__)": ",
22256 @@ -350,17 +252,10 @@ static int ahash_set_sh_desc(struct cryp
22257
22258 /* ahash_update_first shared descriptor */
22259 desc = ctx->sh_desc_update_first;
22260 -
22261 - ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
22262 - ctx->ctx_len, ctx);
22263 -
22264 - ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
22265 - desc_bytes(desc),
22266 - DMA_TO_DEVICE);
22267 - if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
22268 - dev_err(jrdev, "unable to map shared descriptor\n");
22269 - return -ENOMEM;
22270 - }
22271 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
22272 + ctx->ctx_len, false, ctrlpriv->era);
22273 + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
22274 + desc_bytes(desc), ctx->dir);
22275 #ifdef DEBUG
22276 print_hex_dump(KERN_ERR,
22277 "ahash update first shdesc@"__stringify(__LINE__)": ",
22278 @@ -369,53 +264,22 @@ static int ahash_set_sh_desc(struct cryp
22279
22280 /* ahash_final shared descriptor */
22281 desc = ctx->sh_desc_fin;
22282 -
22283 - ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
22284 - OP_ALG_AS_FINALIZE, digestsize, ctx);
22285 -
22286 - ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
22287 - DMA_TO_DEVICE);
22288 - if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
22289 - dev_err(jrdev, "unable to map shared descriptor\n");
22290 - return -ENOMEM;
22291 - }
22292 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
22293 + ctx->ctx_len, true, ctrlpriv->era);
22294 + dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
22295 + desc_bytes(desc), ctx->dir);
22296 #ifdef DEBUG
22297 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
22298 DUMP_PREFIX_ADDRESS, 16, 4, desc,
22299 desc_bytes(desc), 1);
22300 #endif
22301
22302 - /* ahash_finup shared descriptor */
22303 - desc = ctx->sh_desc_finup;
22304 -
22305 - ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
22306 - OP_ALG_AS_FINALIZE, digestsize, ctx);
22307 -
22308 - ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
22309 - DMA_TO_DEVICE);
22310 - if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
22311 - dev_err(jrdev, "unable to map shared descriptor\n");
22312 - return -ENOMEM;
22313 - }
22314 -#ifdef DEBUG
22315 - print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
22316 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
22317 - desc_bytes(desc), 1);
22318 -#endif
22319 -
22320 /* ahash_digest shared descriptor */
22321 desc = ctx->sh_desc_digest;
22322 -
22323 - ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
22324 - digestsize, ctx);
22325 -
22326 - ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
22327 - desc_bytes(desc),
22328 - DMA_TO_DEVICE);
22329 - if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
22330 - dev_err(jrdev, "unable to map shared descriptor\n");
22331 - return -ENOMEM;
22332 - }
22333 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
22334 + ctx->ctx_len, false, ctrlpriv->era);
22335 + dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
22336 + desc_bytes(desc), ctx->dir);
22337 #ifdef DEBUG
22338 print_hex_dump(KERN_ERR,
22339 "ahash digest shdesc@"__stringify(__LINE__)": ",
22340 @@ -426,14 +290,6 @@ static int ahash_set_sh_desc(struct cryp
22341 return 0;
22342 }
22343
22344 -static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
22345 - u32 keylen)
22346 -{
22347 - return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
22348 - ctx->split_key_pad_len, key_in, keylen,
22349 - ctx->alg_op);
22350 -}
22351 -
22352 /* Digest hash size if it is too large */
22353 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
22354 u32 *keylen, u8 *key_out, u32 digestsize)
22355 @@ -469,7 +325,7 @@ static int hash_digest_key(struct caam_h
22356 }
22357
22358 /* Job descriptor to perform unkeyed hash on key_in */
22359 - append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
22360 + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
22361 OP_ALG_AS_INITFINAL);
22362 append_seq_in_ptr(desc, src_dma, *keylen, 0);
22363 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
22364 @@ -513,12 +369,10 @@ static int hash_digest_key(struct caam_h
22365 static int ahash_setkey(struct crypto_ahash *ahash,
22366 const u8 *key, unsigned int keylen)
22367 {
22368 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
22369 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
22370 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22371 - struct device *jrdev = ctx->jrdev;
22372 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
22373 int digestsize = crypto_ahash_digestsize(ahash);
22374 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
22375 int ret;
22376 u8 *hashed_key = NULL;
22377
22378 @@ -539,43 +393,29 @@ static int ahash_setkey(struct crypto_ah
22379 key = hashed_key;
22380 }
22381
22382 - /* Pick class 2 key length from algorithm submask */
22383 - ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
22384 - OP_ALG_ALGSEL_SHIFT] * 2;
22385 - ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
22386 -
22387 -#ifdef DEBUG
22388 - printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
22389 - ctx->split_key_len, ctx->split_key_pad_len);
22390 - print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
22391 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
22392 -#endif
22393 + /*
22394 + * If DKP is supported, use it in the shared descriptor to generate
22395 + * the split key.
22396 + */
22397 + if (ctrlpriv->era >= 6) {
22398 + ctx->adata.key_inline = true;
22399 + ctx->adata.keylen = keylen;
22400 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
22401 + OP_ALG_ALGSEL_MASK);
22402
22403 - ret = gen_split_hash_key(ctx, key, keylen);
22404 - if (ret)
22405 - goto bad_free_key;
22406 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
22407 + goto bad_free_key;
22408
22409 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
22410 - DMA_TO_DEVICE);
22411 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
22412 - dev_err(jrdev, "unable to map key i/o memory\n");
22413 - ret = -ENOMEM;
22414 - goto error_free_key;
22415 + memcpy(ctx->key, key, keylen);
22416 + } else {
22417 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
22418 + keylen, CAAM_MAX_HASH_KEY_SIZE);
22419 + if (ret)
22420 + goto bad_free_key;
22421 }
22422 -#ifdef DEBUG
22423 - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
22424 - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
22425 - ctx->split_key_pad_len, 1);
22426 -#endif
22427
22428 - ret = ahash_set_sh_desc(ahash);
22429 - if (ret) {
22430 - dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
22431 - DMA_TO_DEVICE);
22432 - }
22433 - error_free_key:
22434 kfree(hashed_key);
22435 - return ret;
22436 + return ahash_set_sh_desc(ahash);
22437 bad_free_key:
22438 kfree(hashed_key);
22439 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
22440 @@ -604,6 +444,8 @@ static inline void ahash_unmap(struct de
22441 struct ahash_edesc *edesc,
22442 struct ahash_request *req, int dst_len)
22443 {
22444 + struct caam_hash_state *state = ahash_request_ctx(req);
22445 +
22446 if (edesc->src_nents)
22447 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
22448 if (edesc->dst_dma)
22449 @@ -612,6 +454,12 @@ static inline void ahash_unmap(struct de
22450 if (edesc->sec4_sg_bytes)
22451 dma_unmap_single(dev, edesc->sec4_sg_dma,
22452 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
22453 +
22454 + if (state->buf_dma) {
22455 + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
22456 + DMA_TO_DEVICE);
22457 + state->buf_dma = 0;
22458 + }
22459 }
22460
22461 static inline void ahash_unmap_ctx(struct device *dev,
22462 @@ -643,8 +491,7 @@ static void ahash_done(struct device *jr
22463 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22464 #endif
22465
22466 - edesc = (struct ahash_edesc *)((char *)desc -
22467 - offsetof(struct ahash_edesc, hw_desc));
22468 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22469 if (err)
22470 caam_jr_strstatus(jrdev, err);
22471
22472 @@ -671,19 +518,19 @@ static void ahash_done_bi(struct device
22473 struct ahash_edesc *edesc;
22474 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
22475 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22476 -#ifdef DEBUG
22477 struct caam_hash_state *state = ahash_request_ctx(req);
22478 +#ifdef DEBUG
22479 int digestsize = crypto_ahash_digestsize(ahash);
22480
22481 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22482 #endif
22483
22484 - edesc = (struct ahash_edesc *)((char *)desc -
22485 - offsetof(struct ahash_edesc, hw_desc));
22486 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22487 if (err)
22488 caam_jr_strstatus(jrdev, err);
22489
22490 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
22491 + switch_buf(state);
22492 kfree(edesc);
22493
22494 #ifdef DEBUG
22495 @@ -713,8 +560,7 @@ static void ahash_done_ctx_src(struct de
22496 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22497 #endif
22498
22499 - edesc = (struct ahash_edesc *)((char *)desc -
22500 - offsetof(struct ahash_edesc, hw_desc));
22501 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22502 if (err)
22503 caam_jr_strstatus(jrdev, err);
22504
22505 @@ -741,19 +587,19 @@ static void ahash_done_ctx_dst(struct de
22506 struct ahash_edesc *edesc;
22507 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
22508 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22509 -#ifdef DEBUG
22510 struct caam_hash_state *state = ahash_request_ctx(req);
22511 +#ifdef DEBUG
22512 int digestsize = crypto_ahash_digestsize(ahash);
22513
22514 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22515 #endif
22516
22517 - edesc = (struct ahash_edesc *)((char *)desc -
22518 - offsetof(struct ahash_edesc, hw_desc));
22519 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22520 if (err)
22521 caam_jr_strstatus(jrdev, err);
22522
22523 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
22524 + switch_buf(state);
22525 kfree(edesc);
22526
22527 #ifdef DEBUG
22528 @@ -835,13 +681,12 @@ static int ahash_update_ctx(struct ahash
22529 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22530 struct caam_hash_state *state = ahash_request_ctx(req);
22531 struct device *jrdev = ctx->jrdev;
22532 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22533 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22534 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22535 - int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
22536 - u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
22537 - int *next_buflen = state->current_buf ? &state->buflen_0 :
22538 - &state->buflen_1, last_buflen;
22539 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22540 + GFP_KERNEL : GFP_ATOMIC;
22541 + u8 *buf = current_buf(state);
22542 + int *buflen = current_buflen(state);
22543 + u8 *next_buf = alt_buf(state);
22544 + int *next_buflen = alt_buflen(state), last_buflen;
22545 int in_len = *buflen + req->nbytes, to_hash;
22546 u32 *desc;
22547 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
22548 @@ -890,15 +735,14 @@ static int ahash_update_ctx(struct ahash
22549 edesc->src_nents = src_nents;
22550 edesc->sec4_sg_bytes = sec4_sg_bytes;
22551
22552 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
22553 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
22554 edesc->sec4_sg, DMA_BIDIRECTIONAL);
22555 if (ret)
22556 goto unmap_ctx;
22557
22558 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
22559 - edesc->sec4_sg + 1,
22560 - buf, state->buf_dma,
22561 - *buflen, last_buflen);
22562 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
22563 + if (ret)
22564 + goto unmap_ctx;
22565
22566 if (mapped_nents) {
22567 sg_to_sec4_sg_last(req->src, mapped_nents,
22568 @@ -909,12 +753,10 @@ static int ahash_update_ctx(struct ahash
22569 to_hash - *buflen,
22570 *next_buflen, 0);
22571 } else {
22572 - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
22573 - cpu_to_caam32(SEC4_SG_LEN_FIN);
22574 + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
22575 + 1);
22576 }
22577
22578 - state->current_buf = !state->current_buf;
22579 -
22580 desc = edesc->hw_desc;
22581
22582 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
22583 @@ -969,12 +811,9 @@ static int ahash_final_ctx(struct ahash_
22584 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22585 struct caam_hash_state *state = ahash_request_ctx(req);
22586 struct device *jrdev = ctx->jrdev;
22587 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22588 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22589 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22590 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22591 - int last_buflen = state->current_buf ? state->buflen_0 :
22592 - state->buflen_1;
22593 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22594 + GFP_KERNEL : GFP_ATOMIC;
22595 + int buflen = *current_buflen(state);
22596 u32 *desc;
22597 int sec4_sg_bytes, sec4_sg_src_index;
22598 int digestsize = crypto_ahash_digestsize(ahash);
22599 @@ -994,18 +833,17 @@ static int ahash_final_ctx(struct ahash_
22600 desc = edesc->hw_desc;
22601
22602 edesc->sec4_sg_bytes = sec4_sg_bytes;
22603 - edesc->src_nents = 0;
22604
22605 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
22606 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
22607 edesc->sec4_sg, DMA_TO_DEVICE);
22608 if (ret)
22609 goto unmap_ctx;
22610
22611 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
22612 - buf, state->buf_dma, buflen,
22613 - last_buflen);
22614 - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
22615 - cpu_to_caam32(SEC4_SG_LEN_FIN);
22616 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
22617 + if (ret)
22618 + goto unmap_ctx;
22619 +
22620 + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
22621
22622 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
22623 sec4_sg_bytes, DMA_TO_DEVICE);
22624 @@ -1048,12 +886,9 @@ static int ahash_finup_ctx(struct ahash_
22625 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22626 struct caam_hash_state *state = ahash_request_ctx(req);
22627 struct device *jrdev = ctx->jrdev;
22628 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22629 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22630 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22631 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22632 - int last_buflen = state->current_buf ? state->buflen_0 :
22633 - state->buflen_1;
22634 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22635 + GFP_KERNEL : GFP_ATOMIC;
22636 + int buflen = *current_buflen(state);
22637 u32 *desc;
22638 int sec4_sg_src_index;
22639 int src_nents, mapped_nents;
22640 @@ -1082,7 +917,7 @@ static int ahash_finup_ctx(struct ahash_
22641
22642 /* allocate space for base edesc and hw desc commands, link tables */
22643 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
22644 - ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
22645 + ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
22646 flags);
22647 if (!edesc) {
22648 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
22649 @@ -1093,14 +928,14 @@ static int ahash_finup_ctx(struct ahash_
22650
22651 edesc->src_nents = src_nents;
22652
22653 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
22654 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
22655 edesc->sec4_sg, DMA_TO_DEVICE);
22656 if (ret)
22657 goto unmap_ctx;
22658
22659 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
22660 - buf, state->buf_dma, buflen,
22661 - last_buflen);
22662 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
22663 + if (ret)
22664 + goto unmap_ctx;
22665
22666 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
22667 sec4_sg_src_index, ctx->ctx_len + buflen,
22668 @@ -1136,15 +971,18 @@ static int ahash_digest(struct ahash_req
22669 {
22670 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
22671 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22672 + struct caam_hash_state *state = ahash_request_ctx(req);
22673 struct device *jrdev = ctx->jrdev;
22674 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22675 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22676 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22677 + GFP_KERNEL : GFP_ATOMIC;
22678 u32 *desc;
22679 int digestsize = crypto_ahash_digestsize(ahash);
22680 int src_nents, mapped_nents;
22681 struct ahash_edesc *edesc;
22682 int ret;
22683
22684 + state->buf_dma = 0;
22685 +
22686 src_nents = sg_nents_for_len(req->src, req->nbytes);
22687 if (src_nents < 0) {
22688 dev_err(jrdev, "Invalid number of src SG.\n");
22689 @@ -1215,10 +1053,10 @@ static int ahash_final_no_ctx(struct aha
22690 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22691 struct caam_hash_state *state = ahash_request_ctx(req);
22692 struct device *jrdev = ctx->jrdev;
22693 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22694 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22695 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22696 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22697 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22698 + GFP_KERNEL : GFP_ATOMIC;
22699 + u8 *buf = current_buf(state);
22700 + int buflen = *current_buflen(state);
22701 u32 *desc;
22702 int digestsize = crypto_ahash_digestsize(ahash);
22703 struct ahash_edesc *edesc;
22704 @@ -1246,7 +1084,6 @@ static int ahash_final_no_ctx(struct aha
22705 dev_err(jrdev, "unable to map dst\n");
22706 goto unmap;
22707 }
22708 - edesc->src_nents = 0;
22709
22710 #ifdef DEBUG
22711 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
22712 @@ -1276,13 +1113,12 @@ static int ahash_update_no_ctx(struct ah
22713 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22714 struct caam_hash_state *state = ahash_request_ctx(req);
22715 struct device *jrdev = ctx->jrdev;
22716 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22717 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22718 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22719 - int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
22720 - u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
22721 - int *next_buflen = state->current_buf ? &state->buflen_0 :
22722 - &state->buflen_1;
22723 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22724 + GFP_KERNEL : GFP_ATOMIC;
22725 + u8 *buf = current_buf(state);
22726 + int *buflen = current_buflen(state);
22727 + u8 *next_buf = alt_buf(state);
22728 + int *next_buflen = alt_buflen(state);
22729 int in_len = *buflen + req->nbytes, to_hash;
22730 int sec4_sg_bytes, src_nents, mapped_nents;
22731 struct ahash_edesc *edesc;
22732 @@ -1329,10 +1165,11 @@ static int ahash_update_no_ctx(struct ah
22733
22734 edesc->src_nents = src_nents;
22735 edesc->sec4_sg_bytes = sec4_sg_bytes;
22736 - edesc->dst_dma = 0;
22737
22738 - state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
22739 - buf, *buflen);
22740 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
22741 + if (ret)
22742 + goto unmap_ctx;
22743 +
22744 sg_to_sec4_sg_last(req->src, mapped_nents,
22745 edesc->sec4_sg + 1, 0);
22746
22747 @@ -1342,8 +1179,6 @@ static int ahash_update_no_ctx(struct ah
22748 *next_buflen, 0);
22749 }
22750
22751 - state->current_buf = !state->current_buf;
22752 -
22753 desc = edesc->hw_desc;
22754
22755 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
22756 @@ -1403,12 +1238,9 @@ static int ahash_finup_no_ctx(struct aha
22757 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22758 struct caam_hash_state *state = ahash_request_ctx(req);
22759 struct device *jrdev = ctx->jrdev;
22760 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22761 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22762 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22763 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22764 - int last_buflen = state->current_buf ? state->buflen_0 :
22765 - state->buflen_1;
22766 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22767 + GFP_KERNEL : GFP_ATOMIC;
22768 + int buflen = *current_buflen(state);
22769 u32 *desc;
22770 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
22771 int digestsize = crypto_ahash_digestsize(ahash);
22772 @@ -1450,9 +1282,9 @@ static int ahash_finup_no_ctx(struct aha
22773 edesc->src_nents = src_nents;
22774 edesc->sec4_sg_bytes = sec4_sg_bytes;
22775
22776 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
22777 - state->buf_dma, buflen,
22778 - last_buflen);
22779 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
22780 + if (ret)
22781 + goto unmap;
22782
22783 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
22784 req->nbytes);
22785 @@ -1496,11 +1328,10 @@ static int ahash_update_first(struct aha
22786 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22787 struct caam_hash_state *state = ahash_request_ctx(req);
22788 struct device *jrdev = ctx->jrdev;
22789 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22790 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22791 - u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
22792 - int *next_buflen = state->current_buf ?
22793 - &state->buflen_1 : &state->buflen_0;
22794 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22795 + GFP_KERNEL : GFP_ATOMIC;
22796 + u8 *next_buf = alt_buf(state);
22797 + int *next_buflen = alt_buflen(state);
22798 int to_hash;
22799 u32 *desc;
22800 int src_nents, mapped_nents;
22801 @@ -1545,7 +1376,6 @@ static int ahash_update_first(struct aha
22802 }
22803
22804 edesc->src_nents = src_nents;
22805 - edesc->dst_dma = 0;
22806
22807 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
22808 to_hash);
22809 @@ -1582,6 +1412,7 @@ static int ahash_update_first(struct aha
22810 state->final = ahash_final_no_ctx;
22811 scatterwalk_map_and_copy(next_buf, req->src, 0,
22812 req->nbytes, 0);
22813 + switch_buf(state);
22814 }
22815 #ifdef DEBUG
22816 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
22817 @@ -1688,7 +1519,6 @@ struct caam_hash_template {
22818 unsigned int blocksize;
22819 struct ahash_alg template_ahash;
22820 u32 alg_type;
22821 - u32 alg_op;
22822 };
22823
22824 /* ahash descriptors */
22825 @@ -1714,7 +1544,6 @@ static struct caam_hash_template driver_
22826 },
22827 },
22828 .alg_type = OP_ALG_ALGSEL_SHA1,
22829 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
22830 }, {
22831 .name = "sha224",
22832 .driver_name = "sha224-caam",
22833 @@ -1736,7 +1565,6 @@ static struct caam_hash_template driver_
22834 },
22835 },
22836 .alg_type = OP_ALG_ALGSEL_SHA224,
22837 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
22838 }, {
22839 .name = "sha256",
22840 .driver_name = "sha256-caam",
22841 @@ -1758,7 +1586,6 @@ static struct caam_hash_template driver_
22842 },
22843 },
22844 .alg_type = OP_ALG_ALGSEL_SHA256,
22845 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
22846 }, {
22847 .name = "sha384",
22848 .driver_name = "sha384-caam",
22849 @@ -1780,7 +1607,6 @@ static struct caam_hash_template driver_
22850 },
22851 },
22852 .alg_type = OP_ALG_ALGSEL_SHA384,
22853 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
22854 }, {
22855 .name = "sha512",
22856 .driver_name = "sha512-caam",
22857 @@ -1802,7 +1628,6 @@ static struct caam_hash_template driver_
22858 },
22859 },
22860 .alg_type = OP_ALG_ALGSEL_SHA512,
22861 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
22862 }, {
22863 .name = "md5",
22864 .driver_name = "md5-caam",
22865 @@ -1824,14 +1649,12 @@ static struct caam_hash_template driver_
22866 },
22867 },
22868 .alg_type = OP_ALG_ALGSEL_MD5,
22869 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
22870 },
22871 };
22872
22873 struct caam_hash_alg {
22874 struct list_head entry;
22875 int alg_type;
22876 - int alg_op;
22877 struct ahash_alg ahash_alg;
22878 };
22879
22880 @@ -1853,6 +1676,8 @@ static int caam_hash_cra_init(struct cry
22881 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
22882 HASH_MSG_LEN + 64,
22883 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
22884 + dma_addr_t dma_addr;
22885 + struct caam_drv_private *priv;
22886
22887 /*
22888 * Get a Job ring from Job Ring driver to ensure in-order
22889 @@ -1863,11 +1688,34 @@ static int caam_hash_cra_init(struct cry
22890 pr_err("Job Ring Device allocation for transform failed\n");
22891 return PTR_ERR(ctx->jrdev);
22892 }
22893 +
22894 + priv = dev_get_drvdata(ctx->jrdev->parent);
22895 + ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
22896 +
22897 + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
22898 + offsetof(struct caam_hash_ctx,
22899 + sh_desc_update_dma),
22900 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
22901 + if (dma_mapping_error(ctx->jrdev, dma_addr)) {
22902 + dev_err(ctx->jrdev, "unable to map shared descriptors\n");
22903 + caam_jr_free(ctx->jrdev);
22904 + return -ENOMEM;
22905 + }
22906 +
22907 + ctx->sh_desc_update_dma = dma_addr;
22908 + ctx->sh_desc_update_first_dma = dma_addr +
22909 + offsetof(struct caam_hash_ctx,
22910 + sh_desc_update_first);
22911 + ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
22912 + sh_desc_fin);
22913 + ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
22914 + sh_desc_digest);
22915 +
22916 /* copy descriptor header template value */
22917 - ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
22918 - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
22919 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
22920
22921 - ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
22922 + ctx->ctx_len = runninglen[(ctx->adata.algtype &
22923 + OP_ALG_ALGSEL_SUBMASK) >>
22924 OP_ALG_ALGSEL_SHIFT];
22925
22926 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
22927 @@ -1879,30 +1727,10 @@ static void caam_hash_cra_exit(struct cr
22928 {
22929 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
22930
22931 - if (ctx->sh_desc_update_dma &&
22932 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
22933 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
22934 - desc_bytes(ctx->sh_desc_update),
22935 - DMA_TO_DEVICE);
22936 - if (ctx->sh_desc_update_first_dma &&
22937 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
22938 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
22939 - desc_bytes(ctx->sh_desc_update_first),
22940 - DMA_TO_DEVICE);
22941 - if (ctx->sh_desc_fin_dma &&
22942 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
22943 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
22944 - desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
22945 - if (ctx->sh_desc_digest_dma &&
22946 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
22947 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
22948 - desc_bytes(ctx->sh_desc_digest),
22949 - DMA_TO_DEVICE);
22950 - if (ctx->sh_desc_finup_dma &&
22951 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
22952 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
22953 - desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
22954 -
22955 + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
22956 + offsetof(struct caam_hash_ctx,
22957 + sh_desc_update_dma),
22958 + ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
22959 caam_jr_free(ctx->jrdev);
22960 }
22961
22962 @@ -1961,7 +1789,6 @@ caam_hash_alloc(struct caam_hash_templat
22963 alg->cra_type = &crypto_ahash_type;
22964
22965 t_alg->alg_type = template->alg_type;
22966 - t_alg->alg_op = template->alg_op;
22967
22968 return t_alg;
22969 }
22970 --- /dev/null
22971 +++ b/drivers/crypto/caam/caamhash_desc.c
22972 @@ -0,0 +1,108 @@
22973 +/*
22974 + * Shared descriptors for ahash algorithms
22975 + *
22976 + * Copyright 2017 NXP
22977 + *
22978 + * Redistribution and use in source and binary forms, with or without
22979 + * modification, are permitted provided that the following conditions are met:
22980 + * * Redistributions of source code must retain the above copyright
22981 + * notice, this list of conditions and the following disclaimer.
22982 + * * Redistributions in binary form must reproduce the above copyright
22983 + * notice, this list of conditions and the following disclaimer in the
22984 + * documentation and/or other materials provided with the distribution.
22985 + * * Neither the names of the above-listed copyright holders nor the
22986 + * names of any contributors may be used to endorse or promote products
22987 + * derived from this software without specific prior written permission.
22988 + *
22989 + *
22990 + * ALTERNATIVELY, this software may be distributed under the terms of the
22991 + * GNU General Public License ("GPL") as published by the Free Software
22992 + * Foundation, either version 2 of that License or (at your option) any
22993 + * later version.
22994 + *
22995 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22996 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22997 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22998 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22999 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23000 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23001 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23002 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23003 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23004 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23005 + * POSSIBILITY OF SUCH DAMAGE.
23006 + */
23007 +
23008 +#include "compat.h"
23009 +#include "desc_constr.h"
23010 +#include "caamhash_desc.h"
23011 +
23012 +/**
23013 + * cnstr_shdsc_ahash - ahash shared descriptor
23014 + * @desc: pointer to buffer used for descriptor construction
23015 + * @adata: pointer to authentication transform definitions.
23016 + * A split key is required for SEC Era < 6; the size of the split key
23017 + * is specified in this case.
23018 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
23019 + * SHA256, SHA384, SHA512}.
23020 + * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
23021 + * @digestsize: algorithm's digest size
23022 + * @ctx_len: size of Context Register
23023 + * @import_ctx: true if previous Context Register needs to be restored
23024 + * must be true for ahash update and final
23025 + *              must be false for ahash first and digest
23026 + * @era: SEC Era
23027 + */
23028 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
23029 + int digestsize, int ctx_len, bool import_ctx, int era)
23030 +{
23031 + u32 op = adata->algtype;
23032 +
23033 + init_sh_desc(desc, HDR_SHARE_SERIAL);
23034 +
23035 + /* Append key if it has been set; ahash update excluded */
23036 + if (state != OP_ALG_AS_UPDATE && adata->keylen) {
23037 + u32 *skip_key_load;
23038 +
23039 + /* Skip key loading if already shared */
23040 + skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
23041 + JUMP_COND_SHRD);
23042 +
23043 + if (era < 6)
23044 + append_key_as_imm(desc, adata->key_virt,
23045 + adata->keylen_pad,
23046 + adata->keylen, CLASS_2 |
23047 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
23048 + else
23049 + append_proto_dkp(desc, adata);
23050 +
23051 + set_jump_tgt_here(desc, skip_key_load);
23052 +
23053 + op |= OP_ALG_AAI_HMAC_PRECOMP;
23054 + }
23055 +
23056 + /* If needed, import context from software */
23057 + if (import_ctx)
23058 + append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
23059 + LDST_SRCDST_BYTE_CONTEXT);
23060 +
23061 + /* Class 2 operation */
23062 + append_operation(desc, op | state | OP_ALG_ENCRYPT);
23063 +
23064 + /*
23065 + * Load from buf and/or src and write to req->result or state->context
23066 + * Calculate remaining bytes to read
23067 + */
23068 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
23069 + /* Read remaining bytes */
23070 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
23071 + FIFOLD_TYPE_MSG | KEY_VLF);
23072 + /* Store class2 context bytes */
23073 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
23074 + LDST_SRCDST_BYTE_CONTEXT);
23075 +}
23076 +EXPORT_SYMBOL(cnstr_shdsc_ahash);
23077 +
23078 +MODULE_LICENSE("Dual BSD/GPL");
23079 +MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
23080 +MODULE_AUTHOR("NXP Semiconductors");
23081 --- /dev/null
23082 +++ b/drivers/crypto/caam/caamhash_desc.h
23083 @@ -0,0 +1,49 @@
23084 +/*
23085 + * Shared descriptors for ahash algorithms
23086 + *
23087 + * Copyright 2017 NXP
23088 + *
23089 + * Redistribution and use in source and binary forms, with or without
23090 + * modification, are permitted provided that the following conditions are met:
23091 + * * Redistributions of source code must retain the above copyright
23092 + * notice, this list of conditions and the following disclaimer.
23093 + * * Redistributions in binary form must reproduce the above copyright
23094 + * notice, this list of conditions and the following disclaimer in the
23095 + * documentation and/or other materials provided with the distribution.
23096 + * * Neither the names of the above-listed copyright holders nor the
23097 + * names of any contributors may be used to endorse or promote products
23098 + * derived from this software without specific prior written permission.
23099 + *
23100 + *
23101 + * ALTERNATIVELY, this software may be distributed under the terms of the
23102 + * GNU General Public License ("GPL") as published by the Free Software
23103 + * Foundation, either version 2 of that License or (at your option) any
23104 + * later version.
23105 + *
23106 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23107 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23108 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23109 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
23110 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23111 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23112 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23113 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23114 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
23115 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
23116 + * POSSIBILITY OF SUCH DAMAGE.
23117 + */
23118 +
23119 +#ifndef _CAAMHASH_DESC_H_
23120 +#define _CAAMHASH_DESC_H_
23121 +
23122 +/* length of descriptors text */
23123 +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
23124 +#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
23125 +#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
23126 +#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
23127 +#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
23128 +
23129 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
23130 + int digestsize, int ctx_len, bool import_ctx, int era);
23131 +
23132 +#endif /* _CAAMHASH_DESC_H_ */
23133 --- a/drivers/crypto/caam/caampkc.c
23134 +++ b/drivers/crypto/caam/caampkc.c
23135 @@ -18,6 +18,10 @@
23136 #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
23137 #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
23138 sizeof(struct rsa_priv_f1_pdb))
23139 +#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
23140 + sizeof(struct rsa_priv_f2_pdb))
23141 +#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
23142 + sizeof(struct rsa_priv_f3_pdb))
23143
23144 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
23145 struct akcipher_request *req)
23146 @@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct dev
23147 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
23148 }
23149
23150 +static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
23151 + struct akcipher_request *req)
23152 +{
23153 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23154 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23155 + struct caam_rsa_key *key = &ctx->key;
23156 + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
23157 + size_t p_sz = key->p_sz;
23158 +	size_t q_sz = key->q_sz;
23159 +
23160 + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
23161 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
23162 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
23163 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
23164 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
23165 +}
23166 +
23167 +static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
23168 + struct akcipher_request *req)
23169 +{
23170 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23171 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23172 + struct caam_rsa_key *key = &ctx->key;
23173 + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
23174 + size_t p_sz = key->p_sz;
23175 + size_t q_sz = key->q_sz;
23176 +
23177 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
23178 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
23179 + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
23180 + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
23181 + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
23182 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
23183 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
23184 +}
23185 +
23186 /* RSA Job Completion handler */
23187 static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
23188 {
23189 @@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct devi
23190 akcipher_request_complete(req, err);
23191 }
23192
23193 +static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
23194 + void *context)
23195 +{
23196 + struct akcipher_request *req = context;
23197 + struct rsa_edesc *edesc;
23198 +
23199 + if (err)
23200 + caam_jr_strstatus(dev, err);
23201 +
23202 + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
23203 +
23204 + rsa_priv_f2_unmap(dev, edesc, req);
23205 + rsa_io_unmap(dev, edesc, req);
23206 + kfree(edesc);
23207 +
23208 + akcipher_request_complete(req, err);
23209 +}
23210 +
23211 +static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
23212 + void *context)
23213 +{
23214 + struct akcipher_request *req = context;
23215 + struct rsa_edesc *edesc;
23216 +
23217 + if (err)
23218 + caam_jr_strstatus(dev, err);
23219 +
23220 + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
23221 +
23222 + rsa_priv_f3_unmap(dev, edesc, req);
23223 + rsa_io_unmap(dev, edesc, req);
23224 + kfree(edesc);
23225 +
23226 + akcipher_request_complete(req, err);
23227 +}
23228 +
23229 static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
23230 size_t desclen)
23231 {
23232 @@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc
23233 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23234 struct device *dev = ctx->dev;
23235 struct rsa_edesc *edesc;
23236 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
23237 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
23238 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
23239 + GFP_KERNEL : GFP_ATOMIC;
23240 int sgc;
23241 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
23242 int src_nents, dst_nents;
23243 @@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct ak
23244 return 0;
23245 }
23246
23247 +static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
23248 + struct rsa_edesc *edesc)
23249 +{
23250 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23251 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23252 + struct caam_rsa_key *key = &ctx->key;
23253 + struct device *dev = ctx->dev;
23254 + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
23255 + int sec4_sg_index = 0;
23256 + size_t p_sz = key->p_sz;
23257 + size_t q_sz = key->q_sz;
23258 +
23259 + pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
23260 + if (dma_mapping_error(dev, pdb->d_dma)) {
23261 + dev_err(dev, "Unable to map RSA private exponent memory\n");
23262 + return -ENOMEM;
23263 + }
23264 +
23265 + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
23266 + if (dma_mapping_error(dev, pdb->p_dma)) {
23267 + dev_err(dev, "Unable to map RSA prime factor p memory\n");
23268 + goto unmap_d;
23269 + }
23270 +
23271 + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
23272 + if (dma_mapping_error(dev, pdb->q_dma)) {
23273 + dev_err(dev, "Unable to map RSA prime factor q memory\n");
23274 + goto unmap_p;
23275 + }
23276 +
23277 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
23278 + if (dma_mapping_error(dev, pdb->tmp1_dma)) {
23279 + dev_err(dev, "Unable to map RSA tmp1 memory\n");
23280 + goto unmap_q;
23281 + }
23282 +
23283 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
23284 + if (dma_mapping_error(dev, pdb->tmp2_dma)) {
23285 + dev_err(dev, "Unable to map RSA tmp2 memory\n");
23286 + goto unmap_tmp1;
23287 + }
23288 +
23289 + if (edesc->src_nents > 1) {
23290 + pdb->sgf |= RSA_PRIV_PDB_SGF_G;
23291 + pdb->g_dma = edesc->sec4_sg_dma;
23292 + sec4_sg_index += edesc->src_nents;
23293 + } else {
23294 + pdb->g_dma = sg_dma_address(req->src);
23295 + }
23296 +
23297 + if (edesc->dst_nents > 1) {
23298 + pdb->sgf |= RSA_PRIV_PDB_SGF_F;
23299 + pdb->f_dma = edesc->sec4_sg_dma +
23300 + sec4_sg_index * sizeof(struct sec4_sg_entry);
23301 + } else {
23302 + pdb->f_dma = sg_dma_address(req->dst);
23303 + }
23304 +
23305 + pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
23306 + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
23307 +
23308 + return 0;
23309 +
23310 +unmap_tmp1:
23311 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
23312 +unmap_q:
23313 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
23314 +unmap_p:
23315 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
23316 +unmap_d:
23317 + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
23318 +
23319 + return -ENOMEM;
23320 +}
23321 +
23322 +static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
23323 + struct rsa_edesc *edesc)
23324 +{
23325 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23326 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23327 + struct caam_rsa_key *key = &ctx->key;
23328 + struct device *dev = ctx->dev;
23329 + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
23330 + int sec4_sg_index = 0;
23331 + size_t p_sz = key->p_sz;
23332 + size_t q_sz = key->q_sz;
23333 +
23334 + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
23335 + if (dma_mapping_error(dev, pdb->p_dma)) {
23336 + dev_err(dev, "Unable to map RSA prime factor p memory\n");
23337 + return -ENOMEM;
23338 + }
23339 +
23340 + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
23341 + if (dma_mapping_error(dev, pdb->q_dma)) {
23342 + dev_err(dev, "Unable to map RSA prime factor q memory\n");
23343 + goto unmap_p;
23344 + }
23345 +
23346 + pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
23347 + if (dma_mapping_error(dev, pdb->dp_dma)) {
23348 + dev_err(dev, "Unable to map RSA exponent dp memory\n");
23349 + goto unmap_q;
23350 + }
23351 +
23352 + pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
23353 + if (dma_mapping_error(dev, pdb->dq_dma)) {
23354 + dev_err(dev, "Unable to map RSA exponent dq memory\n");
23355 + goto unmap_dp;
23356 + }
23357 +
23358 + pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
23359 + if (dma_mapping_error(dev, pdb->c_dma)) {
23360 + dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
23361 + goto unmap_dq;
23362 + }
23363 +
23364 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
23365 + if (dma_mapping_error(dev, pdb->tmp1_dma)) {
23366 + dev_err(dev, "Unable to map RSA tmp1 memory\n");
23367 + goto unmap_qinv;
23368 + }
23369 +
23370 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
23371 + if (dma_mapping_error(dev, pdb->tmp2_dma)) {
23372 + dev_err(dev, "Unable to map RSA tmp2 memory\n");
23373 + goto unmap_tmp1;
23374 + }
23375 +
23376 + if (edesc->src_nents > 1) {
23377 + pdb->sgf |= RSA_PRIV_PDB_SGF_G;
23378 + pdb->g_dma = edesc->sec4_sg_dma;
23379 + sec4_sg_index += edesc->src_nents;
23380 + } else {
23381 + pdb->g_dma = sg_dma_address(req->src);
23382 + }
23383 +
23384 + if (edesc->dst_nents > 1) {
23385 + pdb->sgf |= RSA_PRIV_PDB_SGF_F;
23386 + pdb->f_dma = edesc->sec4_sg_dma +
23387 + sec4_sg_index * sizeof(struct sec4_sg_entry);
23388 + } else {
23389 + pdb->f_dma = sg_dma_address(req->dst);
23390 + }
23391 +
23392 + pdb->sgf |= key->n_sz;
23393 + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
23394 +
23395 + return 0;
23396 +
23397 +unmap_tmp1:
23398 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
23399 +unmap_qinv:
23400 + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
23401 +unmap_dq:
23402 + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
23403 +unmap_dp:
23404 + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
23405 +unmap_q:
23406 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
23407 +unmap_p:
23408 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
23409 +
23410 + return -ENOMEM;
23411 +}
23412 +
23413 static int caam_rsa_enc(struct akcipher_request *req)
23414 {
23415 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23416 @@ -301,24 +543,14 @@ init_fail:
23417 return ret;
23418 }
23419
23420 -static int caam_rsa_dec(struct akcipher_request *req)
23421 +static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
23422 {
23423 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23424 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23425 - struct caam_rsa_key *key = &ctx->key;
23426 struct device *jrdev = ctx->dev;
23427 struct rsa_edesc *edesc;
23428 int ret;
23429
23430 - if (unlikely(!key->n || !key->d))
23431 - return -EINVAL;
23432 -
23433 - if (req->dst_len < key->n_sz) {
23434 - req->dst_len = key->n_sz;
23435 - dev_err(jrdev, "Output buffer length less than parameter n\n");
23436 - return -EOVERFLOW;
23437 - }
23438 -
23439 /* Allocate extended descriptor */
23440 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
23441 if (IS_ERR(edesc))
23442 @@ -344,17 +576,147 @@ init_fail:
23443 return ret;
23444 }
23445
23446 +static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
23447 +{
23448 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23449 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23450 + struct device *jrdev = ctx->dev;
23451 + struct rsa_edesc *edesc;
23452 + int ret;
23453 +
23454 + /* Allocate extended descriptor */
23455 + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
23456 + if (IS_ERR(edesc))
23457 + return PTR_ERR(edesc);
23458 +
23459 + /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
23460 + ret = set_rsa_priv_f2_pdb(req, edesc);
23461 + if (ret)
23462 + goto init_fail;
23463 +
23464 + /* Initialize Job Descriptor */
23465 + init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
23466 +
23467 + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
23468 + if (!ret)
23469 + return -EINPROGRESS;
23470 +
23471 + rsa_priv_f2_unmap(jrdev, edesc, req);
23472 +
23473 +init_fail:
23474 + rsa_io_unmap(jrdev, edesc, req);
23475 + kfree(edesc);
23476 + return ret;
23477 +}
23478 +
23479 +static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
23480 +{
23481 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23482 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23483 + struct device *jrdev = ctx->dev;
23484 + struct rsa_edesc *edesc;
23485 + int ret;
23486 +
23487 + /* Allocate extended descriptor */
23488 + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
23489 + if (IS_ERR(edesc))
23490 + return PTR_ERR(edesc);
23491 +
23492 + /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
23493 + ret = set_rsa_priv_f3_pdb(req, edesc);
23494 + if (ret)
23495 + goto init_fail;
23496 +
23497 + /* Initialize Job Descriptor */
23498 + init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
23499 +
23500 + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
23501 + if (!ret)
23502 + return -EINPROGRESS;
23503 +
23504 + rsa_priv_f3_unmap(jrdev, edesc, req);
23505 +
23506 +init_fail:
23507 + rsa_io_unmap(jrdev, edesc, req);
23508 + kfree(edesc);
23509 + return ret;
23510 +}
23511 +
23512 +static int caam_rsa_dec(struct akcipher_request *req)
23513 +{
23514 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23515 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23516 + struct caam_rsa_key *key = &ctx->key;
23517 + int ret;
23518 +
23519 + if (unlikely(!key->n || !key->d))
23520 + return -EINVAL;
23521 +
23522 + if (req->dst_len < key->n_sz) {
23523 + req->dst_len = key->n_sz;
23524 + dev_err(ctx->dev, "Output buffer length less than parameter n\n");
23525 + return -EOVERFLOW;
23526 + }
23527 +
23528 + if (key->priv_form == FORM3)
23529 + ret = caam_rsa_dec_priv_f3(req);
23530 + else if (key->priv_form == FORM2)
23531 + ret = caam_rsa_dec_priv_f2(req);
23532 + else
23533 + ret = caam_rsa_dec_priv_f1(req);
23534 +
23535 + return ret;
23536 +}
23537 +
23538 static void caam_rsa_free_key(struct caam_rsa_key *key)
23539 {
23540 kzfree(key->d);
23541 + kzfree(key->p);
23542 + kzfree(key->q);
23543 + kzfree(key->dp);
23544 + kzfree(key->dq);
23545 + kzfree(key->qinv);
23546 + kzfree(key->tmp1);
23547 + kzfree(key->tmp2);
23548 kfree(key->e);
23549 kfree(key->n);
23550 - key->d = NULL;
23551 - key->e = NULL;
23552 - key->n = NULL;
23553 - key->d_sz = 0;
23554 - key->e_sz = 0;
23555 - key->n_sz = 0;
23556 + memset(key, 0, sizeof(*key));
23557 +}
23558 +
23559 +static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
23560 +{
23561 + while (!**ptr && *nbytes) {
23562 + (*ptr)++;
23563 + (*nbytes)--;
23564 + }
23565 +}
23566 +
23567 +/**
23568 + * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
23569 + * dP, dQ and qInv could decode to less than corresponding p, q length, as the
23570 + * BER-encoding requires that the minimum number of bytes be used to encode the
23571 + * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate
23572 + * length.
23573 + *
23574 + * @ptr : pointer to {dP, dQ, qInv} CRT member
23575 + * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
23576 + * @dstlen: length in bytes of corresponding p or q prime factor
23577 + */
23578 +static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
23579 +{
23580 + u8 *dst;
23581 +
23582 + caam_rsa_drop_leading_zeros(&ptr, &nbytes);
23583 + if (!nbytes)
23584 + return NULL;
23585 +
23586 + dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
23587 + if (!dst)
23588 + return NULL;
23589 +
23590 + memcpy(dst + (dstlen - nbytes), ptr, nbytes);
23591 +
23592 + return dst;
23593 }
23594
23595 /**
23596 @@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(con
23597 {
23598 u8 *val;
23599
23600 - while (!*buf && *nbytes) {
23601 - buf++;
23602 - (*nbytes)--;
23603 - }
23604 + caam_rsa_drop_leading_zeros(&buf, nbytes);
23605 + if (!*nbytes)
23606 + return NULL;
23607
23608 val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
23609 if (!val)
23610 @@ -395,7 +756,7 @@ static int caam_rsa_set_pub_key(struct c
23611 unsigned int keylen)
23612 {
23613 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23614 - struct rsa_key raw_key = {0};
23615 + struct rsa_key raw_key = {NULL};
23616 struct caam_rsa_key *rsa_key = &ctx->key;
23617 int ret;
23618
23619 @@ -437,11 +798,69 @@ err:
23620 return -ENOMEM;
23621 }
23622
23623 +static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
23624 + struct rsa_key *raw_key)
23625 +{
23626 + struct caam_rsa_key *rsa_key = &ctx->key;
23627 + size_t p_sz = raw_key->p_sz;
23628 + size_t q_sz = raw_key->q_sz;
23629 +
23630 + rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
23631 + if (!rsa_key->p)
23632 + return;
23633 + rsa_key->p_sz = p_sz;
23634 +
23635 + rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
23636 + if (!rsa_key->q)
23637 + goto free_p;
23638 + rsa_key->q_sz = q_sz;
23639 +
23640 + rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
23641 + if (!rsa_key->tmp1)
23642 + goto free_q;
23643 +
23644 + rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
23645 + if (!rsa_key->tmp2)
23646 + goto free_tmp1;
23647 +
23648 + rsa_key->priv_form = FORM2;
23649 +
23650 + rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
23651 + if (!rsa_key->dp)
23652 + goto free_tmp2;
23653 +
23654 + rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
23655 + if (!rsa_key->dq)
23656 + goto free_dp;
23657 +
23658 + rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
23659 + q_sz);
23660 + if (!rsa_key->qinv)
23661 + goto free_dq;
23662 +
23663 + rsa_key->priv_form = FORM3;
23664 +
23665 + return;
23666 +
23667 +free_dq:
23668 + kzfree(rsa_key->dq);
23669 +free_dp:
23670 + kzfree(rsa_key->dp);
23671 +free_tmp2:
23672 + kzfree(rsa_key->tmp2);
23673 +free_tmp1:
23674 + kzfree(rsa_key->tmp1);
23675 +free_q:
23676 + kzfree(rsa_key->q);
23677 +free_p:
23678 + kzfree(rsa_key->p);
23679 +}
23680 +
23681 static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
23682 unsigned int keylen)
23683 {
23684 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23685 - struct rsa_key raw_key = {0};
23686 + struct rsa_key raw_key = {NULL};
23687 struct caam_rsa_key *rsa_key = &ctx->key;
23688 int ret;
23689
23690 @@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct
23691 memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
23692 memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
23693
23694 + caam_rsa_set_priv_key_form(ctx, &raw_key);
23695 +
23696 return 0;
23697
23698 err:
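
The caam_read_rsa_crt() helper added above normalizes a BER-decoded CRT member
(dP, dQ or qInv) to the fixed length of the corresponding prime: leading zero
bytes are stripped, then the value is copied right-aligned into a zero-filled
buffer of dstlen bytes. A minimal user-space sketch of the same convention
(function and variable names here are illustrative, not the driver's):

  #include <stdlib.h>
  #include <string.h>

  /* Drop leading zero bytes of a big-endian integer, then right-align it in a
   * zero-filled buffer of dstlen bytes (the length of the matching prime).
   */
  static unsigned char *pad_to_prime_len(const unsigned char *src,
                                         size_t nbytes, size_t dstlen)
  {
          unsigned char *dst;

          while (nbytes && !*src) {
                  src++;
                  nbytes--;
          }
          if (!nbytes || nbytes > dstlen)
                  return NULL;

          dst = calloc(1, dstlen);
          if (!dst)
                  return NULL;
          memcpy(dst + (dstlen - nbytes), src, nbytes);
          return dst;
  }
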
23699 --- a/drivers/crypto/caam/caampkc.h
23700 +++ b/drivers/crypto/caam/caampkc.h
23701 @@ -13,21 +13,75 @@
23702 #include "pdb.h"
23703
23704 /**
23705 + * caam_priv_key_form - CAAM RSA private key representation
23706 + * A CAAM RSA private key may take any of three forms.
23707 + *
23708 + * 1. The first representation consists of the pair (n, d), where the
23709 + * components have the following meanings:
23710 + * n the RSA modulus
23711 + * d the RSA private exponent
23712 + *
23713 + * 2. The second representation consists of the triplet (p, q, d), where the
23714 + * components have the following meanings:
23715 + * p the first prime factor of the RSA modulus n
23716 + * q the second prime factor of the RSA modulus n
23717 + * d the RSA private exponent
23718 + *
23719 + * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
23720 + * where the components have the following meanings:
23721 + * p the first prime factor of the RSA modulus n
23722 + * q the second prime factor of the RSA modulus n
23723 + * dP the first factor's CRT exponent
23724 + * dQ the second factor's CRT exponent
23725 + * qInv the (first) CRT coefficient
23726 + *
23727 + * The benefit of using the third or the second key form is lower computational
23728 + * cost for the decryption and signature operations.
23729 + */
23730 +enum caam_priv_key_form {
23731 + FORM1,
23732 + FORM2,
23733 + FORM3
23734 +};
23735 +
23736 +/**
23737 * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
23738 * @n : RSA modulus raw byte stream
23739 * @e : RSA public exponent raw byte stream
23740 * @d : RSA private exponent raw byte stream
23741 + * @p : RSA prime factor p of RSA modulus n
23742 + * @q : RSA prime factor q of RSA modulus n
23743 + * @dp : RSA CRT exponent of p
23744 + * @dq : RSA CRT exponent of q
23745 + * @qinv : RSA CRT coefficient
23746 + * @tmp1 : CAAM uses this temporary buffer as internal state buffer.
23747 + * It is assumed to be as long as p.
23748 + * @tmp2 : CAAM uses this temporary buffer as internal state buffer.
23749 + * It is assumed to be as long as q.
23750 * @n_sz : length in bytes of RSA modulus n
23751 * @e_sz : length in bytes of RSA public exponent
23752 * @d_sz : length in bytes of RSA private exponent
23753 + * @p_sz : length in bytes of RSA prime factor p of RSA modulus n
23754 + * @q_sz : length in bytes of RSA prime factor q of RSA modulus n
23755 + * @priv_form : CAAM RSA private key representation
23756 */
23757 struct caam_rsa_key {
23758 u8 *n;
23759 u8 *e;
23760 u8 *d;
23761 + u8 *p;
23762 + u8 *q;
23763 + u8 *dp;
23764 + u8 *dq;
23765 + u8 *qinv;
23766 + u8 *tmp1;
23767 + u8 *tmp2;
23768 size_t n_sz;
23769 size_t e_sz;
23770 size_t d_sz;
23771 + size_t p_sz;
23772 + size_t q_sz;
23773 + enum caam_priv_key_form priv_form;
23774 };
23775
23776 /**
23777 @@ -59,6 +113,8 @@ struct rsa_edesc {
23778 union {
23779 struct rsa_pub_pdb pub;
23780 struct rsa_priv_f1_pdb priv_f1;
23781 + struct rsa_priv_f2_pdb priv_f2;
23782 + struct rsa_priv_f3_pdb priv_f3;
23783 } pdb;
23784 u32 hw_desc[];
23785 };
23786 @@ -66,5 +122,7 @@ struct rsa_edesc {
23787 /* Descriptor construction primitives. */
23788 void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
23789 void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
23790 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
23791 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
23792
23793 #endif
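
The third form documented above is the standard RSA-CRT private-key
representation (PKCS #1): with the quintuple (p, q, dP, dQ, qInv) the private
operation m = c^d mod n is replaced by two half-size exponentiations plus a
cheap recombination, which is why forms 2 and 3 are described as having lower
computational cost. In LaTeX notation, for a ciphertext c:

  \[
  \begin{aligned}
  d_P &\equiv d \pmod{p-1}, \qquad d_Q \equiv d \pmod{q-1}, \qquad
  q_{\mathrm{inv}} \equiv q^{-1} \pmod{p},\\
  m_1 &= c^{\,d_P} \bmod p, \qquad m_2 = c^{\,d_Q} \bmod q,\\
  h   &= q_{\mathrm{inv}} \, (m_1 - m_2) \bmod p, \qquad m = m_2 + h\,q .
  \end{aligned}
  \]
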
23794 --- a/drivers/crypto/caam/caamrng.c
23795 +++ b/drivers/crypto/caam/caamrng.c
23796 @@ -52,7 +52,7 @@
23797
23798 /* length of descriptors */
23799 #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
23800 -#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
23801 +#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
23802
23803 /* Buffer, its dma address and lock */
23804 struct buf_data {
23805 @@ -100,8 +100,7 @@ static void rng_done(struct device *jrde
23806 {
23807 struct buf_data *bd;
23808
23809 - bd = (struct buf_data *)((char *)desc -
23810 - offsetof(struct buf_data, hw_desc));
23811 + bd = container_of(desc, struct buf_data, hw_desc[0]);
23812
23813 if (err)
23814 caam_jr_strstatus(jrdev, err);
23815 @@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(str
23816
23817 init_sh_desc(desc, HDR_SHARE_SERIAL);
23818
23819 - /* Propagate errors from shared to job descriptor */
23820 - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
23821 -
23822 /* Generate random bytes */
23823 append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
23824
23825 @@ -289,11 +285,7 @@ static int caam_init_rng(struct caam_rng
23826 if (err)
23827 return err;
23828
23829 - err = caam_init_buf(ctx, 1);
23830 - if (err)
23831 - return err;
23832 -
23833 - return 0;
23834 + return caam_init_buf(ctx, 1);
23835 }
23836
23837 static struct hwrng caam_rng = {
23838 @@ -351,7 +343,7 @@ static int __init caam_rng_init(void)
23839 pr_err("Job Ring Device allocation for transform failed\n");
23840 return PTR_ERR(dev);
23841 }
23842 - rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
23843 + rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
23844 if (!rng_ctx) {
23845 err = -ENOMEM;
23846 goto free_caam_alloc;
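
The rng_done() change above swaps open-coded offsetof() arithmetic for
container_of(), which recovers a pointer to the enclosing structure from a
pointer to one of its members; the two forms are equivalent, the kernel macro
merely adds type checking on top. A self-contained illustration (struct and
field names are made up for the example, not the driver's):

  #include <stddef.h>
  #include <stdio.h>

  #define container_of(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))

  struct buf_data_example {
          int busy;
          unsigned int hw_desc[16];
  };

  int main(void)
  {
          struct buf_data_example bd = { .busy = 1 };
          unsigned int *desc = &bd.hw_desc[0];
          struct buf_data_example *back =
                  container_of(desc, struct buf_data_example, hw_desc[0]);

          printf("%d\n", back == &bd);    /* prints 1 */
          return 0;
  }
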
23847 --- a/drivers/crypto/caam/compat.h
23848 +++ b/drivers/crypto/caam/compat.h
23849 @@ -16,6 +16,7 @@
23850 #include <linux/of_platform.h>
23851 #include <linux/dma-mapping.h>
23852 #include <linux/io.h>
23853 +#include <linux/iommu.h>
23854 #include <linux/spinlock.h>
23855 #include <linux/rtnetlink.h>
23856 #include <linux/in.h>
23857 --- a/drivers/crypto/caam/ctrl.c
23858 +++ b/drivers/crypto/caam/ctrl.c
23859 @@ -2,40 +2,41 @@
23860 * Controller-level driver, kernel property detection, initialization
23861 *
23862 * Copyright 2008-2012 Freescale Semiconductor, Inc.
23863 + * Copyright 2017 NXP
23864 */
23865
23866 #include <linux/device.h>
23867 #include <linux/of_address.h>
23868 #include <linux/of_irq.h>
23869 +#include <linux/sys_soc.h>
23870
23871 #include "compat.h"
23872 #include "regs.h"
23873 #include "intern.h"
23874 #include "jr.h"
23875 #include "desc_constr.h"
23876 -#include "error.h"
23877 #include "ctrl.h"
23878
23879 bool caam_little_end;
23880 EXPORT_SYMBOL(caam_little_end);
23881 +bool caam_imx;
23882 +EXPORT_SYMBOL(caam_imx);
23883 +bool caam_dpaa2;
23884 +EXPORT_SYMBOL(caam_dpaa2);
23885 +
23886 +#ifdef CONFIG_CAAM_QI
23887 +#include "qi.h"
23888 +#endif
23889
23890 /*
23891 * i.MX targets tend to have clock control subsystems that can
23892 * enable/disable clocking to our device.
23893 */
23894 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
23895 -static inline struct clk *caam_drv_identify_clk(struct device *dev,
23896 - char *clk_name)
23897 -{
23898 - return devm_clk_get(dev, clk_name);
23899 -}
23900 -#else
23901 static inline struct clk *caam_drv_identify_clk(struct device *dev,
23902 char *clk_name)
23903 {
23904 - return NULL;
23905 + return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
23906 }
23907 -#endif
23908
23909 /*
23910 * Descriptor to instantiate RNG State Handle 0 in normal mode and
23911 @@ -274,7 +275,7 @@ static int deinstantiate_rng(struct devi
23912 /*
23913 * If the corresponding bit is set, then it means the state
23914 * handle was initialized by us, and thus it needs to be
23915 - * deintialized as well
23916 + * deinitialized as well
23917 */
23918 if ((1 << sh_idx) & state_handle_mask) {
23919 /*
23920 @@ -307,20 +308,24 @@ static int caam_remove(struct platform_d
23921 struct device *ctrldev;
23922 struct caam_drv_private *ctrlpriv;
23923 struct caam_ctrl __iomem *ctrl;
23924 - int ring;
23925
23926 ctrldev = &pdev->dev;
23927 ctrlpriv = dev_get_drvdata(ctrldev);
23928 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
23929
23930 - /* Remove platform devices for JobRs */
23931 - for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
23932 - if (ctrlpriv->jrpdev[ring])
23933 - of_device_unregister(ctrlpriv->jrpdev[ring]);
23934 - }
23935 + /* Remove platform devices under the crypto node */
23936 + of_platform_depopulate(ctrldev);
23937 +
23938 +#ifdef CONFIG_CAAM_QI
23939 + if (ctrlpriv->qidev)
23940 + caam_qi_shutdown(ctrlpriv->qidev);
23941 +#endif
23942
23943 - /* De-initialize RNG state handles initialized by this driver. */
23944 - if (ctrlpriv->rng4_sh_init)
23945 + /*
23946 + * De-initialize RNG state handles initialized by this driver.
23947 + * In case of DPAA 2.x, RNG is managed by MC firmware.
23948 + */
23949 + if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
23950 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
23951
23952 /* Shut down debug views */
23953 @@ -335,8 +340,8 @@ static int caam_remove(struct platform_d
23954 clk_disable_unprepare(ctrlpriv->caam_ipg);
23955 clk_disable_unprepare(ctrlpriv->caam_mem);
23956 clk_disable_unprepare(ctrlpriv->caam_aclk);
23957 - clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23958 -
23959 + if (ctrlpriv->caam_emi_slow)
23960 + clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23961 return 0;
23962 }
23963
23964 @@ -370,11 +375,8 @@ static void kick_trng(struct platform_de
23965 */
23966 val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
23967 >> RTSDCTL_ENT_DLY_SHIFT;
23968 - if (ent_delay <= val) {
23969 - /* put RNG4 into run mode */
23970 - clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
23971 - return;
23972 - }
23973 + if (ent_delay <= val)
23974 + goto start_rng;
23975
23976 val = rd_reg32(&r4tst->rtsdctl);
23977 val = (val & ~RTSDCTL_ENT_DLY_MASK) |
23978 @@ -386,15 +388,12 @@ static void kick_trng(struct platform_de
23979 wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
23980 /* read the control register */
23981 val = rd_reg32(&r4tst->rtmctl);
23982 +start_rng:
23983 /*
23984 * select raw sampling in both entropy shifter
23985 - * and statistical checker
23986 + * and statistical checker; put RNG4 into run mode
23987 */
23988 - clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
23989 - /* put RNG4 into run mode */
23990 - clrsetbits_32(&val, RTMCTL_PRGM, 0);
23991 - /* write back the control register */
23992 - wr_reg32(&r4tst->rtmctl, val);
23993 + clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
23994 }
23995
23996 /**
23997 @@ -415,28 +414,26 @@ int caam_get_era(void)
23998 }
23999 EXPORT_SYMBOL(caam_get_era);
24000
24001 -#ifdef CONFIG_DEBUG_FS
24002 -static int caam_debugfs_u64_get(void *data, u64 *val)
24003 -{
24004 - *val = caam64_to_cpu(*(u64 *)data);
24005 - return 0;
24006 -}
24007 -
24008 -static int caam_debugfs_u32_get(void *data, u64 *val)
24009 -{
24010 - *val = caam32_to_cpu(*(u32 *)data);
24011 - return 0;
24012 -}
24013 -
24014 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
24015 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
24016 -#endif
24017 +static const struct of_device_id caam_match[] = {
24018 + {
24019 + .compatible = "fsl,sec-v4.0",
24020 + },
24021 + {
24022 + .compatible = "fsl,sec4.0",
24023 + },
24024 + {},
24025 +};
24026 +MODULE_DEVICE_TABLE(of, caam_match);
24027
24028 /* Probe routine for CAAM top (controller) level */
24029 static int caam_probe(struct platform_device *pdev)
24030 {
24031 - int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
24032 + int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
24033 u64 caam_id;
24034 + static const struct soc_device_attribute imx_soc[] = {
24035 + {.family = "Freescale i.MX"},
24036 + {},
24037 + };
24038 struct device *dev;
24039 struct device_node *nprop, *np;
24040 struct caam_ctrl __iomem *ctrl;
24041 @@ -456,9 +453,10 @@ static int caam_probe(struct platform_de
24042
24043 dev = &pdev->dev;
24044 dev_set_drvdata(dev, ctrlpriv);
24045 - ctrlpriv->pdev = pdev;
24046 nprop = pdev->dev.of_node;
24047
24048 + caam_imx = (bool)soc_device_match(imx_soc);
24049 +
24050 /* Enable clocking */
24051 clk = caam_drv_identify_clk(&pdev->dev, "ipg");
24052 if (IS_ERR(clk)) {
24053 @@ -487,14 +485,16 @@ static int caam_probe(struct platform_de
24054 }
24055 ctrlpriv->caam_aclk = clk;
24056
24057 - clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
24058 - if (IS_ERR(clk)) {
24059 - ret = PTR_ERR(clk);
24060 - dev_err(&pdev->dev,
24061 - "can't identify CAAM emi_slow clk: %d\n", ret);
24062 - return ret;
24063 + if (!of_machine_is_compatible("fsl,imx6ul")) {
24064 + clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
24065 + if (IS_ERR(clk)) {
24066 + ret = PTR_ERR(clk);
24067 + dev_err(&pdev->dev,
24068 + "can't identify CAAM emi_slow clk: %d\n", ret);
24069 + return ret;
24070 + }
24071 + ctrlpriv->caam_emi_slow = clk;
24072 }
24073 - ctrlpriv->caam_emi_slow = clk;
24074
24075 ret = clk_prepare_enable(ctrlpriv->caam_ipg);
24076 if (ret < 0) {
24077 @@ -515,11 +515,13 @@ static int caam_probe(struct platform_de
24078 goto disable_caam_mem;
24079 }
24080
24081 - ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
24082 - if (ret < 0) {
24083 - dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
24084 - ret);
24085 - goto disable_caam_aclk;
24086 + if (ctrlpriv->caam_emi_slow) {
24087 + ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
24088 + if (ret < 0) {
24089 + dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
24090 + ret);
24091 + goto disable_caam_aclk;
24092 + }
24093 }
24094
24095 /* Get configuration properties from device tree */
24096 @@ -546,13 +548,13 @@ static int caam_probe(struct platform_de
24097 else
24098 BLOCK_OFFSET = PG_SIZE_64K;
24099
24100 - ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
24101 - ctrlpriv->assure = (struct caam_assurance __force *)
24102 - ((uint8_t *)ctrl +
24103 + ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
24104 + ctrlpriv->assure = (struct caam_assurance __iomem __force *)
24105 + ((__force uint8_t *)ctrl +
24106 BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
24107 );
24108 - ctrlpriv->deco = (struct caam_deco __force *)
24109 - ((uint8_t *)ctrl +
24110 + ctrlpriv->deco = (struct caam_deco __iomem __force *)
24111 + ((__force uint8_t *)ctrl +
24112 BLOCK_OFFSET * DECO_BLOCK_NUMBER
24113 );
24114
24115 @@ -561,12 +563,17 @@ static int caam_probe(struct platform_de
24116
24117 /*
24118 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
24119 - * long pointers in master configuration register
24120 + * long pointers in master configuration register.
24121 + * In case of DPAA 2.x, Management Complex firmware performs
24122 + * the configuration.
24123 */
24124 - clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
24125 - MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
24126 - MCFGR_WDENABLE | MCFGR_LARGE_BURST |
24127 - (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
24128 + caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
24129 + if (!caam_dpaa2)
24130 + clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
24131 + MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
24132 + MCFGR_WDENABLE | MCFGR_LARGE_BURST |
24133 + (sizeof(dma_addr_t) == sizeof(u64) ?
24134 + MCFGR_LONG_PTR : 0));
24135
24136 /*
24137 * Read the Compile Time paramters and SCFGR to determine
24138 @@ -594,64 +601,69 @@ static int caam_probe(struct platform_de
24139 JRSTART_JR1_START | JRSTART_JR2_START |
24140 JRSTART_JR3_START);
24141
24142 - if (sizeof(dma_addr_t) == sizeof(u64))
24143 - if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
24144 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
24145 + if (sizeof(dma_addr_t) == sizeof(u64)) {
24146 + if (caam_dpaa2)
24147 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
24148 + else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
24149 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
24150 else
24151 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
24152 - else
24153 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
24154 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
24155 + } else {
24156 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
24157 + }
24158 + if (ret) {
24159 + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
24160 + goto iounmap_ctrl;
24161 + }
24162
24163 - /*
24164 - * Detect and enable JobRs
24165 - * First, find out how many ring spec'ed, allocate references
24166 - * for all, then go probe each one.
24167 - */
24168 - rspec = 0;
24169 - for_each_available_child_of_node(nprop, np)
24170 - if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
24171 - of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
24172 - rspec++;
24173 + ctrlpriv->era = caam_get_era();
24174
24175 - ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
24176 - sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
24177 - if (ctrlpriv->jrpdev == NULL) {
24178 - ret = -ENOMEM;
24179 + ret = of_platform_populate(nprop, caam_match, NULL, dev);
24180 + if (ret) {
24181 + dev_err(dev, "JR platform devices creation error\n");
24182 goto iounmap_ctrl;
24183 }
24184
24185 +#ifdef CONFIG_DEBUG_FS
24186 + /*
24187 + * FIXME: needs better naming distinction, as some amalgamation of
24188 + * "caam" and nprop->full_name. The OF name isn't distinctive,
24189 + * but does separate instances
24190 + */
24191 + perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
24192 +
24193 + ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
24194 + ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
24195 +#endif
24196 ring = 0;
24197 - ctrlpriv->total_jobrs = 0;
24198 for_each_available_child_of_node(nprop, np)
24199 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
24200 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
24201 - ctrlpriv->jrpdev[ring] =
24202 - of_platform_device_create(np, NULL, dev);
24203 - if (!ctrlpriv->jrpdev[ring]) {
24204 - pr_warn("JR%d Platform device creation error\n",
24205 - ring);
24206 - continue;
24207 - }
24208 - ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
24209 - ((uint8_t *)ctrl +
24210 + ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
24211 + ((__force uint8_t *)ctrl +
24212 (ring + JR_BLOCK_NUMBER) *
24213 BLOCK_OFFSET
24214 );
24215 ctrlpriv->total_jobrs++;
24216 ring++;
24217 - }
24218 + }
24219
24220 - /* Check to see if QI present. If so, enable */
24221 - ctrlpriv->qi_present =
24222 - !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
24223 - CTPR_MS_QI_MASK);
24224 - if (ctrlpriv->qi_present) {
24225 - ctrlpriv->qi = (struct caam_queue_if __force *)
24226 - ((uint8_t *)ctrl +
24227 + /* Check to see if (DPAA 1.x) QI present. If so, enable */
24228 + ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
24229 + if (ctrlpriv->qi_present && !caam_dpaa2) {
24230 + ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
24231 + ((__force uint8_t *)ctrl +
24232 BLOCK_OFFSET * QI_BLOCK_NUMBER
24233 );
24234 /* This is all that's required to physically enable QI */
24235 wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
24236 +
24237 + /* If QMAN driver is present, init CAAM-QI backend */
24238 +#ifdef CONFIG_CAAM_QI
24239 + ret = caam_qi_init(pdev);
24240 + if (ret)
24241 + dev_err(dev, "caam qi i/f init failed: %d\n", ret);
24242 +#endif
24243 }
24244
24245 /* If no QI and no rings specified, quit and go home */
24246 @@ -666,8 +678,10 @@ static int caam_probe(struct platform_de
24247 /*
24248 * If SEC has RNG version >= 4 and RNG state handle has not been
24249 * already instantiated, do RNG instantiation
24250 + * In case of DPAA 2.x, RNG is managed by MC firmware.
24251 */
24252 - if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
24253 + if (!caam_dpaa2 &&
24254 + (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
24255 ctrlpriv->rng4_sh_init =
24256 rd_reg32(&ctrl->r4tst[0].rdsta);
24257 /*
24258 @@ -734,78 +748,47 @@ static int caam_probe(struct platform_de
24259
24260 /* Report "alive" for developer to see */
24261 dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
24262 - caam_get_era());
24263 - dev_info(dev, "job rings = %d, qi = %d\n",
24264 - ctrlpriv->total_jobrs, ctrlpriv->qi_present);
24265 + ctrlpriv->era);
24266 + dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
24267 + ctrlpriv->total_jobrs, ctrlpriv->qi_present,
24268 + caam_dpaa2 ? "yes" : "no");
24269
24270 #ifdef CONFIG_DEBUG_FS
24271 - /*
24272 - * FIXME: needs better naming distinction, as some amalgamation of
24273 - * "caam" and nprop->full_name. The OF name isn't distinctive,
24274 - * but does separate instances
24275 - */
24276 - perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
24277 -
24278 - ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
24279 - ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
24280 -
24281 - /* Controller-level - performance monitor counters */
24282 -
24283 - ctrlpriv->ctl_rq_dequeued =
24284 - debugfs_create_file("rq_dequeued",
24285 - S_IRUSR | S_IRGRP | S_IROTH,
24286 - ctrlpriv->ctl, &perfmon->req_dequeued,
24287 - &caam_fops_u64_ro);
24288 - ctrlpriv->ctl_ob_enc_req =
24289 - debugfs_create_file("ob_rq_encrypted",
24290 - S_IRUSR | S_IRGRP | S_IROTH,
24291 - ctrlpriv->ctl, &perfmon->ob_enc_req,
24292 - &caam_fops_u64_ro);
24293 - ctrlpriv->ctl_ib_dec_req =
24294 - debugfs_create_file("ib_rq_decrypted",
24295 - S_IRUSR | S_IRGRP | S_IROTH,
24296 - ctrlpriv->ctl, &perfmon->ib_dec_req,
24297 - &caam_fops_u64_ro);
24298 - ctrlpriv->ctl_ob_enc_bytes =
24299 - debugfs_create_file("ob_bytes_encrypted",
24300 - S_IRUSR | S_IRGRP | S_IROTH,
24301 - ctrlpriv->ctl, &perfmon->ob_enc_bytes,
24302 - &caam_fops_u64_ro);
24303 - ctrlpriv->ctl_ob_prot_bytes =
24304 - debugfs_create_file("ob_bytes_protected",
24305 - S_IRUSR | S_IRGRP | S_IROTH,
24306 - ctrlpriv->ctl, &perfmon->ob_prot_bytes,
24307 - &caam_fops_u64_ro);
24308 - ctrlpriv->ctl_ib_dec_bytes =
24309 - debugfs_create_file("ib_bytes_decrypted",
24310 - S_IRUSR | S_IRGRP | S_IROTH,
24311 - ctrlpriv->ctl, &perfmon->ib_dec_bytes,
24312 - &caam_fops_u64_ro);
24313 - ctrlpriv->ctl_ib_valid_bytes =
24314 - debugfs_create_file("ib_bytes_validated",
24315 - S_IRUSR | S_IRGRP | S_IROTH,
24316 - ctrlpriv->ctl, &perfmon->ib_valid_bytes,
24317 - &caam_fops_u64_ro);
24318 + debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
24319 + ctrlpriv->ctl, &perfmon->req_dequeued,
24320 + &caam_fops_u64_ro);
24321 + debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
24322 + ctrlpriv->ctl, &perfmon->ob_enc_req,
24323 + &caam_fops_u64_ro);
24324 + debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
24325 + ctrlpriv->ctl, &perfmon->ib_dec_req,
24326 + &caam_fops_u64_ro);
24327 + debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
24328 + ctrlpriv->ctl, &perfmon->ob_enc_bytes,
24329 + &caam_fops_u64_ro);
24330 + debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
24331 + ctrlpriv->ctl, &perfmon->ob_prot_bytes,
24332 + &caam_fops_u64_ro);
24333 + debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
24334 + ctrlpriv->ctl, &perfmon->ib_dec_bytes,
24335 + &caam_fops_u64_ro);
24336 + debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
24337 + ctrlpriv->ctl, &perfmon->ib_valid_bytes,
24338 + &caam_fops_u64_ro);
24339
24340 /* Controller level - global status values */
24341 - ctrlpriv->ctl_faultaddr =
24342 - debugfs_create_file("fault_addr",
24343 - S_IRUSR | S_IRGRP | S_IROTH,
24344 - ctrlpriv->ctl, &perfmon->faultaddr,
24345 - &caam_fops_u32_ro);
24346 - ctrlpriv->ctl_faultdetail =
24347 - debugfs_create_file("fault_detail",
24348 - S_IRUSR | S_IRGRP | S_IROTH,
24349 - ctrlpriv->ctl, &perfmon->faultdetail,
24350 - &caam_fops_u32_ro);
24351 - ctrlpriv->ctl_faultstatus =
24352 - debugfs_create_file("fault_status",
24353 - S_IRUSR | S_IRGRP | S_IROTH,
24354 - ctrlpriv->ctl, &perfmon->status,
24355 - &caam_fops_u32_ro);
24356 + debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
24357 + ctrlpriv->ctl, &perfmon->faultaddr,
24358 + &caam_fops_u32_ro);
24359 + debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
24360 + ctrlpriv->ctl, &perfmon->faultdetail,
24361 + &caam_fops_u32_ro);
24362 + debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
24363 + ctrlpriv->ctl, &perfmon->status,
24364 + &caam_fops_u32_ro);
24365
24366 /* Internal covering keys (useful in non-secure mode only) */
24367 - ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
24368 + ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
24369 ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
24370 ctrlpriv->ctl_kek = debugfs_create_blob("kek",
24371 S_IRUSR |
24372 @@ -813,7 +796,7 @@ static int caam_probe(struct platform_de
24373 ctrlpriv->ctl,
24374 &ctrlpriv->ctl_kek_wrap);
24375
24376 - ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
24377 + ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
24378 ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
24379 ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
24380 S_IRUSR |
24381 @@ -821,7 +804,7 @@ static int caam_probe(struct platform_de
24382 ctrlpriv->ctl,
24383 &ctrlpriv->ctl_tkek_wrap);
24384
24385 - ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
24386 + ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
24387 ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
24388 ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
24389 S_IRUSR |
24390 @@ -832,13 +815,17 @@ static int caam_probe(struct platform_de
24391 return 0;
24392
24393 caam_remove:
24394 +#ifdef CONFIG_DEBUG_FS
24395 + debugfs_remove_recursive(ctrlpriv->dfs_root);
24396 +#endif
24397 caam_remove(pdev);
24398 return ret;
24399
24400 iounmap_ctrl:
24401 iounmap(ctrl);
24402 disable_caam_emi_slow:
24403 - clk_disable_unprepare(ctrlpriv->caam_emi_slow);
24404 + if (ctrlpriv->caam_emi_slow)
24405 + clk_disable_unprepare(ctrlpriv->caam_emi_slow);
24406 disable_caam_aclk:
24407 clk_disable_unprepare(ctrlpriv->caam_aclk);
24408 disable_caam_mem:
24409 @@ -848,17 +835,6 @@ disable_caam_ipg:
24410 return ret;
24411 }
24412
24413 -static struct of_device_id caam_match[] = {
24414 - {
24415 - .compatible = "fsl,sec-v4.0",
24416 - },
24417 - {
24418 - .compatible = "fsl,sec4.0",
24419 - },
24420 - {},
24421 -};
24422 -MODULE_DEVICE_TABLE(of, caam_match);
24423 -
24424 static struct platform_driver caam_driver = {
24425 .driver = {
24426 .name = "caam",
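
In the probe path above, the per-ring of_platform_device_create() loop is
replaced by a single of_platform_populate() call against caam_match, and
caam_remove() undoes it with of_platform_depopulate(). A minimal sketch of
that pairing in a generic platform driver (the "vendor,example-child"
compatible string and the example_* names are placeholders, not part of the
CAAM driver):

  #include <linux/of_platform.h>
  #include <linux/platform_device.h>

  static const struct of_device_id example_match[] = {
          { .compatible = "vendor,example-child" },   /* placeholder */
          { }
  };

  static int example_probe(struct platform_device *pdev)
  {
          int ret;

          /* Create platform devices for all matching child nodes. */
          ret = of_platform_populate(pdev->dev.of_node, example_match,
                                     NULL, &pdev->dev);
          if (ret)
                  dev_err(&pdev->dev, "child device creation failed\n");
          return ret;
  }

  static int example_remove(struct platform_device *pdev)
  {
          /* Tear all of the child devices down again in one call. */
          of_platform_depopulate(&pdev->dev);
          return 0;
  }
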
24427 --- a/drivers/crypto/caam/ctrl.h
24428 +++ b/drivers/crypto/caam/ctrl.h
24429 @@ -10,4 +10,6 @@
24430 /* Prototypes for backend-level services exposed to APIs */
24431 int caam_get_era(void);
24432
24433 +extern bool caam_dpaa2;
24434 +
24435 #endif /* CTRL_H */
24436 --- a/drivers/crypto/caam/desc.h
24437 +++ b/drivers/crypto/caam/desc.h
24438 @@ -22,12 +22,6 @@
24439 #define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
24440 #define SEC4_SG_OFFSET_MASK 0x00001fff
24441
24442 -struct sec4_sg_entry {
24443 - u64 ptr;
24444 - u32 len;
24445 - u32 bpid_offset;
24446 -};
24447 -
24448 /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
24449 #define MAX_CAAM_DESCSIZE 64
24450
24451 @@ -47,6 +41,7 @@ struct sec4_sg_entry {
24452 #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
24453 #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
24454 #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
24455 +#define CMD_MOVEB (0x07 << CMD_SHIFT)
24456 #define CMD_STORE (0x0a << CMD_SHIFT)
24457 #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
24458 #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
24459 @@ -90,8 +85,8 @@ struct sec4_sg_entry {
24460 #define HDR_ZRO 0x00008000
24461
24462 /* Start Index or SharedDesc Length */
24463 -#define HDR_START_IDX_MASK 0x3f
24464 #define HDR_START_IDX_SHIFT 16
24465 +#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
24466
24467 /* If shared descriptor header, 6-bit length */
24468 #define HDR_DESCLEN_SHR_MASK 0x3f
24469 @@ -121,10 +116,10 @@ struct sec4_sg_entry {
24470 #define HDR_PROP_DNR 0x00000800
24471
24472 /* JobDesc/SharedDesc share property */
24473 -#define HDR_SD_SHARE_MASK 0x03
24474 #define HDR_SD_SHARE_SHIFT 8
24475 -#define HDR_JD_SHARE_MASK 0x07
24476 +#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
24477 #define HDR_JD_SHARE_SHIFT 8
24478 +#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
24479
24480 #define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
24481 #define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
24482 @@ -235,7 +230,7 @@ struct sec4_sg_entry {
24483 #define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
24484 #define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
24485 #define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
24486 -#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
24487 +#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
24488 #define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
24489 #define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
24490 #define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
24491 @@ -360,6 +355,7 @@ struct sec4_sg_entry {
24492 #define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
24493 #define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
24494 #define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
24495 +#define FIFOLD_TYPE_IFIFO (0x0f << FIFOLD_TYPE_SHIFT)
24496
24497 /* Other types. Need to OR in last/flush bits as desired */
24498 #define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
24499 @@ -400,7 +396,7 @@ struct sec4_sg_entry {
24500 #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
24501 #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
24502 #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
24503 -#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
24504 +#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
24505 #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
24506 #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
24507 #define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
24508 @@ -413,6 +409,7 @@ struct sec4_sg_entry {
24509 #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
24510 #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
24511 #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
24512 +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
24513 #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
24514
24515 /*
24516 @@ -449,6 +446,18 @@ struct sec4_sg_entry {
24517 #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
24518 #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
24519 #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
24520 +#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
24521 +#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
24522 +#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
24523 +#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
24524 +#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
24525 +#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
24526 +#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
24527 +#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
24528 +#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
24529 +#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
24530 +#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
24531 +#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
24532
24533 /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
24534 #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
24535 @@ -1098,6 +1107,22 @@ struct sec4_sg_entry {
24536 /* MacSec protinfos */
24537 #define OP_PCL_MACSEC 0x0001
24538
24539 +/* Derived Key Protocol (DKP) Protinfo */
24540 +#define OP_PCL_DKP_SRC_SHIFT 14
24541 +#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
24542 +#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
24543 +#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
24544 +#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
24545 +#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
24546 +#define OP_PCL_DKP_DST_SHIFT 12
24547 +#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
24548 +#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
24549 +#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
24550 +#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
24551 +#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
24552 +#define OP_PCL_DKP_KEY_SHIFT 0
24553 +#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
24554 +
24555 /* PKI unidirectional protocol protinfo bits */
24556 #define OP_PCL_PKPROT_TEST 0x0008
24557 #define OP_PCL_PKPROT_DECRYPT 0x0004
24558 @@ -1107,8 +1132,8 @@ struct sec4_sg_entry {
24559 /* For non-protocol/alg-only op commands */
24560 #define OP_ALG_TYPE_SHIFT 24
24561 #define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
24562 -#define OP_ALG_TYPE_CLASS1 2
24563 -#define OP_ALG_TYPE_CLASS2 4
24564 +#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
24565 +#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
24566
24567 #define OP_ALG_ALGSEL_SHIFT 16
24568 #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
24569 @@ -1249,7 +1274,7 @@ struct sec4_sg_entry {
24570 #define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
24571
24572 /* PKHA mode copy-memory functions */
24573 -#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
24574 +#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
24575 #define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
24576 #define OP_ALG_PKMODE_DST_REG_SHIFT 10
24577 #define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
24578 @@ -1445,10 +1470,11 @@ struct sec4_sg_entry {
24579 #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
24580 #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
24581 #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
24582 -#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
24583 +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
24584 #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
24585 #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
24586 #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
24587 +#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
24588
24589 /* Destination selectors */
24590 #define MATH_DEST_SHIFT 8
24591 @@ -1457,6 +1483,7 @@ struct sec4_sg_entry {
24592 #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
24593 #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
24594 #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
24595 +#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
24596 #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
24597 #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
24598 #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
24599 @@ -1629,4 +1656,31 @@ struct sec4_sg_entry {
24600 /* Frame Descriptor Command for Replacement Job Descriptor */
24601 #define FD_CMD_REPLACE_JOB_DESC 0x20000000
24602
24603 +/* CHA Control Register bits */
24604 +#define CCTRL_RESET_CHA_ALL 0x1
24605 +#define CCTRL_RESET_CHA_AESA 0x2
24606 +#define CCTRL_RESET_CHA_DESA 0x4
24607 +#define CCTRL_RESET_CHA_AFHA 0x8
24608 +#define CCTRL_RESET_CHA_KFHA 0x10
24609 +#define CCTRL_RESET_CHA_SF8A 0x20
24610 +#define CCTRL_RESET_CHA_PKHA 0x40
24611 +#define CCTRL_RESET_CHA_MDHA 0x80
24612 +#define CCTRL_RESET_CHA_CRCA 0x100
24613 +#define CCTRL_RESET_CHA_RNG 0x200
24614 +#define CCTRL_RESET_CHA_SF9A 0x400
24615 +#define CCTRL_RESET_CHA_ZUCE 0x800
24616 +#define CCTRL_RESET_CHA_ZUCA 0x1000
24617 +#define CCTRL_UNLOAD_PK_A0 0x10000
24618 +#define CCTRL_UNLOAD_PK_A1 0x20000
24619 +#define CCTRL_UNLOAD_PK_A2 0x40000
24620 +#define CCTRL_UNLOAD_PK_A3 0x80000
24621 +#define CCTRL_UNLOAD_PK_B0 0x100000
24622 +#define CCTRL_UNLOAD_PK_B1 0x200000
24623 +#define CCTRL_UNLOAD_PK_B2 0x400000
24624 +#define CCTRL_UNLOAD_PK_B3 0x800000
24625 +#define CCTRL_UNLOAD_PK_N 0x1000000
24626 +#define CCTRL_UNLOAD_PK_A 0x4000000
24627 +#define CCTRL_UNLOAD_PK_B 0x8000000
24628 +#define CCTRL_UNLOAD_SBOX 0x10000000
24629 +
24630 #endif /* DESC_H */
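
Several of the changes above convert masks that used to be un-shifted field
widths (HDR_START_IDX_MASK, HDR_SD_SHARE_MASK, HDR_JD_SHARE_MASK,
OP_ALG_TYPE_CLASS1/2) into masks already shifted into field position, so a
field is extracted by masking first and shifting second, and the constants can
be compared against a register value directly. A small stand-alone
illustration reusing the header-word definitions above (the sample header
value is arbitrary):

  #include <stdint.h>
  #include <stdio.h>

  #define HDR_START_IDX_SHIFT 16
  #define HDR_START_IDX_MASK  (0x3f << HDR_START_IDX_SHIFT)

  int main(void)
  {
          uint32_t hdr = 0xb0800000u | (7u << HDR_START_IDX_SHIFT);
          uint32_t start_idx = (hdr & HDR_START_IDX_MASK) >> HDR_START_IDX_SHIFT;

          printf("start index = %u\n", start_idx);    /* prints 7 */
          return 0;
  }
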
24631 --- a/drivers/crypto/caam/desc_constr.h
24632 +++ b/drivers/crypto/caam/desc_constr.h
24633 @@ -4,6 +4,9 @@
24634 * Copyright 2008-2012 Freescale Semiconductor, Inc.
24635 */
24636
24637 +#ifndef DESC_CONSTR_H
24638 +#define DESC_CONSTR_H
24639 +
24640 #include "desc.h"
24641 #include "regs.h"
24642
24643 @@ -33,38 +36,39 @@
24644
24645 extern bool caam_little_end;
24646
24647 -static inline int desc_len(u32 *desc)
24648 +static inline int desc_len(u32 * const desc)
24649 {
24650 return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
24651 }
24652
24653 -static inline int desc_bytes(void *desc)
24654 +static inline int desc_bytes(void * const desc)
24655 {
24656 return desc_len(desc) * CAAM_CMD_SZ;
24657 }
24658
24659 -static inline u32 *desc_end(u32 *desc)
24660 +static inline u32 *desc_end(u32 * const desc)
24661 {
24662 return desc + desc_len(desc);
24663 }
24664
24665 -static inline void *sh_desc_pdb(u32 *desc)
24666 +static inline void *sh_desc_pdb(u32 * const desc)
24667 {
24668 return desc + 1;
24669 }
24670
24671 -static inline void init_desc(u32 *desc, u32 options)
24672 +static inline void init_desc(u32 * const desc, u32 options)
24673 {
24674 *desc = cpu_to_caam32((options | HDR_ONE) + 1);
24675 }
24676
24677 -static inline void init_sh_desc(u32 *desc, u32 options)
24678 +static inline void init_sh_desc(u32 * const desc, u32 options)
24679 {
24680 PRINT_POS;
24681 init_desc(desc, CMD_SHARED_DESC_HDR | options);
24682 }
24683
24684 -static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
24685 +static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
24686 + size_t pdb_bytes)
24687 {
24688 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
24689
24690 @@ -72,19 +76,20 @@ static inline void init_sh_desc_pdb(u32
24691 options);
24692 }
24693
24694 -static inline void init_job_desc(u32 *desc, u32 options)
24695 +static inline void init_job_desc(u32 * const desc, u32 options)
24696 {
24697 init_desc(desc, CMD_DESC_HDR | options);
24698 }
24699
24700 -static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
24701 +static inline void init_job_desc_pdb(u32 * const desc, u32 options,
24702 + size_t pdb_bytes)
24703 {
24704 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
24705
24706 init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
24707 }
24708
24709 -static inline void append_ptr(u32 *desc, dma_addr_t ptr)
24710 +static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
24711 {
24712 dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
24713
24714 @@ -94,8 +99,8 @@ static inline void append_ptr(u32 *desc,
24715 CAAM_PTR_SZ / CAAM_CMD_SZ);
24716 }
24717
24718 -static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
24719 - u32 options)
24720 +static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
24721 + int len, u32 options)
24722 {
24723 PRINT_POS;
24724 init_job_desc(desc, HDR_SHARED | options |
24725 @@ -103,7 +108,7 @@ static inline void init_job_desc_shared(
24726 append_ptr(desc, ptr);
24727 }
24728
24729 -static inline void append_data(u32 *desc, void *data, int len)
24730 +static inline void append_data(u32 * const desc, const void *data, int len)
24731 {
24732 u32 *offset = desc_end(desc);
24733
24734 @@ -114,7 +119,7 @@ static inline void append_data(u32 *desc
24735 (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
24736 }
24737
24738 -static inline void append_cmd(u32 *desc, u32 command)
24739 +static inline void append_cmd(u32 * const desc, u32 command)
24740 {
24741 u32 *cmd = desc_end(desc);
24742
24743 @@ -125,7 +130,7 @@ static inline void append_cmd(u32 *desc,
24744
24745 #define append_u32 append_cmd
24746
24747 -static inline void append_u64(u32 *desc, u64 data)
24748 +static inline void append_u64(u32 * const desc, u64 data)
24749 {
24750 u32 *offset = desc_end(desc);
24751
24752 @@ -142,14 +147,14 @@ static inline void append_u64(u32 *desc,
24753 }
24754
24755 /* Write command without affecting header, and return pointer to next word */
24756 -static inline u32 *write_cmd(u32 *desc, u32 command)
24757 +static inline u32 *write_cmd(u32 * const desc, u32 command)
24758 {
24759 *desc = cpu_to_caam32(command);
24760
24761 return desc + 1;
24762 }
24763
24764 -static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
24765 +static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
24766 u32 command)
24767 {
24768 append_cmd(desc, command | len);
24769 @@ -157,7 +162,7 @@ static inline void append_cmd_ptr(u32 *d
24770 }
24771
24772 /* Write length after pointer, rather than inside command */
24773 -static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
24774 +static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
24775 unsigned int len, u32 command)
24776 {
24777 append_cmd(desc, command);
24778 @@ -166,7 +171,7 @@ static inline void append_cmd_ptr_extlen
24779 append_cmd(desc, len);
24780 }
24781
24782 -static inline void append_cmd_data(u32 *desc, void *data, int len,
24783 +static inline void append_cmd_data(u32 * const desc, const void *data, int len,
24784 u32 command)
24785 {
24786 append_cmd(desc, command | IMMEDIATE | len);
24787 @@ -174,7 +179,7 @@ static inline void append_cmd_data(u32 *
24788 }
24789
24790 #define APPEND_CMD_RET(cmd, op) \
24791 -static inline u32 *append_##cmd(u32 *desc, u32 options) \
24792 +static inline u32 *append_##cmd(u32 * const desc, u32 options) \
24793 { \
24794 u32 *cmd = desc_end(desc); \
24795 PRINT_POS; \
24796 @@ -183,14 +188,15 @@ static inline u32 *append_##cmd(u32 *des
24797 }
24798 APPEND_CMD_RET(jump, JUMP)
24799 APPEND_CMD_RET(move, MOVE)
24800 +APPEND_CMD_RET(moveb, MOVEB)
24801
24802 -static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
24803 +static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
24804 {
24805 *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
24806 (desc_len(desc) - (jump_cmd - desc)));
24807 }
24808
24809 -static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
24810 +static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
24811 {
24812 u32 val = caam32_to_cpu(*move_cmd);
24813
24814 @@ -200,7 +206,7 @@ static inline void set_move_tgt_here(u32
24815 }
24816
24817 #define APPEND_CMD(cmd, op) \
24818 -static inline void append_##cmd(u32 *desc, u32 options) \
24819 +static inline void append_##cmd(u32 * const desc, u32 options) \
24820 { \
24821 PRINT_POS; \
24822 append_cmd(desc, CMD_##op | options); \
24823 @@ -208,7 +214,8 @@ static inline void append_##cmd(u32 *des
24824 APPEND_CMD(operation, OPERATION)
24825
24826 #define APPEND_CMD_LEN(cmd, op) \
24827 -static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
24828 +static inline void append_##cmd(u32 * const desc, unsigned int len, \
24829 + u32 options) \
24830 { \
24831 PRINT_POS; \
24832 append_cmd(desc, CMD_##op | len | options); \
24833 @@ -220,8 +227,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_L
24834 APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
24835
24836 #define APPEND_CMD_PTR(cmd, op) \
24837 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
24838 - u32 options) \
24839 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
24840 + unsigned int len, u32 options) \
24841 { \
24842 PRINT_POS; \
24843 append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
24844 @@ -231,8 +238,8 @@ APPEND_CMD_PTR(load, LOAD)
24845 APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
24846 APPEND_CMD_PTR(fifo_store, FIFO_STORE)
24847
24848 -static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
24849 - u32 options)
24850 +static inline void append_store(u32 * const desc, dma_addr_t ptr,
24851 + unsigned int len, u32 options)
24852 {
24853 u32 cmd_src;
24854
24855 @@ -249,7 +256,8 @@ static inline void append_store(u32 *des
24856 }
24857
24858 #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
24859 -static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
24860 +static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
24861 + dma_addr_t ptr, \
24862 unsigned int len, \
24863 u32 options) \
24864 { \
24865 @@ -263,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
24866 APPEND_SEQ_PTR_INTLEN(out, OUT)
24867
24868 #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
24869 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
24870 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
24871 unsigned int len, u32 options) \
24872 { \
24873 PRINT_POS; \
24874 @@ -273,7 +281,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
24875 APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
24876
24877 #define APPEND_CMD_PTR_EXTLEN(cmd, op) \
24878 -static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
24879 +static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
24880 unsigned int len, u32 options) \
24881 { \
24882 PRINT_POS; \
24883 @@ -287,7 +295,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_O
24884 * the size of its type
24885 */
24886 #define APPEND_CMD_PTR_LEN(cmd, op, type) \
24887 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
24888 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
24889 type len, u32 options) \
24890 { \
24891 PRINT_POS; \
24892 @@ -304,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
24893 * from length of immediate data provided, e.g., split keys
24894 */
24895 #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
24896 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
24897 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
24898 unsigned int data_len, \
24899 unsigned int len, u32 options) \
24900 { \
24901 @@ -315,7 +323,7 @@ static inline void append_##cmd##_as_imm
24902 APPEND_CMD_PTR_TO_IMM2(key, KEY);
24903
24904 #define APPEND_CMD_RAW_IMM(cmd, op, type) \
24905 -static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
24906 +static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
24907 u32 options) \
24908 { \
24909 PRINT_POS; \
24910 @@ -426,3 +434,107 @@ do { \
24911 APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
24912 #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
24913 APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
24914 +
24915 +/**
24916 + * struct alginfo - Container for algorithm details
24917 + * @algtype: algorithm selector; for valid values, see documentation of the
24918 + * functions where it is used.
24919 + * @keylen: length of the provided algorithm key, in bytes
24920 + * @keylen_pad: padded length of the provided algorithm key, in bytes
24921 + * @key: address where algorithm key resides; virtual address if key_inline
24922 + * is true, dma (bus) address if key_inline is false.
24923 + * @key_inline: true - key can be inlined in the descriptor; false - key is
24924 + * referenced by the descriptor
24925 + */
24926 +struct alginfo {
24927 + u32 algtype;
24928 + unsigned int keylen;
24929 + unsigned int keylen_pad;
24930 + union {
24931 + dma_addr_t key_dma;
24932 + const void *key_virt;
24933 + };
24934 + bool key_inline;
24935 +};
24936 +
24937 +/**
24938 + * desc_inline_query() - Provide indications on which data items can be inlined
24939 + * and which shall be referenced in a shared descriptor.
24940 + * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
24941 + * excluding the data items to be inlined (or corresponding
24942 + * pointer if an item is not inlined). Each cnstr_* function that
24943 + * generates descriptors should have a define mentioning
24944 + * corresponding length.
24945 + * @jd_len: Maximum length of the job descriptor(s) that will be used
24946 + * together with the shared descriptor.
24947 + * @data_len: Array of lengths of the data items trying to be inlined
24948 + * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
24949 + * otherwise.
24950 + * @count: Number of data items (size of @data_len array); must be <= 32
24951 + *
24952 + * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
24953 + * check @inl_mask for details.
24954 + */
24955 +static inline int desc_inline_query(unsigned int sd_base_len,
24956 + unsigned int jd_len, unsigned int *data_len,
24957 + u32 *inl_mask, unsigned int count)
24958 +{
24959 + int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
24960 + unsigned int i;
24961 +
24962 + *inl_mask = 0;
24963 + for (i = 0; (i < count) && (rem_bytes > 0); i++) {
24964 + if (rem_bytes - (int)(data_len[i] +
24965 + (count - i - 1) * CAAM_PTR_SZ) >= 0) {
24966 + rem_bytes -= data_len[i];
24967 + *inl_mask |= (1 << i);
24968 + } else {
24969 + rem_bytes -= CAAM_PTR_SZ;
24970 + }
24971 + }
24972 +
24973 + return (rem_bytes >= 0) ? 0 : -1;
24974 +}
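Editor's note: as a minimal sketch (not part of the patch itself) of how desc_inline_query() and struct alginfo work together, a shared-descriptor constructor might decide key placement as below. DESC_EXAMPLE_BASE_LEN and example_pick_key_placement() are hypothetical names used only for illustration; DESC_JOB_IO_LEN is assumed to be the job-descriptor I/O length define used elsewhere in the driver.

static int example_pick_key_placement(struct alginfo *adata,
				      struct alginfo *cdata)
{
	/* candidate data items, in the order they would be inlined */
	unsigned int data_len[2] = { adata->keylen_pad, cdata->keylen };
	u32 inl_mask;

	if (desc_inline_query(DESC_EXAMPLE_BASE_LEN /* hypothetical */,
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	/* bit i of inl_mask says whether data item i fits inline */
	adata->key_inline = !!(inl_mask & BIT(0));
	cdata->key_inline = !!(inl_mask & BIT(1));

	return 0;
}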
24975 +
24976 +/**
24977 + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
24978 + * @desc: pointer to buffer used for descriptor construction
24979 + * @adata: pointer to authentication transform definitions.
24980 + * keylen should be the length of initial key, while keylen_pad
24981 + * the length of the derived (split) key.
24982 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
24983 + * SHA256, SHA384, SHA512}.
24984 + */
24985 +static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
24986 +{
24987 + u32 protid;
24988 +
24989 + /*
24990 + * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
24991 + * to OP_PCLID_DKP_{MD5, SHA*}
24992 + */
24993 + protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
24994 + (0x20 << OP_ALG_ALGSEL_SHIFT);
24995 +
24996 + if (adata->key_inline) {
24997 + int words;
24998 +
24999 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
25000 + OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
25001 + adata->keylen);
25002 + append_data(desc, adata->key_virt, adata->keylen);
25003 +
25004 + /* Reserve space in descriptor buffer for the derived key */
25005 + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
25006 + ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
25007 + if (words)
25008 + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
25009 + } else {
25010 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
25011 + OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
25012 + adata->keylen);
25013 + append_ptr(desc, adata->key_dma);
25014 + }
25015 +}
25016 +
25017 +#endif /* DESC_CONSTR_H */
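Editor's note: a hedged sketch (not part of the patch) of how the constified helpers above are typically combined. example_build_dkp_shdesc() and example_build_job() are illustrative names; HDR_SHARE_SERIAL, HDR_SHARE_DEFER and HDR_REVERSE are header options from desc.h.

/* Build a shared descriptor that turns a plain HMAC key into a split key. */
static void example_build_dkp_shdesc(u32 * const desc, struct alginfo *adata)
{
	init_sh_desc(desc, HDR_SHARE_SERIAL);
	/* key is inlined or referenced according to adata->key_inline */
	append_proto_dkp(desc, adata);
}

/* Build a job descriptor that executes the shared descriptor above. */
static void example_build_job(u32 * const desc, dma_addr_t shdesc_dma,
			      int shdesc_words)
{
	/* shdesc_words is desc_len() of the shared descriptor, in 32-bit words */
	init_job_desc_shared(desc, shdesc_dma, shdesc_words,
			     HDR_SHARE_DEFER | HDR_REVERSE);
}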
25018 --- /dev/null
25019 +++ b/drivers/crypto/caam/dpseci.c
25020 @@ -0,0 +1,858 @@
25021 +/*
25022 + * Copyright 2013-2016 Freescale Semiconductor Inc.
25023 + * Copyright 2017 NXP
25024 + *
25025 + * Redistribution and use in source and binary forms, with or without
25026 + * modification, are permitted provided that the following conditions are met:
25027 + * * Redistributions of source code must retain the above copyright
25028 + * notice, this list of conditions and the following disclaimer.
25029 + * * Redistributions in binary form must reproduce the above copyright
25030 + * notice, this list of conditions and the following disclaimer in the
25031 + * documentation and/or other materials provided with the distribution.
25032 + * * Neither the names of the above-listed copyright holders nor the
25033 + * names of any contributors may be used to endorse or promote products
25034 + * derived from this software without specific prior written permission.
25035 + *
25036 + *
25037 + * ALTERNATIVELY, this software may be distributed under the terms of the
25038 + * GNU General Public License ("GPL") as published by the Free Software
25039 + * Foundation, either version 2 of that License or (at your option) any
25040 + * later version.
25041 + *
25042 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25043 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25044 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25045 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
25046 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25047 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25048 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25049 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25050 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25051 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25052 + * POSSIBILITY OF SUCH DAMAGE.
25053 + */
25054 +
25055 +#include <linux/fsl/mc.h>
25056 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
25057 +#include "dpseci.h"
25058 +#include "dpseci_cmd.h"
25059 +
25060 +/**
25061 + * dpseci_open() - Open a control session for the specified object
25062 + * @mc_io: Pointer to MC portal's I/O object
25063 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25064 + * @dpseci_id: DPSECI unique ID
25065 + * @token: Returned token; use in subsequent API calls
25066 + *
25067 + * This function can be used to open a control session for an already created
25068 + * object; an object may have been declared in the DPL or by calling the
25069 + * dpseci_create() function.
25070 + * This function returns a unique authentication token, associated with the
25071 + * specific object ID and the specific MC portal; this token must be used in all
25072 + * subsequent commands for this specific object.
25073 + *
25074 + * Return: '0' on success, error code otherwise
25075 + */
25076 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
25077 + u16 *token)
25078 +{
25079 + struct fsl_mc_command cmd = { 0 };
25080 + struct dpseci_cmd_open *cmd_params;
25081 + int err;
25082 +
25083 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
25084 + cmd_flags,
25085 + 0);
25086 + cmd_params = (struct dpseci_cmd_open *)cmd.params;
25087 + cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
25088 + err = mc_send_command(mc_io, &cmd);
25089 + if (err)
25090 + return err;
25091 +
25092 + *token = mc_cmd_hdr_read_token(&cmd);
25093 +
25094 + return 0;
25095 +}
25096 +
25097 +/**
25098 + * dpseci_close() - Close the control session of the object
25099 + * @mc_io: Pointer to MC portal's I/O object
25100 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25101 + * @token: Token of DPSECI object
25102 + *
25103 + * After this function is called, no further operations are allowed on the
25104 + * object without opening a new control session.
25105 + *
25106 + * Return: '0' on success, error code otherwise
25107 + */
25108 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
25109 +{
25110 + struct fsl_mc_command cmd = { 0 };
25111 +
25112 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
25113 + cmd_flags,
25114 + token);
25115 + return mc_send_command(mc_io, &cmd);
25116 +}
25117 +
25118 +/**
25119 + * dpseci_create() - Create the DPSECI object
25120 + * @mc_io: Pointer to MC portal's I/O object
25121 + * @dprc_token: Parent container token; '0' for default container
25122 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25123 + * @cfg: Configuration structure
25124 + * @obj_id: returned object id
25125 + *
25126 + * Create the DPSECI object, allocate required resources and perform required
25127 + * initialization.
25128 + *
25129 + * The object can be created either by declaring it in the DPL file, or by
25130 + * calling this function.
25131 + *
25132 + * The function accepts an authentication token of a parent container that this
25133 + * object should be assigned to. The token can be '0' so the object will be
25134 + * assigned to the default container.
25135 + * The newly created object can be opened with the returned object id and using
25136 + * the container's associated tokens and MC portals.
25137 + *
25138 + * Return: '0' on success, error code otherwise
25139 + */
25140 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
25141 + const struct dpseci_cfg *cfg, u32 *obj_id)
25142 +{
25143 + struct fsl_mc_command cmd = { 0 };
25144 + struct dpseci_cmd_create *cmd_params;
25145 + int i, err;
25146 +
25147 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
25148 + cmd_flags,
25149 + dprc_token);
25150 + cmd_params = (struct dpseci_cmd_create *)cmd.params;
25151 + for (i = 0; i < 8; i++)
25152 + cmd_params->priorities[i] = cfg->priorities[i];
25153 + cmd_params->num_tx_queues = cfg->num_tx_queues;
25154 + cmd_params->num_rx_queues = cfg->num_rx_queues;
25155 + cmd_params->options = cpu_to_le32(cfg->options);
25156 + err = mc_send_command(mc_io, &cmd);
25157 + if (err)
25158 + return err;
25159 +
25160 + *obj_id = mc_cmd_read_object_id(&cmd);
25161 +
25162 + return 0;
25163 +}
25164 +
25165 +/**
25166 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
25167 + * @mc_io: Pointer to MC portal's I/O object
25168 + * @dprc_token: Parent container token; '0' for default container
25169 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25170 + * @object_id: The object id; it must be a valid id within the container that
25171 + * created this object
25172 + *
25173 + * The function accepts the authentication token of the parent container that
25174 + * created the object (not the one that currently owns the object). The object
25175 + * is searched within parent using the provided 'object_id'.
25176 + * All tokens to the object must be closed before calling destroy.
25177 + *
25178 + * Return: '0' on success, error code otherwise
25179 + */
25180 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
25181 + u32 object_id)
25182 +{
25183 + struct fsl_mc_command cmd = { 0 };
25184 + struct dpseci_cmd_destroy *cmd_params;
25185 +
25186 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
25187 + cmd_flags,
25188 + dprc_token);
25189 + cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
25190 + cmd_params->object_id = cpu_to_le32(object_id);
25191 +
25192 + return mc_send_command(mc_io, &cmd);
25193 +}
25194 +
25195 +/**
25196 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
25197 + * @mc_io: Pointer to MC portal's I/O object
25198 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25199 + * @token: Token of DPSECI object
25200 + *
25201 + * Return: '0' on success, error code otherwise
25202 + */
25203 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
25204 +{
25205 + struct fsl_mc_command cmd = { 0 };
25206 +
25207 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
25208 + cmd_flags,
25209 + token);
25210 + return mc_send_command(mc_io, &cmd);
25211 +}
25212 +
25213 +/**
25214 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
25215 + * @mc_io: Pointer to MC portal's I/O object
25216 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25217 + * @token: Token of DPSECI object
25218 + *
25219 + * Return: '0' on success, error code otherwise
25220 + */
25221 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
25222 +{
25223 + struct fsl_mc_command cmd = { 0 };
25224 +
25225 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
25226 + cmd_flags,
25227 + token);
25228 +
25229 + return mc_send_command(mc_io, &cmd);
25230 +}
25231 +
25232 +/**
25233 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
25234 + * @mc_io: Pointer to MC portal's I/O object
25235 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25236 + * @token: Token of DPSECI object
25237 + * @en: Returns '1' if object is enabled; '0' otherwise
25238 + *
25239 + * Return: '0' on success, error code otherwise
25240 + */
25241 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25242 + int *en)
25243 +{
25244 + struct fsl_mc_command cmd = { 0 };
25245 + struct dpseci_rsp_is_enabled *rsp_params;
25246 + int err;
25247 +
25248 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
25249 + cmd_flags,
25250 + token);
25251 + err = mc_send_command(mc_io, &cmd);
25252 + if (err)
25253 + return err;
25254 +
25255 + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
25256 + *en = le32_to_cpu(rsp_params->is_enabled);
25257 +
25258 + return 0;
25259 +}
25260 +
25261 +/**
25262 + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
25263 + * @mc_io: Pointer to MC portal's I/O object
25264 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25265 + * @token: Token of DPSECI object
25266 + *
25267 + * Return: '0' on success, error code otherwise
25268 + */
25269 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
25270 +{
25271 + struct fsl_mc_command cmd = { 0 };
25272 +
25273 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
25274 + cmd_flags,
25275 + token);
25276 +
25277 + return mc_send_command(mc_io, &cmd);
25278 +}
25279 +
25280 +/**
25281 + * dpseci_get_irq_enable() - Get overall interrupt state
25282 + * @mc_io: Pointer to MC portal's I/O object
25283 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25284 + * @token: Token of DPSECI object
25285 + * @irq_index: The interrupt index to configure
25286 + * @en: Returned Interrupt state - enable = 1, disable = 0
25287 + *
25288 + * Return: '0' on success, error code otherwise
25289 + */
25290 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25291 + u8 irq_index, u8 *en)
25292 +{
25293 + struct fsl_mc_command cmd = { 0 };
25294 + struct dpseci_cmd_irq_enable *cmd_params;
25295 + struct dpseci_rsp_get_irq_enable *rsp_params;
25296 + int err;
25297 +
25298 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
25299 + cmd_flags,
25300 + token);
25301 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
25302 + cmd_params->irq_index = irq_index;
25303 + err = mc_send_command(mc_io, &cmd);
25304 + if (err)
25305 + return err;
25306 +
25307 + rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
25308 + *en = rsp_params->enable_state;
25309 +
25310 + return 0;
25311 +}
25312 +
25313 +/**
25314 + * dpseci_set_irq_enable() - Set overall interrupt state.
25315 + * @mc_io: Pointer to MC portal's I/O object
25316 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25317 + * @token: Token of DPSECI object
25318 + * @irq_index: The interrupt index to configure
25319 + * @en: Interrupt state - enable = 1, disable = 0
25320 + *
25321 + * Allows GPP software to control when interrupts are generated.
25322 + * Each interrupt can have up to 32 causes. The enable/disable setting controls
25323 + * the overall interrupt state. If the interrupt is disabled, no causes will cause
25324 + * an interrupt.
25325 + *
25326 + * Return: '0' on success, error code otherwise
25327 + */
25328 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25329 + u8 irq_index, u8 en)
25330 +{
25331 + struct fsl_mc_command cmd = { 0 };
25332 + struct dpseci_cmd_irq_enable *cmd_params;
25333 +
25334 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
25335 + cmd_flags,
25336 + token);
25337 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
25338 + cmd_params->irq_index = irq_index;
25339 + cmd_params->enable_state = en;
25340 +
25341 + return mc_send_command(mc_io, &cmd);
25342 +}
25343 +
25344 +/**
25345 + * dpseci_get_irq_mask() - Get interrupt mask.
25346 + * @mc_io: Pointer to MC portal's I/O object
25347 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25348 + * @token: Token of DPSECI object
25349 + * @irq_index: The interrupt index to configure
25350 + * @mask: Returned event mask to trigger interrupt
25351 + *
25352 + * Every interrupt can have up to 32 causes and the interrupt model supports
25353 + * masking/unmasking each cause independently.
25354 + *
25355 + * Return: '0' on success, error code otherwise
25356 + */
25357 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25358 + u8 irq_index, u32 *mask)
25359 +{
25360 + struct fsl_mc_command cmd = { 0 };
25361 + struct dpseci_cmd_irq_mask *cmd_params;
25362 + int err;
25363 +
25364 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
25365 + cmd_flags,
25366 + token);
25367 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
25368 + cmd_params->irq_index = irq_index;
25369 + err = mc_send_command(mc_io, &cmd);
25370 + if (err)
25371 + return err;
25372 +
25373 + *mask = le32_to_cpu(cmd_params->mask);
25374 +
25375 + return 0;
25376 +}
25377 +
25378 +/**
25379 + * dpseci_set_irq_mask() - Set interrupt mask.
25380 + * @mc_io: Pointer to MC portal's I/O object
25381 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25382 + * @token: Token of DPSECI object
25383 + * @irq_index: The interrupt index to configure
25384 + * @mask: event mask to trigger interrupt;
25385 + * each bit:
25386 + * 0 = ignore event
25387 + * 1 = consider event for asserting IRQ
25388 + *
25389 + * Every interrupt can have up to 32 causes and the interrupt model supports
25390 + * masking/unmasking each cause independently
25391 + *
25392 + * Return: '0' on success, error code otherwise
25393 + */
25394 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25395 + u8 irq_index, u32 mask)
25396 +{
25397 + struct fsl_mc_command cmd = { 0 };
25398 + struct dpseci_cmd_irq_mask *cmd_params;
25399 +
25400 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
25401 + cmd_flags,
25402 + token);
25403 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
25404 + cmd_params->mask = cpu_to_le32(mask);
25405 + cmd_params->irq_index = irq_index;
25406 +
25407 + return mc_send_command(mc_io, &cmd);
25408 +}
25409 +
25410 +/**
25411 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
25412 + * @mc_io: Pointer to MC portal's I/O object
25413 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25414 + * @token: Token of DPSECI object
25415 + * @irq_index: The interrupt index to configure
25416 + * @status: Returned interrupts status - one bit per cause:
25417 + * 0 = no interrupt pending
25418 + * 1 = interrupt pending
25419 + *
25420 + * Return: '0' on success, error code otherwise
25421 + */
25422 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25423 + u8 irq_index, u32 *status)
25424 +{
25425 + struct fsl_mc_command cmd = { 0 };
25426 + struct dpseci_cmd_irq_status *cmd_params;
25427 + int err;
25428 +
25429 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
25430 + cmd_flags,
25431 + token);
25432 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
25433 + cmd_params->status = cpu_to_le32(*status);
25434 + cmd_params->irq_index = irq_index;
25435 + err = mc_send_command(mc_io, &cmd);
25436 + if (err)
25437 + return err;
25438 +
25439 + *status = le32_to_cpu(cmd_params->status);
25440 +
25441 + return 0;
25442 +}
25443 +
25444 +/**
25445 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
25446 + * @mc_io: Pointer to MC portal's I/O object
25447 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25448 + * @token: Token of DPSECI object
25449 + * @irq_index: The interrupt index to configure
25450 + * @status: bits to clear (W1C) - one bit per cause:
25451 + * 0 = don't change
25452 + * 1 = clear status bit
25453 + *
25454 + * Return: '0' on success, error code otherwise
25455 + */
25456 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25457 + u8 irq_index, u32 status)
25458 +{
25459 + struct fsl_mc_command cmd = { 0 };
25460 + struct dpseci_cmd_irq_status *cmd_params;
25461 +
25462 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
25463 + cmd_flags,
25464 + token);
25465 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
25466 + cmd_params->status = cpu_to_le32(status);
25467 + cmd_params->irq_index = irq_index;
25468 +
25469 + return mc_send_command(mc_io, &cmd);
25470 +}
25471 +
25472 +/**
25473 + * dpseci_get_attributes() - Retrieve DPSECI attributes
25474 + * @mc_io: Pointer to MC portal's I/O object
25475 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25476 + * @token: Token of DPSECI object
25477 + * @attr: Returned object's attributes
25478 + *
25479 + * Return: '0' on success, error code otherwise
25480 + */
25481 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25482 + struct dpseci_attr *attr)
25483 +{
25484 + struct fsl_mc_command cmd = { 0 };
25485 + struct dpseci_rsp_get_attributes *rsp_params;
25486 + int err;
25487 +
25488 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
25489 + cmd_flags,
25490 + token);
25491 + err = mc_send_command(mc_io, &cmd);
25492 + if (err)
25493 + return err;
25494 +
25495 + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
25496 + attr->id = le32_to_cpu(rsp_params->id);
25497 + attr->num_tx_queues = rsp_params->num_tx_queues;
25498 + attr->num_rx_queues = rsp_params->num_rx_queues;
25499 + attr->options = le32_to_cpu(rsp_params->options);
25500 +
25501 + return 0;
25502 +}
25503 +
25504 +/**
25505 + * dpseci_set_rx_queue() - Set Rx queue configuration
25506 + * @mc_io: Pointer to MC portal's I/O object
25507 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25508 + * @token: Token of DPSECI object
25509 + * @queue: Select the queue relative to number of priorities configured at
25510 + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
25511 + * Rx queues identically.
25512 + * @cfg: Rx queue configuration
25513 + *
25514 + * Return: '0' on success, error code otherwise
25515 + */
25516 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25517 + u8 queue, const struct dpseci_rx_queue_cfg *cfg)
25518 +{
25519 + struct fsl_mc_command cmd = { 0 };
25520 + struct dpseci_cmd_queue *cmd_params;
25521 +
25522 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
25523 + cmd_flags,
25524 + token);
25525 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
25526 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
25527 + cmd_params->priority = cfg->dest_cfg.priority;
25528 + cmd_params->queue = queue;
25529 + cmd_params->dest_type = cfg->dest_cfg.dest_type;
25530 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
25531 + cmd_params->options = cpu_to_le32(cfg->options);
25532 + cmd_params->order_preservation_en =
25533 + cpu_to_le32(cfg->order_preservation_en);
25534 +
25535 + return mc_send_command(mc_io, &cmd);
25536 +}
25537 +
25538 +/**
25539 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
25540 + * @mc_io: Pointer to MC portal's I/O object
25541 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25542 + * @token: Token of DPSECI object
25543 + * @queue: Select the queue relative to number of priorities configured at
25544 + * DPSECI creation
25545 + * @attr: Returned Rx queue attributes
25546 + *
25547 + * Return: '0' on success, error code otherwise
25548 + */
25549 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25550 + u8 queue, struct dpseci_rx_queue_attr *attr)
25551 +{
25552 + struct fsl_mc_command cmd = { 0 };
25553 + struct dpseci_cmd_queue *cmd_params;
25554 + int err;
25555 +
25556 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
25557 + cmd_flags,
25558 + token);
25559 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
25560 + cmd_params->queue = queue;
25561 + err = mc_send_command(mc_io, &cmd);
25562 + if (err)
25563 + return err;
25564 +
25565 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
25566 + attr->dest_cfg.priority = cmd_params->priority;
25567 + attr->dest_cfg.dest_type = cmd_params->dest_type;
25568 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
25569 + attr->fqid = le32_to_cpu(cmd_params->fqid);
25570 + attr->order_preservation_en =
25571 + le32_to_cpu(cmd_params->order_preservation_en);
25572 +
25573 + return 0;
25574 +}
25575 +
25576 +/**
25577 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
25578 + * @mc_io: Pointer to MC portal's I/O object
25579 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25580 + * @token: Token of DPSECI object
25581 + * @queue: Select the queue relative to number of priorities configured at
25582 + * DPSECI creation
25583 + * @attr: Returned Tx queue attributes
25584 + *
25585 + * Return: '0' on success, error code otherwise
25586 + */
25587 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25588 + u8 queue, struct dpseci_tx_queue_attr *attr)
25589 +{
25590 + struct fsl_mc_command cmd = { 0 };
25591 + struct dpseci_cmd_queue *cmd_params;
25592 + struct dpseci_rsp_get_tx_queue *rsp_params;
25593 + int err;
25594 +
25595 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
25596 + cmd_flags,
25597 + token);
25598 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
25599 + cmd_params->queue = queue;
25600 + err = mc_send_command(mc_io, &cmd);
25601 + if (err)
25602 + return err;
25603 +
25604 + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
25605 + attr->fqid = le32_to_cpu(rsp_params->fqid);
25606 + attr->priority = rsp_params->priority;
25607 +
25608 + return 0;
25609 +}
25610 +
25611 +/**
25612 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
25613 + * @mc_io: Pointer to MC portal's I/O object
25614 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25615 + * @token: Token of DPSECI object
25616 + * @attr: Returned SEC attributes
25617 + *
25618 + * Return: '0' on success, error code otherwise
25619 + */
25620 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25621 + struct dpseci_sec_attr *attr)
25622 +{
25623 + struct fsl_mc_command cmd = { 0 };
25624 + struct dpseci_rsp_get_sec_attr *rsp_params;
25625 + int err;
25626 +
25627 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
25628 + cmd_flags,
25629 + token);
25630 + err = mc_send_command(mc_io, &cmd);
25631 + if (err)
25632 + return err;
25633 +
25634 + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
25635 + attr->ip_id = le16_to_cpu(rsp_params->ip_id);
25636 + attr->major_rev = rsp_params->major_rev;
25637 + attr->minor_rev = rsp_params->minor_rev;
25638 + attr->era = rsp_params->era;
25639 + attr->deco_num = rsp_params->deco_num;
25640 + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
25641 + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
25642 + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
25643 + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
25644 + attr->crc_acc_num = rsp_params->crc_acc_num;
25645 + attr->pk_acc_num = rsp_params->pk_acc_num;
25646 + attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
25647 + attr->rng_acc_num = rsp_params->rng_acc_num;
25648 + attr->md_acc_num = rsp_params->md_acc_num;
25649 + attr->arc4_acc_num = rsp_params->arc4_acc_num;
25650 + attr->des_acc_num = rsp_params->des_acc_num;
25651 + attr->aes_acc_num = rsp_params->aes_acc_num;
25652 +
25653 + return 0;
25654 +}
25655 +
25656 +/**
25657 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
25658 + * @mc_io: Pointer to MC portal's I/O object
25659 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25660 + * @token: Token of DPSECI object
25661 + * @counters: Returned SEC counters
25662 + *
25663 + * Return: '0' on success, error code otherwise
25664 + */
25665 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25666 + struct dpseci_sec_counters *counters)
25667 +{
25668 + struct fsl_mc_command cmd = { 0 };
25669 + struct dpseci_rsp_get_sec_counters *rsp_params;
25670 + int err;
25671 +
25672 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
25673 + cmd_flags,
25674 + token);
25675 + err = mc_send_command(mc_io, &cmd);
25676 + if (err)
25677 + return err;
25678 +
25679 + rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
25680 + counters->dequeued_requests =
25681 + le64_to_cpu(rsp_params->dequeued_requests);
25682 + counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
25683 + counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
25684 + counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
25685 + counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
25686 + counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
25687 + counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
25688 +
25689 + return 0;
25690 +}
25691 +
25692 +/**
25693 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
25694 + * @mc_io: Pointer to MC portal's I/O object
25695 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25696 + * @major_ver: Major version of data path sec API
25697 + * @minor_ver: Minor version of data path sec API
25698 + *
25699 + * Return: '0' on success, error code otherwise
25700 + */
25701 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
25702 + u16 *major_ver, u16 *minor_ver)
25703 +{
25704 + struct fsl_mc_command cmd = { 0 };
25705 + struct dpseci_rsp_get_api_version *rsp_params;
25706 + int err;
25707 +
25708 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
25709 + cmd_flags, 0);
25710 + err = mc_send_command(mc_io, &cmd);
25711 + if (err)
25712 + return err;
25713 +
25714 + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
25715 + *major_ver = le16_to_cpu(rsp_params->major);
25716 + *minor_ver = le16_to_cpu(rsp_params->minor);
25717 +
25718 + return 0;
25719 +}
25720 +
25721 +/**
25722 + * dpseci_set_opr() - Set Order Restoration configuration
25723 + * @mc_io: Pointer to MC portal's I/O object
25724 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25725 + * @token: Token of DPSECI object
25726 + * @index: The queue index
25727 + * @options: Configuration mode options; can be OPR_OPT_CREATE or
25728 + * OPR_OPT_RETIRE
25729 + * @cfg: Configuration options for the OPR
25730 + *
25731 + * Return: '0' on success, error code otherwise
25732 + */
25733 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25734 + u8 options, struct opr_cfg *cfg)
25735 +{
25736 + struct fsl_mc_command cmd = { 0 };
25737 + struct dpseci_cmd_opr *cmd_params;
25738 +
25739 + cmd.header = mc_encode_cmd_header(
25740 + DPSECI_CMDID_SET_OPR,
25741 + cmd_flags,
25742 + token);
25743 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
25744 + cmd_params->index = index;
25745 + cmd_params->options = options;
25746 + cmd_params->oloe = cfg->oloe;
25747 + cmd_params->oeane = cfg->oeane;
25748 + cmd_params->olws = cfg->olws;
25749 + cmd_params->oa = cfg->oa;
25750 + cmd_params->oprrws = cfg->oprrws;
25751 +
25752 + return mc_send_command(mc_io, &cmd);
25753 +}
25754 +
25755 +/**
25756 + * dpseci_get_opr() - Retrieve Order Restoration config and query
25757 + * @mc_io: Pointer to MC portal's I/O object
25758 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25759 + * @token: Token of DPSECI object
25760 + * @index: The queue index
25761 + * @cfg: Returned OPR configuration
25762 + * @qry: Returned OPR query
25763 + *
25764 + * Return: '0' on success, error code otherwise
25765 + */
25766 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25767 + struct opr_cfg *cfg, struct opr_qry *qry)
25768 +{
25769 + struct fsl_mc_command cmd = { 0 };
25770 + struct dpseci_cmd_opr *cmd_params;
25771 + struct dpseci_rsp_get_opr *rsp_params;
25772 + int err;
25773 +
25774 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
25775 + cmd_flags,
25776 + token);
25777 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
25778 + cmd_params->index = index;
25779 + err = mc_send_command(mc_io, &cmd);
25780 + if (err)
25781 + return err;
25782 +
25783 + rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
25784 + qry->rip = dpseci_get_field(rsp_params->rip_enable, OPR_RIP);
25785 + qry->enable = dpseci_get_field(rsp_params->rip_enable, OPR_ENABLE);
25786 + cfg->oloe = rsp_params->oloe;
25787 + cfg->oeane = rsp_params->oeane;
25788 + cfg->olws = rsp_params->olws;
25789 + cfg->oa = rsp_params->oa;
25790 + cfg->oprrws = rsp_params->oprrws;
25791 + qry->nesn = le16_to_cpu(rsp_params->nesn);
25792 + qry->ndsn = le16_to_cpu(rsp_params->ndsn);
25793 + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
25794 + qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
25795 + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
25796 + qry->hseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_HSEQ_NLIS);
25797 + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
25798 + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
25799 + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
25800 + qry->opr_id = le16_to_cpu(rsp_params->opr_id);
25801 +
25802 + return 0;
25803 +}
25804 +
25805 +/**
25806 + * dpseci_set_congestion_notification() - Set congestion group
25807 + * notification configuration
25808 + * @mc_io: Pointer to MC portal's I/O object
25809 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25810 + * @token: Token of DPSECI object
25811 + * @cfg: congestion notification configuration
25812 + *
25813 + * Return: '0' on success, error code otherwise
25814 + */
25815 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25816 + u16 token, const struct dpseci_congestion_notification_cfg *cfg)
25817 +{
25818 + struct fsl_mc_command cmd = { 0 };
25819 + struct dpseci_cmd_congestion_notification *cmd_params;
25820 +
25821 + cmd.header = mc_encode_cmd_header(
25822 + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
25823 + cmd_flags,
25824 + token);
25825 + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
25826 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
25827 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
25828 + cmd_params->priority = cfg->dest_cfg.priority;
25829 + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
25830 + cfg->dest_cfg.dest_type);
25831 + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
25832 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
25833 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
25834 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
25835 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
25836 +
25837 + return mc_send_command(mc_io, &cmd);
25838 +}
25839 +
25840 +/**
25841 + * dpseci_get_congestion_notification() - Get congestion group notification
25842 + * configuration
25843 + * @mc_io: Pointer to MC portal's I/O object
25844 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25845 + * @token: Token of DPSECI object
25846 + * @cfg: congestion notification configuration
25847 + *
25848 + * Return: '0' on success, error code otherwise
25849 + */
25850 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25851 + u16 token, struct dpseci_congestion_notification_cfg *cfg)
25852 +{
25853 + struct fsl_mc_command cmd = { 0 };
25854 + struct dpseci_cmd_congestion_notification *rsp_params;
25855 + int err;
25856 +
25857 + cmd.header = mc_encode_cmd_header(
25858 + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
25859 + cmd_flags,
25860 + token);
25861 + err = mc_send_command(mc_io, &cmd);
25862 + if (err)
25863 + return err;
25864 +
25865 + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
25866 + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
25867 + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
25868 + cfg->dest_cfg.priority = rsp_params->priority;
25869 + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
25870 + CGN_DEST_TYPE);
25871 + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
25872 + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
25873 + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
25874 + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
25875 + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
25876 +
25877 + return 0;
25878 +}
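Editor's note: to show how the MC commands above fit together, here is a minimal bring-up sketch (not part of the patch). example_dpseci_bringup() is a hypothetical helper, and cmd_flags of 0 (no special MC command flags) is assumed throughout.

static int example_dpseci_bringup(struct fsl_mc_io *mc_io, int dpseci_id)
{
	struct dpseci_attr attr;
	u16 token, major, minor;
	int err;

	err = dpseci_open(mc_io, 0, dpseci_id, &token);
	if (err)
		return err;

	err = dpseci_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		goto out;

	err = dpseci_get_attributes(mc_io, 0, token, &attr);
	if (err)
		goto out;

	/* Rx/Tx frame queues would be configured here; see dpseci_set_rx_queue() */

	err = dpseci_enable(mc_io, 0, token);
out:
	/* closing the control session does not disable the DPSECI object */
	dpseci_close(mc_io, 0, token);
	return err;
}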
25879 --- /dev/null
25880 +++ b/drivers/crypto/caam/dpseci.h
25881 @@ -0,0 +1,395 @@
25882 +/*
25883 + * Copyright 2013-2016 Freescale Semiconductor Inc.
25884 + * Copyright 2017 NXP
25885 + *
25886 + * Redistribution and use in source and binary forms, with or without
25887 + * modification, are permitted provided that the following conditions are met:
25888 + * * Redistributions of source code must retain the above copyright
25889 + * notice, this list of conditions and the following disclaimer.
25890 + * * Redistributions in binary form must reproduce the above copyright
25891 + * notice, this list of conditions and the following disclaimer in the
25892 + * documentation and/or other materials provided with the distribution.
25893 + * * Neither the names of the above-listed copyright holders nor the
25894 + * names of any contributors may be used to endorse or promote products
25895 + * derived from this software without specific prior written permission.
25896 + *
25897 + *
25898 + * ALTERNATIVELY, this software may be distributed under the terms of the
25899 + * GNU General Public License ("GPL") as published by the Free Software
25900 + * Foundation, either version 2 of that License or (at your option) any
25901 + * later version.
25902 + *
25903 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25904 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25905 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25906 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
25907 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25908 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25909 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25910 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25911 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25912 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25913 + * POSSIBILITY OF SUCH DAMAGE.
25914 + */
25915 +#ifndef _DPSECI_H_
25916 +#define _DPSECI_H_
25917 +
25918 +/*
25919 + * Data Path SEC Interface API
25920 + * Contains initialization APIs and runtime control APIs for DPSECI
25921 + */
25922 +
25923 +struct fsl_mc_io;
25924 +struct opr_cfg;
25925 +struct opr_qry;
25926 +
25927 +/**
25928 + * General DPSECI macros
25929 + */
25930 +
25931 +/**
25932 + * Maximum number of Tx/Rx priorities per DPSECI object
25933 + */
25934 +#define DPSECI_PRIO_NUM 8
25935 +
25936 +/**
25937 + * All queues considered; see dpseci_set_rx_queue()
25938 + */
25939 +#define DPSECI_ALL_QUEUES (u8)(-1)
25940 +
25941 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
25942 + u16 *token);
25943 +
25944 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25945 +
25946 +/**
25947 + * Enable the Congestion Group support
25948 + */
25949 +#define DPSECI_OPT_HAS_CG 0x000020
25950 +
25951 +/**
25952 + * Enable the Order Restoration support
25953 + */
25954 +#define DPSECI_OPT_HAS_OPR 0x000040
25955 +
25956 +/**
25957 + * Order Point Records are shared for the entire DPSECI
25958 + */
25959 +#define DPSECI_OPT_OPR_SHARED 0x000080
25960 +
25961 +/**
25962 + * struct dpseci_cfg - Structure representing DPSECI configuration
25963 + * @options: Any combination of the following options:
25964 + * DPSECI_OPT_HAS_CG
25965 + * DPSECI_OPT_HAS_OPR
25966 + * DPSECI_OPT_OPR_SHARED
25967 + * @num_tx_queues: num of queues towards the SEC
25968 + * @num_rx_queues: num of queues back from the SEC
25969 + * @priorities: Priorities for the SEC hardware processing;
25970 + * each place in the array is the priority of the tx queue
25971 + * towards the SEC;
25972 + * valid priorities are configured with values 1-8;
25973 + */
25974 +struct dpseci_cfg {
25975 + u32 options;
25976 + u8 num_tx_queues;
25977 + u8 num_rx_queues;
25978 + u8 priorities[DPSECI_PRIO_NUM];
25979 +};
25980 +
25981 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
25982 + const struct dpseci_cfg *cfg, u32 *obj_id);
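Editor's note: a brief sketch (not part of the patch) of filling the configuration structure above when creating a DPSECI object with two queue pairs; the option, queue counts and priority values are arbitrary examples.

static int example_dpseci_create(struct fsl_mc_io *mc_io, u32 *obj_id)
{
	struct dpseci_cfg cfg = {
		.options = DPSECI_OPT_HAS_CG,
		.num_tx_queues = 2,
		.num_rx_queues = 2,
		.priorities = { 1, 2 },	/* valid priorities are 1-8 */
	};

	/* dprc_token = 0: assign the object to the default container */
	return dpseci_create(mc_io, 0, 0, &cfg, obj_id);
}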
25983 +
25984 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
25985 + u32 object_id);
25986 +
25987 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25988 +
25989 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25990 +
25991 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25992 + int *en);
25993 +
25994 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25995 +
25996 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25997 + u8 irq_index, u8 *en);
25998 +
25999 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
26000 + u8 irq_index, u8 en);
26001 +
26002 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
26003 + u8 irq_index, u32 *mask);
26004 +
26005 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
26006 + u8 irq_index, u32 mask);
26007 +
26008 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
26009 + u8 irq_index, u32 *status);
26010 +
26011 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
26012 + u8 irq_index, u32 status);
26013 +
26014 +/**
26015 + * struct dpseci_attr - Structure representing DPSECI attributes
26016 + * @id: DPSECI object ID
26017 + * @num_tx_queues: number of queues towards the SEC
26018 + * @num_rx_queues: number of queues back from the SEC
26019 + * @options: any combination of the following options:
26020 + * DPSECI_OPT_HAS_CG
26021 + * DPSECI_OPT_HAS_OPR
26022 + * DPSECI_OPT_OPR_SHARED
26023 + */
26024 +struct dpseci_attr {
26025 + int id;
26026 + u8 num_tx_queues;
26027 + u8 num_rx_queues;
26028 + u32 options;
26029 +};
26030 +
26031 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
26032 + struct dpseci_attr *attr);
26033 +
26034 +/**
26035 + * enum dpseci_dest - DPSECI destination types
26036 + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
26037 + * and does not generate FQDAN notifications; user is expected to dequeue
26038 + * from the queue based on polling or other user-defined method
26039 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
26040 + * notifications to the specified DPIO; user is expected to dequeue from
26041 + * the queue only after notification is received
26042 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
26043 + * FQDAN notifications, but is connected to the specified DPCON object;
26044 + * user is expected to dequeue from the DPCON channel
26045 + */
26046 +enum dpseci_dest {
26047 + DPSECI_DEST_NONE = 0,
26048 + DPSECI_DEST_DPIO,
26049 + DPSECI_DEST_DPCON
26050 +};
26051 +
26052 +/**
26053 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
26054 + * @dest_type: Destination type
26055 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
26056 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
26057 + * are 0-1 or 0-7, depending on the number of priorities in that channel;
26058 + * not relevant for 'DPSECI_DEST_NONE' option
26059 + */
26060 +struct dpseci_dest_cfg {
26061 + enum dpseci_dest dest_type;
26062 + int dest_id;
26063 + u8 priority;
26064 +};
26065 +
26066 +/**
26067 + * DPSECI queue modification options
26068 + */
26069 +
26070 +/**
26071 + * Select to modify the user's context associated with the queue
26072 + */
26073 +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
26074 +
26075 +/**
26076 + * Select to modify the queue's destination
26077 + */
26078 +#define DPSECI_QUEUE_OPT_DEST 0x00000002
26079 +
26080 +/**
26081 + * Select to modify the queue's order preservation
26082 + */
26083 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
26084 +
26085 +/**
26086 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
26087 + * @options: Flags representing the suggested modifications to the queue;
26088 + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
26089 + * @order_preservation_en: order preservation configuration for the rx queue;
26090 + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
26091 + * @user_ctx: User context value provided in the frame descriptor of each
26092 + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
26093 + * in 'options'
26094 + * @dest_cfg: Queue destination parameters; valid only if
26095 + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
26096 + */
26097 +struct dpseci_rx_queue_cfg {
26098 + u32 options;
26099 + int order_preservation_en;
26100 + u64 user_ctx;
26101 + struct dpseci_dest_cfg dest_cfg;
26102 +};
26103 +
26104 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
26105 + u8 queue, const struct dpseci_rx_queue_cfg *cfg);
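Editor's note: as an illustration of the queue option flags above (a sketch, not part of the patch), an Rx queue can be pointed at a DPIO notification channel as follows; example_set_rx_queue(), the DPIO id and the priority value are placeholders.

static int example_set_rx_queue(struct fsl_mc_io *mc_io, u16 token,
				int dpio_id, u64 ctx)
{
	struct dpseci_rx_queue_cfg cfg = {
		.options = DPSECI_QUEUE_OPT_USER_CTX | DPSECI_QUEUE_OPT_DEST,
		.user_ctx = ctx,
		.dest_cfg = {
			.dest_type = DPSECI_DEST_DPIO,
			.dest_id = dpio_id,
			.priority = 0,
		},
	};

	/* DPSECI_ALL_QUEUES applies the same configuration to every Rx queue */
	return dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &cfg);
}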
26106 +
26107 +/**
26108 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
26109 + * @user_ctx: User context value provided in the frame descriptor of each
26110 + * dequeued frame
26111 + * @order_preservation_en: Status of the order preservation configuration on the
26112 + * queue
26113 + * @dest_cfg: Queue destination configuration
26114 + * @fqid: Virtual FQID value to be used for dequeue operations
26115 + */
26116 +struct dpseci_rx_queue_attr {
26117 + u64 user_ctx;
26118 + int order_preservation_en;
26119 + struct dpseci_dest_cfg dest_cfg;
26120 + u32 fqid;
26121 +};
26122 +
26123 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
26124 + u8 queue, struct dpseci_rx_queue_attr *attr);
26125 +
26126 +/**
26127 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
26128 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
26129 + * @priority: SEC hardware processing priority for the queue
26130 + */
26131 +struct dpseci_tx_queue_attr {
26132 + u32 fqid;
26133 + u8 priority;
26134 +};
26135 +
26136 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
26137 + u8 queue, struct dpseci_tx_queue_attr *attr);
26138 +
26139 +/**
26140 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
26141 + * hardware accelerator
26142 + * @ip_id: ID for SEC
26143 + * @major_rev: Major revision number for SEC
26144 + * @minor_rev: Minor revision number for SEC
26145 + * @era: SEC Era
26146 + * @deco_num: The number of copies of the DECO that are implemented in this
26147 + * version of SEC
26148 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
26149 + * version of SEC
26150 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
26151 + * version of SEC
26152 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
26153 + * implemented in this version of SEC
26154 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
26155 + * implemented in this version of SEC
26156 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
26157 + * this version of SEC
26158 + * @pk_acc_num: The number of copies of the Public Key module that are
26159 + * implemented in this version of SEC
26160 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
26161 + * implemented in this version of SEC
26162 + * @rng_acc_num: The number of copies of the Random Number Generator that are
26163 + * implemented in this version of SEC
26164 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
26165 + * implemented in this version of SEC
26166 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
26167 + * in this version of SEC
26168 + * @des_acc_num: The number of copies of the DES module that are implemented in
26169 + * this version of SEC
26170 + * @aes_acc_num: The number of copies of the AES module that are implemented in
26171 + * this version of SEC
26172 + **/
26173 +struct dpseci_sec_attr {
26174 + u16 ip_id;
26175 + u8 major_rev;
26176 + u8 minor_rev;
26177 + u8 era;
26178 + u8 deco_num;
26179 + u8 zuc_auth_acc_num;
26180 + u8 zuc_enc_acc_num;
26181 + u8 snow_f8_acc_num;
26182 + u8 snow_f9_acc_num;
26183 + u8 crc_acc_num;
26184 + u8 pk_acc_num;
26185 + u8 kasumi_acc_num;
26186 + u8 rng_acc_num;
26187 + u8 md_acc_num;
26188 + u8 arc4_acc_num;
26189 + u8 des_acc_num;
26190 + u8 aes_acc_num;
26191 +};
26192 +
26193 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
26194 + struct dpseci_sec_attr *attr);
26195 +
26196 +/**
26197 + * struct dpseci_sec_counters - Structure representing global SEC counters
26198 + * (not per-DPSECI counters)
26199 + * @dequeued_requests: Number of Requests Dequeued
26200 + * @ob_enc_requests: Number of Outbound Encrypt Requests
26201 + * @ib_dec_requests: Number of Inbound Decrypt Requests
26202 + * @ob_enc_bytes: Number of Outbound Bytes Encrypted
26203 + * @ob_prot_bytes: Number of Outbound Bytes Protected
26204 + * @ib_dec_bytes: Number of Inbound Bytes Decrypted
26205 + * @ib_valid_bytes: Number of Inbound Bytes Validated
26206 + */
26207 +struct dpseci_sec_counters {
26208 + u64 dequeued_requests;
26209 + u64 ob_enc_requests;
26210 + u64 ib_dec_requests;
26211 + u64 ob_enc_bytes;
26212 + u64 ob_prot_bytes;
26213 + u64 ib_dec_bytes;
26214 + u64 ib_valid_bytes;
26215 +};
26216 +
26217 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
26218 + struct dpseci_sec_counters *counters);
26219 +
26220 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
26221 + u16 *major_ver, u16 *minor_ver);
26222 +
26223 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
26224 + u8 options, struct opr_cfg *cfg);
26225 +
26226 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
26227 + struct opr_cfg *cfg, struct opr_qry *qry);
26228 +
26229 +/**
26230 + * enum dpseci_congestion_unit - DPSECI congestion units
26231 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
26232 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
26233 + */
26234 +enum dpseci_congestion_unit {
26235 + DPSECI_CONGESTION_UNIT_BYTES = 0,
26236 + DPSECI_CONGESTION_UNIT_FRAMES
26237 +};
26238 +
26239 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
26240 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
26241 +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
26242 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
26243 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
26244 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
26245 +
26246 +/**
26247 + * struct dpseci_congestion_notification_cfg - congestion notification
26248 + * configuration
26249 + * @units: units type
26250 + * @threshold_entry: above this threshold we enter a congestion state.
26251 + * set it to '0' to disable it
26252 + * @threshold_exit: below this threshold we exit the congestion state.
26253 + * @message_ctx: The context that will be part of the CSCN message
26254 + * @message_iova: I/O virtual address (must be in DMA-able memory),
26255 + * must be 16B aligned;
26256 + * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
26257 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
26258 + * values
26259 + */
26260 +struct dpseci_congestion_notification_cfg {
26261 + enum dpseci_congestion_unit units;
26262 + u32 threshold_entry;
26263 + u32 threshold_exit;
26264 + u64 message_ctx;
26265 + u64 message_iova;
26266 + struct dpseci_dest_cfg dest_cfg;
26267 + u16 notification_mode;
26268 +};
26269 +
26270 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
26271 + u16 token, const struct dpseci_congestion_notification_cfg *cfg);
26272 +
26273 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
26274 + u16 token, struct dpseci_congestion_notification_cfg *cfg);
26275 +
26276 +#endif /* _DPSECI_H_ */
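As a rough sketch of how the query helpers declared in this header might be used once a DPSECI object is open ('mc_io', 'token' and 'dev' are placeholders; error handling is trimmed):

	struct dpseci_sec_attr sec;
	struct dpseci_sec_counters cnt;
	u16 major, minor;

	if (!dpseci_get_api_version(mc_io, 0, &major, &minor))
		dev_info(dev, "dpseci API %u.%u\n", major, minor);

	if (!dpseci_get_sec_attr(mc_io, 0, token, &sec))
		dev_info(dev, "SEC era %u, %u AES unit(s), %u RNG unit(s)\n",
			 sec.era, sec.aes_acc_num, sec.rng_acc_num);

	if (!dpseci_get_sec_counters(mc_io, 0, token, &cnt))
		dev_info(dev, "%llu requests dequeued so far\n",
			 cnt.dequeued_requests);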
26277 --- /dev/null
26278 +++ b/drivers/crypto/caam/dpseci_cmd.h
26279 @@ -0,0 +1,261 @@
26280 +/*
26281 + * Copyright 2013-2016 Freescale Semiconductor Inc.
26282 + * Copyright 2017 NXP
26283 + *
26284 + * Redistribution and use in source and binary forms, with or without
26285 + * modification, are permitted provided that the following conditions are met:
26286 + * * Redistributions of source code must retain the above copyright
26287 + * notice, this list of conditions and the following disclaimer.
26288 + * * Redistributions in binary form must reproduce the above copyright
26289 + * notice, this list of conditions and the following disclaimer in the
26290 + * documentation and/or other materials provided with the distribution.
26291 + * * Neither the names of the above-listed copyright holders nor the
26292 + * names of any contributors may be used to endorse or promote products
26293 + * derived from this software without specific prior written permission.
26294 + *
26295 + *
26296 + * ALTERNATIVELY, this software may be distributed under the terms of the
26297 + * GNU General Public License ("GPL") as published by the Free Software
26298 + * Foundation, either version 2 of that License or (at your option) any
26299 + * later version.
26300 + *
26301 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26302 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26303 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26304 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
26305 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26306 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26307 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26308 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26309 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26310 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26311 + * POSSIBILITY OF SUCH DAMAGE.
26312 + */
26313 +
26314 +#ifndef _DPSECI_CMD_H_
26315 +#define _DPSECI_CMD_H_
26316 +
26317 +/* DPSECI Version */
26318 +#define DPSECI_VER_MAJOR 5
26319 +#define DPSECI_VER_MINOR 1
26320 +
26321 +#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
26322 +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
26323 +
26324 +/* Command IDs */
26325 +
26326 +#define DPSECI_CMDID_CLOSE 0x8001
26327 +#define DPSECI_CMDID_OPEN 0x8091
26328 +#define DPSECI_CMDID_CREATE 0x9092
26329 +#define DPSECI_CMDID_DESTROY 0x9891
26330 +#define DPSECI_CMDID_GET_API_VERSION 0xa091
26331 +
26332 +#define DPSECI_CMDID_ENABLE 0x0021
26333 +#define DPSECI_CMDID_DISABLE 0x0031
26334 +#define DPSECI_CMDID_GET_ATTR 0x0041
26335 +#define DPSECI_CMDID_RESET 0x0051
26336 +#define DPSECI_CMDID_IS_ENABLED 0x0061
26337 +
26338 +#define DPSECI_CMDID_SET_IRQ_ENABLE 0x0121
26339 +#define DPSECI_CMDID_GET_IRQ_ENABLE 0x0131
26340 +#define DPSECI_CMDID_SET_IRQ_MASK 0x0141
26341 +#define DPSECI_CMDID_GET_IRQ_MASK 0x0151
26342 +#define DPSECI_CMDID_GET_IRQ_STATUS 0x0161
26343 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS 0x0171
26344 +
26345 +#define DPSECI_CMDID_SET_RX_QUEUE 0x1941
26346 +#define DPSECI_CMDID_GET_RX_QUEUE 0x1961
26347 +#define DPSECI_CMDID_GET_TX_QUEUE 0x1971
26348 +#define DPSECI_CMDID_GET_SEC_ATTR 0x1981
26349 +#define DPSECI_CMDID_GET_SEC_COUNTERS 0x1991
26350 +#define DPSECI_CMDID_SET_OPR 0x19A1
26351 +#define DPSECI_CMDID_GET_OPR 0x19B1
26352 +
26353 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION 0x1701
26354 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION 0x1711
26355 +
26356 +/* Macros for accessing command fields smaller than 1 byte */
26357 +#define DPSECI_MASK(field) \
26358 + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
26359 + DPSECI_##field##_SHIFT)
26360 +
26361 +#define dpseci_set_field(var, field, val) \
26362 + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
26363 +
26364 +#define dpseci_get_field(var, field) \
26365 + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
26366 +
26367 +struct dpseci_cmd_open {
26368 + __le32 dpseci_id;
26369 +};
26370 +
26371 +struct dpseci_cmd_create {
26372 + u8 priorities[8];
26373 + u8 num_tx_queues;
26374 + u8 num_rx_queues;
26375 + __le16 pad;
26376 + __le32 options;
26377 +};
26378 +
26379 +struct dpseci_cmd_destroy {
26380 + __le32 object_id;
26381 +};
26382 +
26383 +struct dpseci_rsp_is_enabled {
26384 + __le32 is_enabled;
26385 +};
26386 +
26387 +struct dpseci_cmd_irq_enable {
26388 + u8 enable_state;
26389 + u8 pad[3];
26390 + u8 irq_index;
26391 +};
26392 +
26393 +struct dpseci_rsp_get_irq_enable {
26394 + u8 enable_state;
26395 +};
26396 +
26397 +struct dpseci_cmd_irq_mask {
26398 + __le32 mask;
26399 + u8 irq_index;
26400 +};
26401 +
26402 +struct dpseci_cmd_irq_status {
26403 + __le32 status;
26404 + u8 irq_index;
26405 +};
26406 +
26407 +struct dpseci_rsp_get_attributes {
26408 + __le32 id;
26409 + __le32 pad0;
26410 + u8 num_tx_queues;
26411 + u8 num_rx_queues;
26412 + u8 pad1[6];
26413 + __le32 options;
26414 +};
26415 +
26416 +struct dpseci_cmd_queue {
26417 + __le32 dest_id;
26418 + u8 priority;
26419 + u8 queue;
26420 + u8 dest_type;
26421 + u8 pad;
26422 + __le64 user_ctx;
26423 + union {
26424 + __le32 options;
26425 + __le32 fqid;
26426 + };
26427 + __le32 order_preservation_en;
26428 +};
26429 +
26430 +struct dpseci_rsp_get_tx_queue {
26431 + __le32 pad;
26432 + __le32 fqid;
26433 + u8 priority;
26434 +};
26435 +
26436 +struct dpseci_rsp_get_sec_attr {
26437 + __le16 ip_id;
26438 + u8 major_rev;
26439 + u8 minor_rev;
26440 + u8 era;
26441 + u8 pad0[3];
26442 + u8 deco_num;
26443 + u8 zuc_auth_acc_num;
26444 + u8 zuc_enc_acc_num;
26445 + u8 pad1;
26446 + u8 snow_f8_acc_num;
26447 + u8 snow_f9_acc_num;
26448 + u8 crc_acc_num;
26449 + u8 pad2;
26450 + u8 pk_acc_num;
26451 + u8 kasumi_acc_num;
26452 + u8 rng_acc_num;
26453 + u8 pad3;
26454 + u8 md_acc_num;
26455 + u8 arc4_acc_num;
26456 + u8 des_acc_num;
26457 + u8 aes_acc_num;
26458 +};
26459 +
26460 +struct dpseci_rsp_get_sec_counters {
26461 + __le64 dequeued_requests;
26462 + __le64 ob_enc_requests;
26463 + __le64 ib_dec_requests;
26464 + __le64 ob_enc_bytes;
26465 + __le64 ob_prot_bytes;
26466 + __le64 ib_dec_bytes;
26467 + __le64 ib_valid_bytes;
26468 +};
26469 +
26470 +struct dpseci_rsp_get_api_version {
26471 + __le16 major;
26472 + __le16 minor;
26473 +};
26474 +
26475 +struct dpseci_cmd_opr {
26476 + __le16 pad;
26477 + u8 index;
26478 + u8 options;
26479 + u8 pad1[7];
26480 + u8 oloe;
26481 + u8 oeane;
26482 + u8 olws;
26483 + u8 oa;
26484 + u8 oprrws;
26485 +};
26486 +
26487 +#define DPSECI_OPR_RIP_SHIFT 0
26488 +#define DPSECI_OPR_RIP_SIZE 1
26489 +#define DPSECI_OPR_ENABLE_SHIFT 1
26490 +#define DPSECI_OPR_ENABLE_SIZE 1
26491 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT 1
26492 +#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
26493 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT 1
26494 +#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
26495 +
26496 +struct dpseci_rsp_get_opr {
26497 + __le64 pad;
26498 + u8 rip_enable;
26499 + u8 pad0[2];
26500 + u8 oloe;
26501 + u8 oeane;
26502 + u8 olws;
26503 + u8 oa;
26504 + u8 oprrws;
26505 + __le16 nesn;
26506 + __le16 pad1;
26507 + __le16 ndsn;
26508 + __le16 pad2;
26509 + __le16 ea_tseq;
26510 + u8 tseq_nlis;
26511 + u8 pad3;
26512 + __le16 ea_hseq;
26513 + u8 hseq_nlis;
26514 + u8 pad4;
26515 + __le16 ea_hptr;
26516 + __le16 pad5;
26517 + __le16 ea_tptr;
26518 + __le16 pad6;
26519 + __le16 opr_vid;
26520 + __le16 pad7;
26521 + __le16 opr_id;
26522 +};
26523 +
26524 +#define DPSECI_CGN_DEST_TYPE_SHIFT 0
26525 +#define DPSECI_CGN_DEST_TYPE_SIZE 4
26526 +#define DPSECI_CGN_UNITS_SHIFT 4
26527 +#define DPSECI_CGN_UNITS_SIZE 2
26528 +
26529 +struct dpseci_cmd_congestion_notification {
26530 + __le32 dest_id;
26531 + __le16 notification_mode;
26532 + u8 priority;
26533 + u8 options;
26534 + __le64 message_iova;
26535 + __le64 message_ctx;
26536 + __le32 threshold_entry;
26537 + __le32 threshold_exit;
26538 +};
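For illustration, the sub-byte field helpers defined above would be used by the management-command code (in dpseci.c, not shown here) roughly as follows to pack the congestion destination type and units into the single 'options' byte; the struct dpseci_dest_cfg member names are assumed from earlier in this patch:

static void pack_cgn_cmd(struct dpseci_cmd_congestion_notification *cmd_params,
			 const struct dpseci_congestion_notification_cfg *cfg)
{
	cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
	cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
	cmd_params->priority = cfg->dest_cfg.priority;
	/* both fields share one byte; the macros shift and mask them in place */
	dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
			 cfg->dest_cfg.dest_type);
	dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
	cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
	cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
	cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
	cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
}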
26539 +
26540 +#endif /* _DPSECI_CMD_H_ */
26541 --- a/drivers/crypto/caam/error.c
26542 +++ b/drivers/crypto/caam/error.c
26543 @@ -6,11 +6,54 @@
26544
26545 #include "compat.h"
26546 #include "regs.h"
26547 -#include "intern.h"
26548 #include "desc.h"
26549 -#include "jr.h"
26550 #include "error.h"
26551
26552 +#ifdef DEBUG
26553 +
26554 +#include <linux/highmem.h>
26555 +
26556 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
26557 + int rowsize, int groupsize, struct scatterlist *sg,
26558 + size_t tlen, bool ascii)
26559 +{
26560 + struct scatterlist *it;
26561 + void *it_page;
26562 + size_t len;
26563 + void *buf;
26564 +
26565 + for (it = sg; it && tlen > 0; it = sg_next(it)) {
26566 + /*
26567 + * make sure the scatterlist's page
26568 + * has a valid virtual memory mapping
26569 + */
26570 + it_page = kmap_atomic(sg_page(it));
26571 + if (unlikely(!it_page)) {
26572 + pr_err("caam_dump_sg: kmap failed\n");
26573 + return;
26574 + }
26575 +
26576 + buf = it_page + it->offset;
26577 + len = min_t(size_t, tlen, it->length);
26578 + print_hex_dump(level, prefix_str, prefix_type, rowsize,
26579 + groupsize, buf, len, ascii);
26580 + tlen -= len;
26581 +
26582 + kunmap_atomic(it_page);
26583 + }
26584 +}
26585 +
26586 +#else
26587 +
26588 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
26589 + int rowsize, int groupsize, struct scatterlist *sg,
26590 + size_t tlen, bool ascii)
26591 +{}
26592 +
26593 +#endif
26594 +
26595 +EXPORT_SYMBOL(caam_dump_sg);
26596 +
26597 static const struct {
26598 u8 value;
26599 const char *error_text;
26600 @@ -69,6 +112,54 @@ static const struct {
26601 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
26602 };
26603
26604 +static const struct {
26605 + u8 value;
26606 + const char *error_text;
26607 +} qi_error_list[] = {
26608 + { 0x1F, "Job terminated by FQ or ICID flush" },
26609 + { 0x20, "FD format error"},
26610 + { 0x21, "FD command format error"},
26611 + { 0x23, "FL format error"},
26612 + { 0x25, "CRJD specified in FD, but not enabled in FLC"},
26613 + { 0x30, "Max. buffer size too small"},
26614 + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
26615 + { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
26616 + { 0x33, "Size over/underflow (allocate mode)"},
26617 + { 0x34, "Size over/underflow (reuse mode)"},
26618 + { 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
26619 + { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
26620 + { 0x41, "SBC frame format not supported (allocate mode)"},
26621 + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
26622 + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
26623 + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
26624 + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
26625 + { 0x46, "Annotation length exceeds offset (reuse mode)"},
26626 + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
26627 + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
26628 + { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
26629 + { 0x51, "Unsupported IF reuse mode"},
26630 + { 0x52, "Unsupported FL use mode"},
26631 + { 0x53, "Unsupported RJD use mode"},
26632 + { 0x54, "Unsupported inline descriptor use mode"},
26633 + { 0xC0, "Table buffer pool 0 depletion"},
26634 + { 0xC1, "Table buffer pool 1 depletion"},
26635 + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
26636 + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
26637 + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
26638 + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
26639 + { 0xD0, "FLC read error"},
26640 + { 0xD1, "FL read error"},
26641 + { 0xD2, "FL write error"},
26642 + { 0xD3, "OF SGT write error"},
26643 + { 0xD4, "PTA read error"},
26644 + { 0xD5, "PTA write error"},
26645 + { 0xD6, "OF SGT F-bit write error"},
26646 + { 0xD7, "ASA write error"},
26647 + { 0xE1, "FLC[ICR]=0 ICID error"},
26648 + { 0xE2, "FLC[ICR]=1 ICID error"},
26649 + { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
26650 +};
26651 +
26652 static const char * const cha_id_list[] = {
26653 "",
26654 "AES",
26655 @@ -146,10 +237,9 @@ static void report_ccb_status(struct dev
26656 strlen(rng_err_id_list[err_id])) {
26657 /* RNG-only error */
26658 err_str = rng_err_id_list[err_id];
26659 - } else if (err_id < ARRAY_SIZE(err_id_list))
26660 + } else {
26661 err_str = err_id_list[err_id];
26662 - else
26663 - snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
26664 + }
26665
26666 /*
26667 * CCB ICV check failures are part of normal operation life;
26668 @@ -198,6 +288,27 @@ static void report_deco_status(struct de
26669 status, error, idx_str, idx, err_str, err_err_code);
26670 }
26671
26672 +static void report_qi_status(struct device *qidev, const u32 status,
26673 + const char *error)
26674 +{
26675 + u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
26676 + const char *err_str = "unidentified error value 0x";
26677 + char err_err_code[3] = { 0 };
26678 + int i;
26679 +
26680 + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
26681 + if (qi_error_list[i].value == err_id)
26682 + break;
26683 +
26684 + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
26685 + err_str = qi_error_list[i].error_text;
26686 + else
26687 + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
26688 +
26689 + dev_err(qidev, "%08x: %s: %s%s\n",
26690 + status, error, err_str, err_err_code);
26691 +}
26692 +
26693 static void report_jr_status(struct device *jrdev, const u32 status,
26694 const char *error)
26695 {
26696 @@ -212,7 +323,7 @@ static void report_cond_code_status(stru
26697 status, error, __func__);
26698 }
26699
26700 -void caam_jr_strstatus(struct device *jrdev, u32 status)
26701 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
26702 {
26703 static const struct stat_src {
26704 void (*report_ssed)(struct device *jrdev, const u32 status,
26705 @@ -224,7 +335,7 @@ void caam_jr_strstatus(struct device *jr
26706 { report_ccb_status, "CCB" },
26707 { report_jump_status, "Jump" },
26708 { report_deco_status, "DECO" },
26709 - { NULL, "Queue Manager Interface" },
26710 + { report_qi_status, "Queue Manager Interface" },
26711 { report_jr_status, "Job Ring" },
26712 { report_cond_code_status, "Condition Code" },
26713 { NULL, NULL },
26714 @@ -250,4 +361,4 @@ void caam_jr_strstatus(struct device *jr
26715 else
26716 dev_err(jrdev, "%d: unknown error source\n", ssrc);
26717 }
26718 -EXPORT_SYMBOL(caam_jr_strstatus);
26719 +EXPORT_SYMBOL(caam_strstatus);
26720 --- a/drivers/crypto/caam/error.h
26721 +++ b/drivers/crypto/caam/error.h
26722 @@ -7,5 +7,13 @@
26723 #ifndef CAAM_ERROR_H
26724 #define CAAM_ERROR_H
26725 #define CAAM_ERROR_STR_MAX 302
26726 -void caam_jr_strstatus(struct device *jrdev, u32 status);
26727 +
26728 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
26729 +
26730 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
26731 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
26732 +
26733 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
26734 + int rowsize, int groupsize, struct scatterlist *sg,
26735 + size_t tlen, bool ascii);
26736 #endif /* CAAM_ERROR_H */
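A hedged sketch of how a job-ring user would typically consume these helpers in its completion callback; the function and 'areq' handling are placeholders, and the callback signature follows caam_jr_enqueue() in jr.h:

static void my_done_cb(struct device *jrdev, u32 *desc, u32 status, void *areq)
{
	if (status)
		caam_jr_strstatus(jrdev, status); /* decode and log the SEC status word */

	/* ... complete 'areq' towards the crypto API ... */
}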
26737 --- a/drivers/crypto/caam/intern.h
26738 +++ b/drivers/crypto/caam/intern.h
26739 @@ -64,10 +64,9 @@ struct caam_drv_private_jr {
26740 * Driver-private storage for a single CAAM block instance
26741 */
26742 struct caam_drv_private {
26743 -
26744 - struct device *dev;
26745 - struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
26746 - struct platform_device *pdev;
26747 +#ifdef CONFIG_CAAM_QI
26748 + struct device *qidev;
26749 +#endif
26750
26751 /* Physical-presence section */
26752 struct caam_ctrl __iomem *ctrl; /* controller region */
26753 @@ -84,6 +83,7 @@ struct caam_drv_private {
26754 u8 qi_present; /* Nonzero if QI present in device */
26755 int secvio_irq; /* Security violation interrupt number */
26756 int virt_en; /* Virtualization enabled in CAAM */
26757 + int era; /* CAAM Era (internal HW revision) */
26758
26759 #define RNG4_MAX_HANDLES 2
26760 /* RNG4 block */
26761 @@ -103,11 +103,6 @@ struct caam_drv_private {
26762 #ifdef CONFIG_DEBUG_FS
26763 struct dentry *dfs_root;
26764 struct dentry *ctl; /* controller dir */
26765 - struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
26766 - struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
26767 - struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
26768 - struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
26769 -
26770 struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
26771 struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
26772 #endif
26773 @@ -115,4 +110,22 @@ struct caam_drv_private {
26774
26775 void caam_jr_algapi_init(struct device *dev);
26776 void caam_jr_algapi_remove(struct device *dev);
26777 +
26778 +#ifdef CONFIG_DEBUG_FS
26779 +static int caam_debugfs_u64_get(void *data, u64 *val)
26780 +{
26781 + *val = caam64_to_cpu(*(u64 *)data);
26782 + return 0;
26783 +}
26784 +
26785 +static int caam_debugfs_u32_get(void *data, u64 *val)
26786 +{
26787 + *val = caam32_to_cpu(*(u32 *)data);
26788 + return 0;
26789 +}
26790 +
26791 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
26792 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
26793 +#endif
26794 +
26795 #endif /* INTERN_H */
26796 --- a/drivers/crypto/caam/jr.c
26797 +++ b/drivers/crypto/caam/jr.c
26798 @@ -9,6 +9,7 @@
26799 #include <linux/of_address.h>
26800
26801 #include "compat.h"
26802 +#include "ctrl.h"
26803 #include "regs.h"
26804 #include "jr.h"
26805 #include "desc.h"
26806 @@ -22,6 +23,14 @@ struct jr_driver_data {
26807
26808 static struct jr_driver_data driver_data;
26809
26810 +static int jr_driver_probed;
26811 +
26812 +int caam_jr_driver_probed(void)
26813 +{
26814 + return jr_driver_probed;
26815 +}
26816 +EXPORT_SYMBOL(caam_jr_driver_probed);
26817 +
26818 static int caam_reset_hw_jr(struct device *dev)
26819 {
26820 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
26821 @@ -118,6 +127,8 @@ static int caam_jr_remove(struct platfor
26822 dev_err(jrdev, "Failed to shut down job ring\n");
26823 irq_dispose_mapping(jrpriv->irq);
26824
26825 + jr_driver_probed--;
26826 +
26827 return ret;
26828 }
26829
26830 @@ -281,6 +292,36 @@ struct device *caam_jr_alloc(void)
26831 EXPORT_SYMBOL(caam_jr_alloc);
26832
26833 /**
26834 + * caam_jridx_alloc() - Alloc a specific job ring based on its index.
26835 + *
26836 + * returns : pointer to the physical JobR device with the given index if
26837 + * successful, or an ERR_PTR otherwise.
26838 + **/
26839 +struct device *caam_jridx_alloc(int idx)
26840 +{
26841 + struct caam_drv_private_jr *jrpriv;
26842 + struct device *dev = ERR_PTR(-ENODEV);
26843 +
26844 + spin_lock(&driver_data.jr_alloc_lock);
26845 +
26846 + if (list_empty(&driver_data.jr_list))
26847 + goto end;
26848 +
26849 + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
26850 + if (jrpriv->ridx == idx) {
26851 + atomic_inc(&jrpriv->tfm_count);
26852 + dev = jrpriv->dev;
26853 + break;
26854 + }
26855 + }
26856 +
26857 +end:
26858 + spin_unlock(&driver_data.jr_alloc_lock);
26859 + return dev;
26860 +}
26861 +EXPORT_SYMBOL(caam_jridx_alloc);
26862 +
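A rough caller-side sketch for the new index-based allocator (illustrative only; surrounding error handling and the choice of index are up to the caller):

	struct device *jrdev;

	if (!caam_jr_driver_probed())
		return -ENODEV;			/* no job rings registered yet */

	jrdev = caam_jridx_alloc(0);		/* explicitly request job ring 0 */
	if (IS_ERR(jrdev))
		return PTR_ERR(jrdev);

	/* ... submit descriptors with caam_jr_enqueue(jrdev, ...) ... */

	caam_jr_free(jrdev);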
26863 +/**
26864 * caam_jr_free() - Free the Job Ring
26865 * @rdev - points to the dev that identifies the Job ring to
26866 * be released.
26867 @@ -497,15 +538,28 @@ static int caam_jr_probe(struct platform
26868 return -ENOMEM;
26869 }
26870
26871 - jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
26872 + jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
26873
26874 - if (sizeof(dma_addr_t) == sizeof(u64))
26875 - if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
26876 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
26877 + if (sizeof(dma_addr_t) == sizeof(u64)) {
26878 + if (caam_dpaa2)
26879 + error = dma_set_mask_and_coherent(jrdev,
26880 + DMA_BIT_MASK(49));
26881 + else if (of_device_is_compatible(nprop,
26882 + "fsl,sec-v5.0-job-ring"))
26883 + error = dma_set_mask_and_coherent(jrdev,
26884 + DMA_BIT_MASK(40));
26885 else
26886 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
26887 - else
26888 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
26889 + error = dma_set_mask_and_coherent(jrdev,
26890 + DMA_BIT_MASK(36));
26891 + } else {
26892 + error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
26893 + }
26894 + if (error) {
26895 + dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
26896 + error);
26897 + iounmap(ctrl);
26898 + return error;
26899 + }
26900
26901 /* Identify the interrupt */
26902 jrpriv->irq = irq_of_parse_and_map(nprop, 0);
26903 @@ -525,10 +579,12 @@ static int caam_jr_probe(struct platform
26904
26905 atomic_set(&jrpriv->tfm_count, 0);
26906
26907 + jr_driver_probed++;
26908 +
26909 return 0;
26910 }
26911
26912 -static struct of_device_id caam_jr_match[] = {
26913 +static const struct of_device_id caam_jr_match[] = {
26914 {
26915 .compatible = "fsl,sec-v4.0-job-ring",
26916 },
26917 --- a/drivers/crypto/caam/jr.h
26918 +++ b/drivers/crypto/caam/jr.h
26919 @@ -8,7 +8,9 @@
26920 #define JR_H
26921
26922 /* Prototypes for backend-level services exposed to APIs */
26923 +int caam_jr_driver_probed(void);
26924 struct device *caam_jr_alloc(void);
26925 +struct device *caam_jridx_alloc(int idx);
26926 void caam_jr_free(struct device *rdev);
26927 int caam_jr_enqueue(struct device *dev, u32 *desc,
26928 void (*cbk)(struct device *dev, u32 *desc, u32 status,
26929 --- a/drivers/crypto/caam/key_gen.c
26930 +++ b/drivers/crypto/caam/key_gen.c
26931 @@ -41,15 +41,29 @@ Split key generation--------------------
26932 [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
26933 @0xffe04000
26934 */
26935 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
26936 - int split_key_pad_len, const u8 *key_in, u32 keylen,
26937 - u32 alg_op)
26938 +int gen_split_key(struct device *jrdev, u8 *key_out,
26939 + struct alginfo * const adata, const u8 *key_in, u32 keylen,
26940 + int max_keylen)
26941 {
26942 u32 *desc;
26943 struct split_key_result result;
26944 dma_addr_t dma_addr_in, dma_addr_out;
26945 int ret = -ENOMEM;
26946
26947 + adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
26948 + adata->keylen_pad = split_key_pad_len(adata->algtype &
26949 + OP_ALG_ALGSEL_MASK);
26950 +
26951 +#ifdef DEBUG
26952 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
26953 + adata->keylen, adata->keylen_pad);
26954 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
26955 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
26956 +#endif
26957 +
26958 + if (adata->keylen_pad > max_keylen)
26959 + return -EINVAL;
26960 +
26961 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
26962 if (!desc) {
26963 dev_err(jrdev, "unable to allocate key input memory\n");
26964 @@ -63,7 +77,7 @@ int gen_split_key(struct device *jrdev,
26965 goto out_free;
26966 }
26967
26968 - dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
26969 + dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
26970 DMA_FROM_DEVICE);
26971 if (dma_mapping_error(jrdev, dma_addr_out)) {
26972 dev_err(jrdev, "unable to map key output memory\n");
26973 @@ -74,7 +88,9 @@ int gen_split_key(struct device *jrdev,
26974 append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
26975
26976 /* Sets MDHA up into an HMAC-INIT */
26977 - append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
26978 + append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
26979 + OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
26980 + OP_ALG_AS_INIT);
26981
26982 /*
26983 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
26984 @@ -87,7 +103,7 @@ int gen_split_key(struct device *jrdev,
26985 * FIFO_STORE with the explicit split-key content store
26986 * (0x26 output type)
26987 */
26988 - append_fifo_store(desc, dma_addr_out, split_key_len,
26989 + append_fifo_store(desc, dma_addr_out, adata->keylen,
26990 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
26991
26992 #ifdef DEBUG
26993 @@ -108,11 +124,11 @@ int gen_split_key(struct device *jrdev,
26994 #ifdef DEBUG
26995 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
26996 DUMP_PREFIX_ADDRESS, 16, 4, key_out,
26997 - split_key_pad_len, 1);
26998 + adata->keylen_pad, 1);
26999 #endif
27000 }
27001
27002 - dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
27003 + dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
27004 DMA_FROM_DEVICE);
27005 out_unmap_in:
27006 dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
27007 --- a/drivers/crypto/caam/key_gen.h
27008 +++ b/drivers/crypto/caam/key_gen.h
27009 @@ -5,6 +5,36 @@
27010 *
27011 */
27012
27013 +/**
27014 + * split_key_len - Compute MDHA split key length for a given algorithm
27015 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
27016 + * SHA224, SHA384, SHA512.
27017 + *
27018 + * Return: MDHA split key length
27019 + */
27020 +static inline u32 split_key_len(u32 hash)
27021 +{
27022 + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
27023 + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
27024 + u32 idx;
27025 +
27026 + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
27027 +
27028 + return (u32)(mdpadlen[idx] * 2);
27029 +}
27030 +
27031 +/**
27032 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
27033 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
27034 + * SHA224, SHA384, SHA512.
27035 + *
27036 + * Return: MDHA split key pad length
27037 + */
27038 +static inline u32 split_key_pad_len(u32 hash)
27039 +{
27040 + return ALIGN(split_key_len(hash), 16);
27041 +}
27042 +
27043 struct split_key_result {
27044 struct completion completion;
27045 int err;
27046 @@ -12,6 +42,6 @@ struct split_key_result {
27047
27048 void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
27049
27050 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
27051 - int split_key_pad_len, const u8 *key_in, u32 keylen,
27052 - u32 alg_op);
27053 +int gen_split_key(struct device *jrdev, u8 *key_out,
27054 + struct alginfo * const adata, const u8 *key_in, u32 keylen,
27055 + int max_keylen);
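For example, with SHA-256 the MDHA pad size is 32 bytes, so split_key_len() yields 64 and split_key_pad_len() rounds that up to 64 as well. A hedged sketch of the new gen_split_key() calling convention ('jrdev', 'key_in', 'keylen' and the output buffer size are placeholders):

	u8 key_out[64];		/* large enough for the padded split key */
	struct alginfo adata = {
		.algtype = OP_ALG_ALGSEL_SHA256, /* selects the MDHA pad size */
	};
	int ret;

	/* gen_split_key() fills in adata.keylen and adata.keylen_pad */
	ret = gen_split_key(jrdev, key_out, &adata, key_in, keylen,
			    sizeof(key_out));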
27056 --- a/drivers/crypto/caam/pdb.h
27057 +++ b/drivers/crypto/caam/pdb.h
27058 @@ -483,6 +483,8 @@ struct dsa_verify_pdb {
27059 #define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT)
27060 #define RSA_PDB_D_SHIFT 12
27061 #define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT)
27062 +#define RSA_PDB_Q_SHIFT 12
27063 +#define RSA_PDB_Q_MASK (0xFFF << RSA_PDB_Q_SHIFT)
27064
27065 #define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT)
27066 #define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT)
27067 @@ -490,6 +492,8 @@ struct dsa_verify_pdb {
27068 #define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT)
27069
27070 #define RSA_PRIV_KEY_FRM_1 0
27071 +#define RSA_PRIV_KEY_FRM_2 1
27072 +#define RSA_PRIV_KEY_FRM_3 2
27073
27074 /**
27075 * RSA Encrypt Protocol Data Block
27076 @@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
27077 dma_addr_t d_dma;
27078 } __packed;
27079
27080 +/**
27081 + * RSA Decrypt PDB - Private Key Form #2
27082 + * @sgf : scatter-gather field
27083 + * @g_dma : dma address of encrypted input data
27084 + * @f_dma : dma address of output data
27085 + * @d_dma : dma address of RSA private exponent
27086 + * @p_dma : dma address of RSA prime factor p of RSA modulus n
27087 + * @q_dma : dma address of RSA prime factor q of RSA modulus n
27088 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
27089 + * as internal state buffer. It is assumed to be as long as p.
27090 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
27091 + * as internal state buffer. It is assumed to be as long as q.
27092 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
27093 + */
27094 +struct rsa_priv_f2_pdb {
27095 + u32 sgf;
27096 + dma_addr_t g_dma;
27097 + dma_addr_t f_dma;
27098 + dma_addr_t d_dma;
27099 + dma_addr_t p_dma;
27100 + dma_addr_t q_dma;
27101 + dma_addr_t tmp1_dma;
27102 + dma_addr_t tmp2_dma;
27103 + u32 p_q_len;
27104 +} __packed;
27105 +
27106 +/**
27107 + * RSA Decrypt PDB - Private Key Form #3
27108 + * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
27109 + * the RSA modulus.
27110 + * @sgf : scatter-gather field
27111 + * @g_dma : dma address of encrypted input data
27112 + * @f_dma : dma address of output data
27113 + * @c_dma : dma address of RSA CRT coefficient
27114 + * @p_dma : dma address of RSA prime factor p of RSA modulus n
27115 + * @q_dma : dma address of RSA prime factor q of RSA modulus n
27116 + * @dp_dma : dma address of RSA CRT exponent of RSA prime factor p
27117 + * @dq_dma : dma address of RSA CRT exponent of RSA prime factor q
27118 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
27119 + * as internal state buffer. It is assumed to be as long as p.
27120 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
27121 + * as internal state buffer. It is assumed to be as long as q.
27122 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
27123 + */
27124 +struct rsa_priv_f3_pdb {
27125 + u32 sgf;
27126 + dma_addr_t g_dma;
27127 + dma_addr_t f_dma;
27128 + dma_addr_t c_dma;
27129 + dma_addr_t p_dma;
27130 + dma_addr_t q_dma;
27131 + dma_addr_t dp_dma;
27132 + dma_addr_t dq_dma;
27133 + dma_addr_t tmp1_dma;
27134 + dma_addr_t tmp2_dma;
27135 + u32 p_q_len;
27136 +} __packed;
27137 +
27138 #endif
27139 --- a/drivers/crypto/caam/pkc_desc.c
27140 +++ b/drivers/crypto/caam/pkc_desc.c
27141 @@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, st
27142 append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
27143 RSA_PRIV_KEY_FRM_1);
27144 }
27145 +
27146 +/* Descriptor for RSA Private operation - Private Key Form #2 */
27147 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
27148 +{
27149 + init_job_desc_pdb(desc, 0, sizeof(*pdb));
27150 + append_cmd(desc, pdb->sgf);
27151 + append_ptr(desc, pdb->g_dma);
27152 + append_ptr(desc, pdb->f_dma);
27153 + append_ptr(desc, pdb->d_dma);
27154 + append_ptr(desc, pdb->p_dma);
27155 + append_ptr(desc, pdb->q_dma);
27156 + append_ptr(desc, pdb->tmp1_dma);
27157 + append_ptr(desc, pdb->tmp2_dma);
27158 + append_cmd(desc, pdb->p_q_len);
27159 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
27160 + RSA_PRIV_KEY_FRM_2);
27161 +}
27162 +
27163 +/* Descriptor for RSA Private operation - Private Key Form #3 */
27164 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
27165 +{
27166 + init_job_desc_pdb(desc, 0, sizeof(*pdb));
27167 + append_cmd(desc, pdb->sgf);
27168 + append_ptr(desc, pdb->g_dma);
27169 + append_ptr(desc, pdb->f_dma);
27170 + append_ptr(desc, pdb->c_dma);
27171 + append_ptr(desc, pdb->p_dma);
27172 + append_ptr(desc, pdb->q_dma);
27173 + append_ptr(desc, pdb->dp_dma);
27174 + append_ptr(desc, pdb->dq_dma);
27175 + append_ptr(desc, pdb->tmp1_dma);
27176 + append_ptr(desc, pdb->tmp2_dma);
27177 + append_cmd(desc, pdb->p_q_len);
27178 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
27179 + RSA_PRIV_KEY_FRM_3);
27180 +}
27181 --- /dev/null
27182 +++ b/drivers/crypto/caam/qi.c
27183 @@ -0,0 +1,804 @@
27184 +/*
27185 + * CAAM/SEC 4.x QI transport/backend driver
27186 + * Queue Interface backend functionality
27187 + *
27188 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
27189 + * Copyright 2016-2017 NXP
27190 + */
27191 +
27192 +#include <linux/cpumask.h>
27193 +#include <linux/kthread.h>
27194 +#include <linux/fsl_qman.h>
27195 +
27196 +#include "regs.h"
27197 +#include "qi.h"
27198 +#include "desc.h"
27199 +#include "intern.h"
27200 +#include "desc_constr.h"
27201 +
27202 +#define PREHDR_RSLS_SHIFT 31
27203 +
27204 +/*
27205 + * Use a reasonable backlog of frames (per CPU) as congestion threshold,
27206 + * so that resources used by the in-flight buffers do not become a memory hog.
27207 + */
27208 +#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
27209 +
27210 +#define CAAM_QI_ENQUEUE_RETRIES 10000
27211 +
27212 +#define CAAM_NAPI_WEIGHT 63
27213 +
27214 +/*
27215 + * caam_napi - struct holding CAAM NAPI-related params
27216 + * @irqtask: IRQ task for QI backend
27217 + * @p: QMan portal
27218 + */
27219 +struct caam_napi {
27220 + struct napi_struct irqtask;
27221 + struct qman_portal *p;
27222 +};
27223 +
27224 +/*
27225 + * caam_qi_pcpu_priv - percpu private data structure to maintain the list of
27226 + * pending responses expected on each cpu.
27227 + * @caam_napi: CAAM NAPI params
27228 + * @net_dev: netdev used by NAPI
27229 + * @rsp_fq: response FQ from CAAM
27230 + */
27231 +struct caam_qi_pcpu_priv {
27232 + struct caam_napi caam_napi;
27233 + struct net_device net_dev;
27234 + struct qman_fq *rsp_fq;
27235 +} ____cacheline_aligned;
27236 +
27237 +static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
27238 +static DEFINE_PER_CPU(int, last_cpu);
27239 +
27240 +/*
27241 + * caam_qi_priv - CAAM QI backend private params
27242 + * @cgr: QMan congestion group
27243 + * @qi_pdev: platform device for QI backend
27244 + */
27245 +struct caam_qi_priv {
27246 + struct qman_cgr cgr;
27247 + struct platform_device *qi_pdev;
27248 +};
27249 +
27250 +static struct caam_qi_priv qipriv ____cacheline_aligned;
27251 +
27252 +/*
27253 + * This is written by only one core - the one that initialized the CGR - and
27254 + * read by multiple cores (all the others).
27255 + */
27256 +bool caam_congested __read_mostly;
27257 +EXPORT_SYMBOL(caam_congested);
27258 +
27259 +#ifdef CONFIG_DEBUG_FS
27260 +/*
27261 + * This is a counter for the number of times the congestion group (where all
27262 + * the request and response queues are) reached congestion. Incremented
27263 + * each time the congestion callback is called with congested == true.
27264 + */
27265 +static u64 times_congested;
27266 +#endif
27267 +
27268 +/*
27269 + * CPU on which the module was initialised. This is required because the QMan
27270 + * driver requires CGRs to be removed from the same CPU where they were originally
27271 + * allocated.
27272 + */
27273 +static int mod_init_cpu;
27274 +
27275 +/*
27276 + * This is a cache of buffers, from which the users of the CAAM QI driver
27277 + * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
27278 + * doing malloc on the hotpath.
27279 + * NOTE: A more elegant solution would be to have some headroom in the frames
27280 + * being processed. This could be added by the dpaa-ethernet driver.
27281 + * This would pose a problem for userspace application processing which
27282 + * cannot know of this limitation. So for now, this will work.
27283 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
27284 + */
27285 +static struct kmem_cache *qi_cache;
27286 +
27287 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
27288 +{
27289 + struct qm_fd fd;
27290 + int ret;
27291 + int num_retries = 0;
27292 +
27293 + fd.cmd = 0;
27294 + fd.format = qm_fd_compound;
27295 + fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
27296 + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
27297 + DMA_BIDIRECTIONAL);
27298 + if (dma_mapping_error(qidev, fd.addr)) {
27299 + dev_err(qidev, "DMA mapping error for QI enqueue request\n");
27300 + return -EIO;
27301 + }
27302 +
27303 + do {
27304 + ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
27305 + if (likely(!ret))
27306 + return 0;
27307 +
27308 + if (ret != -EBUSY)
27309 + break;
27310 + num_retries++;
27311 + } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
27312 +
27313 + dev_err(qidev, "qman_enqueue failed: %d\n", ret);
27314 +
27315 + return ret;
27316 +}
27317 +EXPORT_SYMBOL(caam_qi_enqueue);
27318 +
27319 +static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
27320 + const struct qm_mr_entry *msg)
27321 +{
27322 + const struct qm_fd *fd;
27323 + struct caam_drv_req *drv_req;
27324 + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
27325 +
27326 + fd = &msg->ern.fd;
27327 +
27328 + if (fd->format != qm_fd_compound) {
27329 + dev_err(qidev, "Non-compound FD from CAAM\n");
27330 + return;
27331 + }
27332 +
27333 + drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
27334 + if (!drv_req) {
27335 + dev_err(qidev,
27336 + "Can't find original request for CAAM response\n");
27337 + return;
27338 + }
27339 +
27340 + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
27341 + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
27342 +
27343 + drv_req->cbk(drv_req, -EIO);
27344 +}
27345 +
27346 +static struct qman_fq *create_caam_req_fq(struct device *qidev,
27347 + struct qman_fq *rsp_fq,
27348 + dma_addr_t hwdesc,
27349 + int fq_sched_flag)
27350 +{
27351 + int ret;
27352 + struct qman_fq *req_fq;
27353 + struct qm_mcc_initfq opts;
27354 +
27355 + req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
27356 + if (!req_fq)
27357 + return ERR_PTR(-ENOMEM);
27358 +
27359 + req_fq->cb.ern = caam_fq_ern_cb;
27360 + req_fq->cb.fqs = NULL;
27361 +
27362 + ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
27363 + QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
27364 + req_fq);
27365 + if (ret) {
27366 + dev_err(qidev, "Failed to create session req FQ\n");
27367 + goto create_req_fq_fail;
27368 + }
27369 +
27370 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
27371 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
27372 + QM_INITFQ_WE_CGID;
27373 + opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
27374 + opts.fqd.dest.channel = qm_channel_caam;
27375 + opts.fqd.dest.wq = 2;
27376 + opts.fqd.context_b = qman_fq_fqid(rsp_fq);
27377 + opts.fqd.context_a.hi = upper_32_bits(hwdesc);
27378 + opts.fqd.context_a.lo = lower_32_bits(hwdesc);
27379 + opts.fqd.cgid = qipriv.cgr.cgrid;
27380 +
27381 + ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
27382 + if (ret) {
27383 + dev_err(qidev, "Failed to init session req FQ\n");
27384 + goto init_req_fq_fail;
27385 + }
27386 +
27387 + dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
27388 + smp_processor_id());
27389 + return req_fq;
27390 +
27391 +init_req_fq_fail:
27392 + qman_destroy_fq(req_fq, 0);
27393 +create_req_fq_fail:
27394 + kfree(req_fq);
27395 + return ERR_PTR(ret);
27396 +}
27397 +
27398 +static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
27399 +{
27400 + int ret;
27401 +
27402 + ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
27403 + QMAN_VOLATILE_FLAG_FINISH,
27404 + QM_VDQCR_PRECEDENCE_VDQCR |
27405 + QM_VDQCR_NUMFRAMES_TILLEMPTY);
27406 + if (ret) {
27407 + dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
27408 + return ret;
27409 + }
27410 +
27411 + do {
27412 + struct qman_portal *p;
27413 +
27414 + p = qman_get_affine_portal(smp_processor_id());
27415 + qman_p_poll_dqrr(p, 16);
27416 + } while (fq->flags & QMAN_FQ_STATE_NE);
27417 +
27418 + return 0;
27419 +}
27420 +
27421 +static int kill_fq(struct device *qidev, struct qman_fq *fq)
27422 +{
27423 + u32 flags;
27424 + int ret;
27425 +
27426 + ret = qman_retire_fq(fq, &flags);
27427 + if (ret < 0) {
27428 + dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
27429 + return ret;
27430 + }
27431 +
27432 + if (!ret)
27433 + goto empty_fq;
27434 +
27435 + /* Async FQ retirement condition */
27436 + if (ret == 1) {
27437 + /* Retry till FQ gets in retired state */
27438 + do {
27439 + msleep(20);
27440 + } while (fq->state != qman_fq_state_retired);
27441 +
27442 + WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
27443 + WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
27444 + }
27445 +
27446 +empty_fq:
27447 + if (fq->flags & QMAN_FQ_STATE_NE) {
27448 + ret = empty_retired_fq(qidev, fq);
27449 + if (ret) {
27450 + dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
27451 + fq->fqid);
27452 + return ret;
27453 + }
27454 + }
27455 +
27456 + ret = qman_oos_fq(fq);
27457 + if (ret)
27458 + dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
27459 +
27460 + qman_destroy_fq(fq, 0);
27461 + kfree(fq);
27462 +
27463 + return ret;
27464 +}
27465 +
27466 +static int empty_caam_fq(struct qman_fq *fq)
27467 +{
27468 + int ret;
27469 + struct qm_mcr_queryfq_np np;
27470 +
27471 + /* Wait till the older CAAM FQ gets empty */
27472 + do {
27473 + ret = qman_query_fq_np(fq, &np);
27474 + if (ret)
27475 + return ret;
27476 +
27477 + if (!np.frm_cnt)
27478 + break;
27479 +
27480 + msleep(20);
27481 + } while (1);
27482 +
27483 + /*
27484 + * Give extra time for pending jobs from this FQ in holding tanks
27485 + * to get processed
27486 + */
27487 + msleep(20);
27488 + return 0;
27489 +}
27490 +
27491 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
27492 +{
27493 + int ret;
27494 + u32 num_words;
27495 + struct qman_fq *new_fq, *old_fq;
27496 + struct device *qidev = drv_ctx->qidev;
27497 +
27498 + num_words = desc_len(sh_desc);
27499 + if (num_words > MAX_SDLEN) {
27500 + dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
27501 + return -EINVAL;
27502 + }
27503 +
27504 + /* Note down older req FQ */
27505 + old_fq = drv_ctx->req_fq;
27506 +
27507 + /* Create a new req FQ in parked state */
27508 + new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
27509 + drv_ctx->context_a, 0);
27510 + if (unlikely(IS_ERR_OR_NULL(new_fq))) {
27511 + dev_err(qidev, "FQ allocation for shdesc update failed\n");
27512 + return PTR_ERR(new_fq);
27513 + }
27514 +
27515 + /* Hook up new FQ to context so that new requests keep queuing */
27516 + drv_ctx->req_fq = new_fq;
27517 +
27518 + /* Empty and remove the older FQ */
27519 + ret = empty_caam_fq(old_fq);
27520 + if (ret) {
27521 + dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
27522 +
27523 + /* We can revert to older FQ */
27524 + drv_ctx->req_fq = old_fq;
27525 +
27526 + if (kill_fq(qidev, new_fq))
27527 + dev_warn(qidev, "New CAAM FQ kill failed\n");
27528 +
27529 + return ret;
27530 + }
27531 +
27532 + /*
27533 + * Re-initialise pre-header. Set RSLS and SDLEN.
27534 + * Update the shared descriptor for driver context.
27535 + */
27536 + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
27537 + num_words);
27538 + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
27539 + dma_sync_single_for_device(qidev, drv_ctx->context_a,
27540 + sizeof(drv_ctx->sh_desc) +
27541 + sizeof(drv_ctx->prehdr),
27542 + DMA_BIDIRECTIONAL);
27543 +
27544 + /* Put the new FQ in scheduled state */
27545 + ret = qman_schedule_fq(new_fq);
27546 + if (ret) {
27547 + dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
27548 +
27549 + /*
27550 + * We can kill new FQ and revert to old FQ.
27551 + * Since the desc is already modified, it is a success case
27552 + */
27553 +
27554 + drv_ctx->req_fq = old_fq;
27555 +
27556 + if (kill_fq(qidev, new_fq))
27557 + dev_warn(qidev, "New CAAM FQ kill failed\n");
27558 + } else if (kill_fq(qidev, old_fq)) {
27559 + dev_warn(qidev, "Old CAAM FQ kill failed\n");
27560 + }
27561 +
27562 + return 0;
27563 +}
27564 +EXPORT_SYMBOL(caam_drv_ctx_update);
27565 +
27566 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
27567 + int *cpu,
27568 + u32 *sh_desc)
27569 +{
27570 + size_t size;
27571 + u32 num_words;
27572 + dma_addr_t hwdesc;
27573 + struct caam_drv_ctx *drv_ctx;
27574 + const cpumask_t *cpus = qman_affine_cpus();
27575 +
27576 + num_words = desc_len(sh_desc);
27577 + if (num_words > MAX_SDLEN) {
27578 + dev_err(qidev, "Invalid descriptor len: %d words\n",
27579 + num_words);
27580 + return ERR_PTR(-EINVAL);
27581 + }
27582 +
27583 + drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
27584 + if (!drv_ctx)
27585 + return ERR_PTR(-ENOMEM);
27586 +
27587 + /*
27588 + * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
27589 + * and dma-map them.
27590 + */
27591 + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
27592 + num_words);
27593 + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
27594 + size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
27595 + hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
27596 + DMA_BIDIRECTIONAL);
27597 + if (dma_mapping_error(qidev, hwdesc)) {
27598 + dev_err(qidev, "DMA map error for preheader + shdesc\n");
27599 + kfree(drv_ctx);
27600 + return ERR_PTR(-ENOMEM);
27601 + }
27602 + drv_ctx->context_a = hwdesc;
27603 +
27604 + /* If given CPU does not own the portal, choose another one that does */
27605 + if (!cpumask_test_cpu(*cpu, cpus)) {
27606 + int *pcpu = &get_cpu_var(last_cpu);
27607 +
27608 + *pcpu = cpumask_next(*pcpu, cpus);
27609 + if (*pcpu >= nr_cpu_ids)
27610 + *pcpu = cpumask_first(cpus);
27611 + *cpu = *pcpu;
27612 +
27613 + put_cpu_var(last_cpu);
27614 + }
27615 + drv_ctx->cpu = *cpu;
27616 +
27617 + /* Find response FQ hooked with this CPU */
27618 + drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
27619 +
27620 + /* Attach request FQ */
27621 + drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
27622 + QMAN_INITFQ_FLAG_SCHED);
27623 + if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
27624 + dev_err(qidev, "create_caam_req_fq failed\n");
27625 + dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
27626 + kfree(drv_ctx);
27627 + return ERR_PTR(-ENOMEM);
27628 + }
27629 +
27630 + drv_ctx->qidev = qidev;
27631 + return drv_ctx;
27632 +}
27633 +EXPORT_SYMBOL(caam_drv_ctx_init);
27634 +
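A hedged sketch of the driver-context life cycle exposed by this file; 'qidev', 'sh_desc' and 'new_sh_desc' are placeholders for a valid QI device and shared descriptors built by the caller:

	int cpu = smp_processor_id();
	struct caam_drv_ctx *drv_ctx;
	int ret;

	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
	if (IS_ERR(drv_ctx))
		return PTR_ERR(drv_ctx);

	/* e.g. after a setkey(), swap in the updated shared descriptor */
	ret = caam_drv_ctx_update(drv_ctx, new_sh_desc);

	/* on teardown */
	caam_drv_ctx_rel(drv_ctx);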
27635 +void *qi_cache_alloc(gfp_t flags)
27636 +{
27637 + return kmem_cache_alloc(qi_cache, flags);
27638 +}
27639 +EXPORT_SYMBOL(qi_cache_alloc);
27640 +
27641 +void qi_cache_free(void *obj)
27642 +{
27643 + kmem_cache_free(qi_cache, obj);
27644 +}
27645 +EXPORT_SYMBOL(qi_cache_free);
27646 +
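Typical use of the memcache helpers above on the request hot path; struct my_edesc is a placeholder and must fit within CAAM_QI_MEMCACHE_SIZE:

	struct my_edesc *edesc;

	edesc = qi_cache_alloc(GFP_ATOMIC);
	if (unlikely(!edesc))
		return -ENOMEM;

	/* ... populate the extended descriptor and enqueue it ... */

	qi_cache_free(edesc);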
27647 +static int caam_qi_poll(struct napi_struct *napi, int budget)
27648 +{
27649 + struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
27650 +
27651 + int cleaned = qman_p_poll_dqrr(np->p, budget);
27652 +
27653 + if (cleaned < budget) {
27654 + napi_complete(napi);
27655 + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
27656 + }
27657 +
27658 + return cleaned;
27659 +}
27660 +
27661 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
27662 +{
27663 + if (IS_ERR_OR_NULL(drv_ctx))
27664 + return;
27665 +
27666 + /* Remove request FQ */
27667 + if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
27668 + dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
27669 +
27670 + dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
27671 + sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
27672 + DMA_BIDIRECTIONAL);
27673 + kfree(drv_ctx);
27674 +}
27675 +EXPORT_SYMBOL(caam_drv_ctx_rel);
27676 +
27677 +int caam_qi_shutdown(struct device *qidev)
27678 +{
27679 + int i, ret;
27680 + struct caam_qi_priv *priv = dev_get_drvdata(qidev);
27681 + const cpumask_t *cpus = qman_affine_cpus();
27682 + struct cpumask old_cpumask = current->cpus_allowed;
27683 +
27684 + for_each_cpu(i, cpus) {
27685 + struct napi_struct *irqtask;
27686 +
27687 + irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
27688 + napi_disable(irqtask);
27689 + netif_napi_del(irqtask);
27690 +
27691 + if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
27692 + dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
27693 + }
27694 +
27695 + /*
27696 + * QMan driver requires CGRs to be deleted from the same CPU where they
27697 + * were instantiated. Hence we run the module removal from the same CPU
27698 + * on which it was originally inserted.
27699 + */
27700 + set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
27701 +
27702 + ret = qman_delete_cgr(&priv->cgr);
27703 + if (ret)
27704 + dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
27705 + else
27706 + qman_release_cgrid(priv->cgr.cgrid);
27707 +
27708 + kmem_cache_destroy(qi_cache);
27709 +
27710 + /* Now that we're done with the CGRs, restore the cpus allowed mask */
27711 + set_cpus_allowed_ptr(current, &old_cpumask);
27712 +
27713 + platform_device_unregister(priv->qi_pdev);
27714 + return ret;
27715 +}
27716 +
27717 +static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
27718 +{
27719 + caam_congested = congested;
27720 +
27721 + if (congested) {
27722 +#ifdef CONFIG_DEBUG_FS
27723 + times_congested++;
27724 +#endif
27725 + pr_debug_ratelimited("CAAM entered congestion\n");
27726 +
27727 + } else {
27728 + pr_debug_ratelimited("CAAM exited congestion\n");
27729 + }
27730 +}
27731 +
27732 +static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
27733 +{
27734 + /*
27735 + * In case of a threaded ISR, for RT kernels in_irq() does not return an
27736 + * appropriate value, so use in_serving_softirq() to distinguish between
27737 + * softirq and irq contexts.
27738 + */
27739 + if (unlikely(in_irq() || !in_serving_softirq())) {
27740 + /* Disable QMan IRQ source and invoke NAPI */
27741 + qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
27742 + np->p = p;
27743 + napi_schedule(&np->irqtask);
27744 + return 1;
27745 + }
27746 + return 0;
27747 +}
27748 +
27749 +static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
27750 + struct qman_fq *rsp_fq,
27751 + const struct qm_dqrr_entry *dqrr)
27752 +{
27753 + struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
27754 + struct caam_drv_req *drv_req;
27755 + const struct qm_fd *fd;
27756 + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
27757 +
27758 + if (caam_qi_napi_schedule(p, caam_napi))
27759 + return qman_cb_dqrr_stop;
27760 +
27761 + fd = &dqrr->fd;
27762 + if (unlikely(fd->status)) {
27763 + u32 ssrc = fd->status & JRSTA_SSRC_MASK;
27764 + u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
27765 +
27766 + if (ssrc != JRSTA_SSRC_CCB_ERROR ||
27767 + err_id != JRSTA_CCBERR_ERRID_ICVCHK)
27768 + dev_err(qidev, "Error: %#x in CAAM response FD\n",
27769 + fd->status);
27770 + }
27771 +
27772 + if (unlikely(fd->format != fd->format)) {
27773 + dev_err(qidev, "Non-compound FD from CAAM\n");
27774 + return qman_cb_dqrr_consume;
27775 + }
27776 +
27777 + drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
27778 + if (unlikely(!drv_req)) {
27779 + dev_err(qidev,
27780 + "Can't find original request for caam response\n");
27781 + return qman_cb_dqrr_consume;
27782 + }
27783 +
27784 + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
27785 + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
27786 +
27787 + drv_req->cbk(drv_req, fd->status);
27788 + return qman_cb_dqrr_consume;
27789 +}
27790 +
27791 +static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
27792 +{
27793 + struct qm_mcc_initfq opts;
27794 + struct qman_fq *fq;
27795 + int ret;
27796 +
27797 + fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
27798 + if (!fq)
27799 + return -ENOMEM;
27800 +
27801 + fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
27802 +
27803 + ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
27804 + QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
27805 + if (ret) {
27806 + dev_err(qidev, "Rsp FQ create failed\n");
27807 + kfree(fq);
27808 + return -ENODEV;
27809 + }
27810 +
27811 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
27812 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
27813 + QM_INITFQ_WE_CGID;
27814 + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
27815 + QM_FQCTRL_CGE;
27816 + opts.fqd.dest.channel = qman_affine_channel(cpu);
27817 + opts.fqd.dest.wq = 3;
27818 + opts.fqd.cgid = qipriv.cgr.cgrid;
27819 + opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
27820 + QM_STASHING_EXCL_DATA;
27821 + opts.fqd.context_a.stashing.data_cl = 1;
27822 + opts.fqd.context_a.stashing.context_cl = 1;
27823 +
27824 + ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
27825 + if (ret) {
27826 + dev_err(qidev, "Rsp FQ init failed\n");
27827 + kfree(fq);
27828 + return -ENODEV;
27829 + }
27830 +
27831 + per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
27832 +
27833 + dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
27834 + return 0;
27835 +}
27836 +
27837 +static int init_cgr(struct device *qidev)
27838 +{
27839 + int ret;
27840 + struct qm_mcc_initcgr opts;
27841 + const u64 cpus = *(u64 *)qman_affine_cpus();
27842 + const int num_cpus = hweight64(cpus);
27843 + const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
27844 +
27845 + ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
27846 + if (ret) {
27847 + dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
27848 + return ret;
27849 + }
27850 +
27851 + qipriv.cgr.cb = cgr_cb;
27852 + memset(&opts, 0, sizeof(opts));
27853 + opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
27854 + opts.cgr.cscn_en = QM_CGR_EN;
27855 + opts.cgr.mode = QMAN_CGR_MODE_FRAME;
27856 + qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
27857 +
27858 + ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
27859 + if (ret) {
27860 + dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
27861 + qipriv.cgr.cgrid);
27862 + return ret;
27863 + }
27864 +
27865 + dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
27866 + return 0;
27867 +}
27868 +
27869 +static int alloc_rsp_fqs(struct device *qidev)
27870 +{
27871 + int ret, i;
27872 + const cpumask_t *cpus = qman_affine_cpus();
27873 +
27874 +	/* Now create response FQs */
27875 + for_each_cpu(i, cpus) {
27876 + ret = alloc_rsp_fq_cpu(qidev, i);
27877 + if (ret) {
27878 + dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
27879 + return ret;
27880 + }
27881 + }
27882 +
27883 + return 0;
27884 +}
27885 +
27886 +static void free_rsp_fqs(void)
27887 +{
27888 + int i;
27889 + const cpumask_t *cpus = qman_affine_cpus();
27890 +
27891 + for_each_cpu(i, cpus)
27892 + kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
27893 +}
27894 +
27895 +int caam_qi_init(struct platform_device *caam_pdev)
27896 +{
27897 + int err, i;
27898 + struct platform_device *qi_pdev;
27899 + struct device *ctrldev = &caam_pdev->dev, *qidev;
27900 + struct caam_drv_private *ctrlpriv;
27901 + const cpumask_t *cpus = qman_affine_cpus();
27902 + struct cpumask old_cpumask = current->cpus_allowed;
27903 + static struct platform_device_info qi_pdev_info = {
27904 + .name = "caam_qi",
27905 + .id = PLATFORM_DEVID_NONE
27906 + };
27907 +
27908 + /*
27909 +	 * QMan requires a CGR to be removed from the same CPU and portal where
27910 +	 * it was originally allocated. Hence we need to note down the
27911 +	 * initialisation CPU and use the same CPU for module exit.
27912 +	 * We select the first CPU from the list of portal-owning CPUs.
27913 + * Then we pin module init to this CPU.
27914 + */
27915 + mod_init_cpu = cpumask_first(cpus);
27916 + set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
27917 +
27918 + qi_pdev_info.parent = ctrldev;
27919 + qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
27920 + qi_pdev = platform_device_register_full(&qi_pdev_info);
27921 + if (IS_ERR(qi_pdev))
27922 + return PTR_ERR(qi_pdev);
27923 + arch_setup_dma_ops(&qi_pdev->dev, 0, 0, NULL, true);
27924 +
27925 + ctrlpriv = dev_get_drvdata(ctrldev);
27926 + qidev = &qi_pdev->dev;
27927 +
27928 + qipriv.qi_pdev = qi_pdev;
27929 + dev_set_drvdata(qidev, &qipriv);
27930 +
27931 + /* Initialize the congestion detection */
27932 + err = init_cgr(qidev);
27933 + if (err) {
27934 + dev_err(qidev, "CGR initialization failed: %d\n", err);
27935 + platform_device_unregister(qi_pdev);
27936 + return err;
27937 + }
27938 +
27939 + /* Initialise response FQs */
27940 + err = alloc_rsp_fqs(qidev);
27941 + if (err) {
27942 + dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
27943 + free_rsp_fqs();
27944 + platform_device_unregister(qi_pdev);
27945 + return err;
27946 + }
27947 +
27948 + /*
27949 +	 * Enable the NAPI contexts on each core which has an affine
27950 +	 * portal.
27951 + */
27952 + for_each_cpu(i, cpus) {
27953 + struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
27954 + struct caam_napi *caam_napi = &priv->caam_napi;
27955 + struct napi_struct *irqtask = &caam_napi->irqtask;
27956 + struct net_device *net_dev = &priv->net_dev;
27957 +
27958 + net_dev->dev = *qidev;
27959 + INIT_LIST_HEAD(&net_dev->napi_list);
27960 +
27961 + netif_napi_add(net_dev, irqtask, caam_qi_poll,
27962 + CAAM_NAPI_WEIGHT);
27963 +
27964 + napi_enable(irqtask);
27965 + }
27966 +
27967 + /* Hook up QI device to parent controlling caam device */
27968 + ctrlpriv->qidev = qidev;
27969 +
27970 + qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
27971 + SLAB_CACHE_DMA, NULL);
27972 + if (!qi_cache) {
27973 + dev_err(qidev, "Can't allocate CAAM cache\n");
27974 + free_rsp_fqs();
27975 + platform_device_unregister(qi_pdev);
27976 + return -ENOMEM;
27977 + }
27978 +
27979 + /* Done with the CGRs; restore the cpus allowed mask */
27980 + set_cpus_allowed_ptr(current, &old_cpumask);
27981 +#ifdef CONFIG_DEBUG_FS
27982 + debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
27983 + &times_congested, &caam_fops_u64_ro);
27984 +#endif
27985 + dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
27986 + return 0;
27987 +}
27988 --- /dev/null
27989 +++ b/drivers/crypto/caam/qi.h
27990 @@ -0,0 +1,204 @@
27991 +/*
27992 + * Public definitions for the CAAM/QI (Queue Interface) backend.
27993 + *
27994 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
27995 + * Copyright 2016-2017 NXP
27996 + */
27997 +
27998 +#ifndef __QI_H__
27999 +#define __QI_H__
28000 +
28001 +#include <linux/fsl_qman.h>
28002 +#include "compat.h"
28003 +#include "desc.h"
28004 +#include "desc_constr.h"
28005 +
28006 +/*
28007 + * CAAM hardware constructs a job descriptor which points to a shared descriptor
28008 + * (as pointed to by context_a of the to-CAAM FQ).
28009 + * When the job descriptor is executed by DECO, the whole job descriptor
28010 + * together with the shared descriptor gets loaded into the DECO buffer, which is
28011 + * 64 words (each 32-bit) long.
28012 + *
28013 + * The job descriptor constructed by CAAM hardware has the following layout:
28014 + *
28015 + * HEADER (1 word)
28016 + * Shdesc ptr (1 or 2 words)
28017 + * SEQ_OUT_PTR (1 word)
28018 + * Out ptr (1 or 2 words)
28019 + * Out length (1 word)
28020 + * SEQ_IN_PTR (1 word)
28021 + * In ptr (1 or 2 words)
28022 + * In length (1 word)
28023 + *
28024 + * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
28025 + *
28026 + * Apart from the shdesc contents, the total number of words that get loaded
28027 + * into the DECO buffer is 8 or 11. The remaining words in the DECO buffer can
28028 + * be used for storing the shared descriptor.
28029 + */
28030 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
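To make this arithmetic concrete (assuming the usual values from desc.h and desc_constr.h, i.e. a 4-byte descriptor word, a 64-word DECO buffer and the 11-word worst case described above; the macros themselves are authoritative), MAX_SDLEN works out to 53 words:

/*
 * Worked example with assumed values:
 *   CAAM_CMD_SZ         =  4 bytes (one 32-bit descriptor word)
 *   CAAM_DESC_BYTES_MAX = 64 words * 4 = 256 bytes (DECO buffer)
 *   DESC_JOB_IO_LEN     = 11 words * 4 =  44 bytes (worst-case job descriptor)
 *   MAX_SDLEN           = (256 - 44) / 4 = 53 words for the shared descriptor
 */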
28031 +
28032 +/* Length of a single buffer in the QI driver memory cache */
28033 +#define CAAM_QI_MEMCACHE_SIZE 768
28034 +
28035 +extern bool caam_congested __read_mostly;
28036 +
28037 +/*
28038 + * This is the request structure the driver application should fill while
28039 + * submitting a job to driver.
28040 + */
28041 +struct caam_drv_req;
28042 +
28043 +/*
28044 + * caam_qi_cbk - application's callback function invoked by the driver when a
28045 + * request has been processed.
28046 + * @drv_req: original request that was submitted
28047 + * @status: completion status of request (0 - success, non-zero - error code)
28048 + */
28049 +typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
28050 +
28051 +enum optype {
28052 + ENCRYPT,
28053 + DECRYPT,
28054 + GIVENCRYPT,
28055 + NUM_OP
28056 +};
28057 +
28058 +/**
28059 + * caam_drv_ctx - CAAM/QI backend driver context
28060 + *
28061 + * The jobs are processed by the driver against a driver context.
28062 + * With every cryptographic context, a driver context is attached.
28063 + * The driver context contains data for private use by driver.
28064 + * For the applications, this is an opaque structure.
28065 + *
28066 + * @prehdr: preheader placed before shrd desc
28067 + * @sh_desc: shared descriptor
28068 + * @context_a: shared descriptor dma address
28069 + * @req_fq: to-CAAM request frame queue
28070 + * @rsp_fq: from-CAAM response frame queue
28071 + * @cpu: cpu on which to receive CAAM response
28072 + * @op_type: operation type
28073 + * @qidev: device pointer for CAAM/QI backend
28074 + */
28075 +struct caam_drv_ctx {
28076 + u32 prehdr[2];
28077 + u32 sh_desc[MAX_SDLEN];
28078 + dma_addr_t context_a;
28079 + struct qman_fq *req_fq;
28080 + struct qman_fq *rsp_fq;
28081 + int cpu;
28082 + enum optype op_type;
28083 + struct device *qidev;
28084 +} ____cacheline_aligned;
28085 +
28086 +/**
28087 + * caam_drv_req - The request structure the driver application should fill while
28088 + * submitting a job to driver.
28089 + * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
28090 + * buffers.
28091 + * @cbk: callback function to invoke when job is completed
28092 + * @app_ctx: arbitrary context attached with request by the application
28093 + *
28094 + * @drv_ctx: driver context against which this request is processed, as
28095 + *           returned by caam_drv_ctx_init()
28096 + *
28097 + * All of the above fields must be filled in by the application before
28098 + * calling caam_qi_enqueue().
28099 + */
28100 +struct caam_drv_req {
28101 + struct qm_sg_entry fd_sgt[2];
28102 + struct caam_drv_ctx *drv_ctx;
28103 + caam_qi_cbk cbk;
28104 + void *app_ctx;
28105 +} ____cacheline_aligned;
28106 +
28107 +/**
28108 + * caam_drv_ctx_init - Initialise a CAAM/QI driver context
28109 + *
28110 + * A CAAM/QI driver context must be attached with each cryptographic context.
28111 + * This function allocates memory for CAAM/QI context and returns a handle to
28112 + * the application. This handle must be submitted along with each enqueue
28113 + * request to the driver by the application.
28114 + *
28115 + * @cpu: CPU on which the application prefers the driver to receive CAAM
28116 + *        responses. The request completion callback is issued from this
28117 + *        CPU.
28118 + * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
28119 + * context.
28120 + *
28121 + * Returns a driver context on success or negative error code on failure.
28122 + */
28123 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
28124 + u32 *sh_desc);
28125 +
28126 +/**
28127 + * caam_qi_enqueue - Submit a request to QI backend driver.
28128 + *
28129 + * The request structure must be properly filled as described above.
28130 + *
28131 + * @qidev: device pointer for QI backend
28132 + * @req: CAAM QI request structure
28133 + *
28134 + * Returns 0 on success or negative error code on failure.
28135 + */
28136 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
28137 +
28138 +/**
28139 + * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
28140 + * or too many CAAM responses are pending to be processed.
28141 + * @drv_ctx: driver context for which job is to be submitted
28142 + *
28143 + * Returns caam congestion status 'true/false'
28144 + */
28145 +bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
28146 +
28147 +/**
28148 + * caam_drv_ctx_update - Update QI driver context
28149 + *
28150 + * Invoked when the shared descriptor in the driver context needs to be changed.
28151 + *
28152 + * @drv_ctx: driver context to be updated
28153 + * @sh_desc: new shared descriptor pointer to be updated in QI driver context
28154 + *
28155 + * Returns 0 on success or negative error code on failure.
28156 + */
28157 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
28158 +
28159 +/**
28160 + * caam_drv_ctx_rel - Release a QI driver context
28161 + * @drv_ctx: context to be released
28162 + */
28163 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
28164 +
28165 +int caam_qi_init(struct platform_device *pdev);
28166 +int caam_qi_shutdown(struct device *dev);
28167 +
28168 +/**
28169 + * qi_cache_alloc - Allocate buffers from CAAM-QI cache
28170 + *
28171 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has
28172 + * to be allocated on the hotpath. Instead of using malloc, one can use the
28173 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
28174 + * will have a size of CAAM_QI_MEMCACHE_SIZE bytes.
28175 + *
28176 + * @flags: flags that would be used for the equivalent malloc(..) call
28177 + *
28178 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
28179 + */
28180 +void *qi_cache_alloc(gfp_t flags);
28181 +
28182 +/**
28183 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
28184 + *
28185 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs
28186 + * the buffer previously allocated by a qi_cache_alloc call.
28187 + * No checking is done; the call is passed straight through to
28188 + * kmem_cache_free(...).
28189 + *
28190 + * @obj: object previously allocated using qi_cache_alloc()
28191 + */
28192 +void qi_cache_free(void *obj);
28193 +
28194 +#endif /* __QI_H__ */
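To see how the pieces of this header fit together, here is a minimal, hypothetical submission path written only against the qi.h API above (plus the sg_sw_qm.h helpers added later in this patch). This is a sketch, not the actual caamalg_qi.c flow: the my_* names are made up, src/dst are assumed to be already DMA-mapped, the shared descriptor is assumed to be built elsewhere, and a driver context would normally be created once per crypto transform rather than per request.

/* Hypothetical sketch of submitting one job through the CAAM/QI backend. */
static void my_done_cb(struct caam_drv_req *drv_req, u32 status)
{
	/* status: 0 on success, otherwise the CAAM status word from the FD */
	pr_debug("CAAM job done, status %#x\n", status);
	qi_cache_free(drv_req);
}

static int my_submit(struct device *qidev, u32 *sh_desc,
		     dma_addr_t src, dma_addr_t dst, u32 len)
{
	int cpu = raw_smp_processor_id();	/* preferred CPU for the response */
	struct caam_drv_ctx *drv_ctx;
	struct caam_drv_req *drv_req;
	int ret;

	/*
	 * Attach a driver context (shared descriptor + FQ pair); normally done
	 * once per transform, shown inline here only for brevity.
	 */
	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
	if (IS_ERR(drv_ctx))
		return PTR_ERR(drv_ctx);

	/* Hot-path allocations come from the QI memory cache */
	drv_req = qi_cache_alloc(GFP_ATOMIC);
	if (!drv_req) {
		caam_drv_ctx_rel(drv_ctx);
		return -ENOMEM;
	}

	/* fd_sgt[0] describes the output buffer, fd_sgt[1] the input buffer */
	dma_to_qm_sg_one(&drv_req->fd_sgt[0], dst, len, 0);
	dma_to_qm_sg_one_last(&drv_req->fd_sgt[1], src, len, 0);

	drv_req->drv_ctx = drv_ctx;
	drv_req->cbk = my_done_cb;
	drv_req->app_ctx = NULL;

	ret = caam_qi_enqueue(qidev, drv_req);
	if (ret) {
		qi_cache_free(drv_req);
		caam_drv_ctx_rel(drv_ctx);
	}
	return ret;
}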
28195 --- a/drivers/crypto/caam/regs.h
28196 +++ b/drivers/crypto/caam/regs.h
28197 @@ -2,6 +2,7 @@
28198 * CAAM hardware register-level view
28199 *
28200 * Copyright 2008-2011 Freescale Semiconductor, Inc.
28201 + * Copyright 2017 NXP
28202 */
28203
28204 #ifndef REGS_H
28205 @@ -67,6 +68,7 @@
28206 */
28207
28208 extern bool caam_little_end;
28209 +extern bool caam_imx;
28210
28211 #define caam_to_cpu(len) \
28212 static inline u##len caam##len ## _to_cpu(u##len val) \
28213 @@ -154,13 +156,10 @@ static inline u64 rd_reg64(void __iomem
28214 #else /* CONFIG_64BIT */
28215 static inline void wr_reg64(void __iomem *reg, u64 data)
28216 {
28217 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
28218 - if (caam_little_end) {
28219 + if (!caam_imx && caam_little_end) {
28220 wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
28221 wr_reg32((u32 __iomem *)(reg), data);
28222 - } else
28223 -#endif
28224 - {
28225 + } else {
28226 wr_reg32((u32 __iomem *)(reg), data >> 32);
28227 wr_reg32((u32 __iomem *)(reg) + 1, data);
28228 }
28229 @@ -168,41 +167,40 @@ static inline void wr_reg64(void __iomem
28230
28231 static inline u64 rd_reg64(void __iomem *reg)
28232 {
28233 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
28234 - if (caam_little_end)
28235 + if (!caam_imx && caam_little_end)
28236 return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
28237 (u64)rd_reg32((u32 __iomem *)(reg)));
28238 - else
28239 -#endif
28240 - return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
28241 - (u64)rd_reg32((u32 __iomem *)(reg) + 1));
28242 +
28243 + return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
28244 + (u64)rd_reg32((u32 __iomem *)(reg) + 1));
28245 }
28246 #endif /* CONFIG_64BIT */
28247
28248 +static inline u64 cpu_to_caam_dma64(dma_addr_t value)
28249 +{
28250 + if (caam_imx)
28251 + return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
28252 + (u64)cpu_to_caam32(upper_32_bits(value)));
28253 +
28254 + return cpu_to_caam64(value);
28255 +}
28256 +
28257 +static inline u64 caam_dma64_to_cpu(u64 value)
28258 +{
28259 + if (caam_imx)
28260 + return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
28261 + (u64)caam32_to_cpu(upper_32_bits(value)));
28262 +
28263 + return caam64_to_cpu(value);
28264 +}
28265 +
28266 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
28267 -#ifdef CONFIG_SOC_IMX7D
28268 -#define cpu_to_caam_dma(value) \
28269 - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
28270 - (u64)cpu_to_caam32(upper_32_bits(value)))
28271 -#define caam_dma_to_cpu(value) \
28272 - (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
28273 - (u64)caam32_to_cpu(upper_32_bits(value)))
28274 -#else
28275 -#define cpu_to_caam_dma(value) cpu_to_caam64(value)
28276 -#define caam_dma_to_cpu(value) caam64_to_cpu(value)
28277 -#endif /* CONFIG_SOC_IMX7D */
28278 +#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
28279 +#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
28280 #else
28281 #define cpu_to_caam_dma(value) cpu_to_caam32(value)
28282 #define caam_dma_to_cpu(value) caam32_to_cpu(value)
28283 -#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
28284 -
28285 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
28286 -#define cpu_to_caam_dma64(value) \
28287 - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
28288 - (u64)cpu_to_caam32(upper_32_bits(value)))
28289 -#else
28290 -#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
28291 -#endif
28292 +#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
28293
28294 /*
28295 * jr_outentry
28296 @@ -293,6 +291,7 @@ struct caam_perfmon {
28297 u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
28298 #define CTPR_MS_QI_SHIFT 25
28299 #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
28300 +#define CTPR_MS_DPAA2 BIT(13)
28301 #define CTPR_MS_VIRT_EN_INCL 0x00000001
28302 #define CTPR_MS_VIRT_EN_POR 0x00000002
28303 #define CTPR_MS_PG_SZ_MASK 0x10
28304 @@ -628,6 +627,8 @@ struct caam_job_ring {
28305 #define JRSTA_DECOERR_INVSIGN 0x86
28306 #define JRSTA_DECOERR_DSASIGN 0x87
28307
28308 +#define JRSTA_QIERR_ERROR_MASK 0x00ff
28309 +
28310 #define JRSTA_CCBERR_JUMP 0x08000000
28311 #define JRSTA_CCBERR_INDEX_MASK 0xff00
28312 #define JRSTA_CCBERR_INDEX_SHIFT 8
28313 --- /dev/null
28314 +++ b/drivers/crypto/caam/sg_sw_qm.h
28315 @@ -0,0 +1,126 @@
28316 +/*
28317 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
28318 + * Copyright 2016-2017 NXP
28319 + *
28320 + * Redistribution and use in source and binary forms, with or without
28321 + * modification, are permitted provided that the following conditions are met:
28322 + * * Redistributions of source code must retain the above copyright
28323 + * notice, this list of conditions and the following disclaimer.
28324 + * * Redistributions in binary form must reproduce the above copyright
28325 + * notice, this list of conditions and the following disclaimer in the
28326 + * documentation and/or other materials provided with the distribution.
28327 + * * Neither the name of Freescale Semiconductor nor the
28328 + * names of its contributors may be used to endorse or promote products
28329 + * derived from this software without specific prior written permission.
28330 + *
28331 + *
28332 + * ALTERNATIVELY, this software may be distributed under the terms of the
28333 + * GNU General Public License ("GPL") as published by the Free Software
28334 + * Foundation, either version 2 of that License or (at your option) any
28335 + * later version.
28336 + *
28337 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
28338 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
28339 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28340 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
28341 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28342 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28343 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
28344 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28345 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28346 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28347 + */
28348 +
28349 +#ifndef __SG_SW_QM_H
28350 +#define __SG_SW_QM_H
28351 +
28352 +#include <linux/fsl_qman.h>
28353 +#include "regs.h"
28354 +
28355 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
28356 +{
28357 + dma_addr_t addr = qm_sg_ptr->opaque;
28358 +
28359 + qm_sg_ptr->opaque = cpu_to_caam64(addr);
28360 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
28361 +}
28362 +
28363 +static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
28364 + u32 len, u16 offset)
28365 +{
28366 + qm_sg_ptr->addr = dma;
28367 + qm_sg_ptr->length = len;
28368 + qm_sg_ptr->__reserved2 = 0;
28369 + qm_sg_ptr->bpid = 0;
28370 + qm_sg_ptr->__reserved3 = 0;
28371 + qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
28372 +
28373 + cpu_to_hw_sg(qm_sg_ptr);
28374 +}
28375 +
28376 +static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
28377 + dma_addr_t dma, u32 len, u16 offset)
28378 +{
28379 + qm_sg_ptr->extension = 0;
28380 + qm_sg_ptr->final = 0;
28381 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
28382 +}
28383 +
28384 +static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
28385 + dma_addr_t dma, u32 len, u16 offset)
28386 +{
28387 + qm_sg_ptr->extension = 0;
28388 + qm_sg_ptr->final = 1;
28389 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
28390 +}
28391 +
28392 +static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
28393 + dma_addr_t dma, u32 len, u16 offset)
28394 +{
28395 + qm_sg_ptr->extension = 1;
28396 + qm_sg_ptr->final = 0;
28397 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
28398 +}
28399 +
28400 +static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
28401 + dma_addr_t dma, u32 len,
28402 + u16 offset)
28403 +{
28404 + qm_sg_ptr->extension = 1;
28405 + qm_sg_ptr->final = 1;
28406 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
28407 +}
28408 +
28409 +/*
28410 + * convert scatterlist to h/w link table format,
28411 + * but does not set the final bit; instead, returns the last entry
28412 + */
28413 +static inline struct qm_sg_entry *
28414 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
28415 + struct qm_sg_entry *qm_sg_ptr, u16 offset)
28416 +{
28417 + while (sg_count && sg) {
28418 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
28419 + sg_dma_len(sg), offset);
28420 + qm_sg_ptr++;
28421 + sg = sg_next(sg);
28422 + sg_count--;
28423 + }
28424 + return qm_sg_ptr - 1;
28425 +}
28426 +
28427 +/*
28428 + * convert scatterlist to h/w link table format
28429 + * scatterlist must have been previously dma mapped
28430 + */
28431 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
28432 + struct qm_sg_entry *qm_sg_ptr, u16 offset)
28433 +{
28434 + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
28435 +
28436 + qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
28437 + qm_sg_ptr->final = 1;
28438 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
28439 +}
28440 +
28441 +#endif /* __SG_SW_QM_H */
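A short usage sketch of the helpers above (hypothetical caller, made-up my_build_qm_sg name; the scatterlist must be DMA-mapped first, exactly as the comment requires):

/* Hypothetical: build a QMan S/G table, final bit set on the last entry. */
static int my_build_qm_sg(struct device *dev, struct scatterlist *sgl, int nents,
			  struct qm_sg_entry *table)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (!mapped)
		return -ENOMEM;

	/* Fill 'mapped' entries from the mapped scatterlist */
	sg_to_qm_sg_last(sgl, mapped, table, 0);
	return mapped;
}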
28442 --- /dev/null
28443 +++ b/drivers/crypto/caam/sg_sw_qm2.h
28444 @@ -0,0 +1,81 @@
28445 +/*
28446 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
28447 + * Copyright 2017 NXP
28448 + *
28449 + * Redistribution and use in source and binary forms, with or without
28450 + * modification, are permitted provided that the following conditions are met:
28451 + * * Redistributions of source code must retain the above copyright
28452 + * notice, this list of conditions and the following disclaimer.
28453 + * * Redistributions in binary form must reproduce the above copyright
28454 + * notice, this list of conditions and the following disclaimer in the
28455 + * documentation and/or other materials provided with the distribution.
28456 + * * Neither the names of the above-listed copyright holders nor the
28457 + * names of any contributors may be used to endorse or promote products
28458 + * derived from this software without specific prior written permission.
28459 + *
28460 + *
28461 + * ALTERNATIVELY, this software may be distributed under the terms of the
28462 + * GNU General Public License ("GPL") as published by the Free Software
28463 + * Foundation, either version 2 of that License or (at your option) any
28464 + * later version.
28465 + *
28466 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
28467 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28468 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28469 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
28470 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28471 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28472 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28473 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28474 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28475 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28476 + * POSSIBILITY OF SUCH DAMAGE.
28477 + */
28478 +
28479 +#ifndef _SG_SW_QM2_H_
28480 +#define _SG_SW_QM2_H_
28481 +
28482 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
28483 +
28484 +static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
28485 + dma_addr_t dma, u32 len, u16 offset)
28486 +{
28487 + dpaa2_sg_set_addr(qm_sg_ptr, dma);
28488 + dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
28489 + dpaa2_sg_set_final(qm_sg_ptr, false);
28490 + dpaa2_sg_set_len(qm_sg_ptr, len);
28491 + dpaa2_sg_set_bpid(qm_sg_ptr, 0);
28492 + dpaa2_sg_set_offset(qm_sg_ptr, offset);
28493 +}
28494 +
28495 +/*
28496 + * convert scatterlist to h/w link table format,
28497 + * but does not set the final bit; instead, returns the last entry
28498 + */
28499 +static inline struct dpaa2_sg_entry *
28500 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
28501 + struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
28502 +{
28503 + while (sg_count && sg) {
28504 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
28505 + sg_dma_len(sg), offset);
28506 + qm_sg_ptr++;
28507 + sg = sg_next(sg);
28508 + sg_count--;
28509 + }
28510 + return qm_sg_ptr - 1;
28511 +}
28512 +
28513 +/*
28514 + * convert scatterlist to h/w link table format
28515 + * scatterlist must have been previously dma mapped
28516 + */
28517 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
28518 + struct dpaa2_sg_entry *qm_sg_ptr,
28519 + u16 offset)
28520 +{
28521 + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
28522 + dpaa2_sg_set_final(qm_sg_ptr, true);
28523 +}
28524 +
28525 +#endif /* _SG_SW_QM2_H_ */
28526 --- a/drivers/crypto/caam/sg_sw_sec4.h
28527 +++ b/drivers/crypto/caam/sg_sw_sec4.h
28528 @@ -5,9 +5,19 @@
28529 *
28530 */
28531
28532 +#ifndef _SG_SW_SEC4_H_
28533 +#define _SG_SW_SEC4_H_
28534 +
28535 +#include "ctrl.h"
28536 #include "regs.h"
28537 +#include "sg_sw_qm2.h"
28538 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
28539
28540 -struct sec4_sg_entry;
28541 +struct sec4_sg_entry {
28542 + u64 ptr;
28543 + u32 len;
28544 + u32 bpid_offset;
28545 +};
28546
28547 /*
28548 * convert single dma address to h/w link table format
28549 @@ -15,9 +25,15 @@ struct sec4_sg_entry;
28550 static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
28551 dma_addr_t dma, u32 len, u16 offset)
28552 {
28553 - sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
28554 - sec4_sg_ptr->len = cpu_to_caam32(len);
28555 - sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
28556 + if (caam_dpaa2) {
28557 + dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
28558 + offset);
28559 + } else {
28560 + sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
28561 + sec4_sg_ptr->len = cpu_to_caam32(len);
28562 + sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
28563 + SEC4_SG_OFFSET_MASK);
28564 + }
28565 #ifdef DEBUG
28566 print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
28567 DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
28568 @@ -43,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, in
28569 return sec4_sg_ptr - 1;
28570 }
28571
28572 +static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
28573 +{
28574 + if (caam_dpaa2)
28575 + dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
28576 + else
28577 + sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
28578 +}
28579 +
28580 /*
28581 * convert scatterlist to h/w link table format
28582 * scatterlist must have been previously dma mapped
28583 @@ -52,31 +76,7 @@ static inline void sg_to_sec4_sg_last(st
28584 u16 offset)
28585 {
28586 sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
28587 - sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
28588 -}
28589 -
28590 -static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
28591 - struct scatterlist *sg, unsigned int total,
28592 - struct sec4_sg_entry *sec4_sg_ptr)
28593 -{
28594 - do {
28595 - unsigned int len = min(sg_dma_len(sg), total);
28596 -
28597 - dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
28598 - sec4_sg_ptr++;
28599 - sg = sg_next(sg);
28600 - total -= len;
28601 - } while (total);
28602 - return sec4_sg_ptr - 1;
28603 + sg_to_sec4_set_last(sec4_sg_ptr);
28604 }
28605
28606 -/* derive number of elements in scatterlist, but return 0 for 1 */
28607 -static inline int sg_count(struct scatterlist *sg_list, int nbytes)
28608 -{
28609 - int sg_nents = sg_nents_for_len(sg_list, nbytes);
28610 -
28611 - if (likely(sg_nents == 1))
28612 - return 0;
28613 -
28614 - return sg_nents;
28615 -}
28616 +#endif /* _SG_SW_SEC4_H_ */
28617 --- a/drivers/crypto/talitos.c
28618 +++ b/drivers/crypto/talitos.c
28619 @@ -1241,6 +1241,14 @@ static int ipsec_esp(struct talitos_edes
28620 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
28621 sg_count, areq->assoclen, tbl_off, elen);
28622
28623 + /*
28624 + * In case of SEC 2.x+, cipher in len must include only the ciphertext,
28625 + * while extent is used for ICV len.
28626 + */
28627 + if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
28628 + (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
28629 + desc->ptr[4].len = cpu_to_be16(cryptlen);
28630 +
28631 if (ret > 1) {
28632 tbl_off += ret;
28633 sync_needed = true;
28634 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c
28635 +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
28636 @@ -516,7 +516,7 @@ err:
28637
28638 /**
28639 * rsi_disconnect() - This function performs the reverse of the probe function,
28640 - * it deintialize the driver structure.
28641 + * it deinitialize the driver structure.
28642 * @pfunction: Pointer to the USB interface structure.
28643 *
28644 * Return: None.
28645 --- a/drivers/staging/wilc1000/linux_wlan.c
28646 +++ b/drivers/staging/wilc1000/linux_wlan.c
28647 @@ -211,7 +211,7 @@ static void deinit_irq(struct net_device
28648 vif = netdev_priv(dev);
28649 wilc = vif->wilc;
28650
28651 - /* Deintialize IRQ */
28652 + /* Deinitialize IRQ */
28653 if (wilc->dev_irq_num) {
28654 free_irq(wilc->dev_irq_num, wilc);
28655 gpio_free(wilc->gpio);
28656 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
28657 +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
28658 @@ -2359,7 +2359,7 @@ int wilc_deinit_host_int(struct net_devi
28659 del_timer_sync(&wilc_during_ip_timer);
28660
28661 if (s32Error)
28662 - netdev_err(net, "Error while deintializing host interface\n");
28663 + netdev_err(net, "Error while deinitializing host interface\n");
28664
28665 return s32Error;
28666 }
28667 --- /dev/null
28668 +++ b/include/crypto/acompress.h
28669 @@ -0,0 +1,269 @@
28670 +/*
28671 + * Asynchronous Compression operations
28672 + *
28673 + * Copyright (c) 2016, Intel Corporation
28674 + * Authors: Weigang Li <weigang.li@intel.com>
28675 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
28676 + *
28677 + * This program is free software; you can redistribute it and/or modify it
28678 + * under the terms of the GNU General Public License as published by the Free
28679 + * Software Foundation; either version 2 of the License, or (at your option)
28680 + * any later version.
28681 + *
28682 + */
28683 +#ifndef _CRYPTO_ACOMP_H
28684 +#define _CRYPTO_ACOMP_H
28685 +#include <linux/crypto.h>
28686 +
28687 +#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
28688 +
28689 +/**
28690 + * struct acomp_req - asynchronous (de)compression request
28691 + *
28692 + * @base: Common attributes for asynchronous crypto requests
28693 + * @src: Source Data
28694 + * @dst: Destination data
28695 + * @slen: Size of the input buffer
28696 + * @dlen: Size of the output buffer and number of bytes produced
28697 + * @flags: Internal flags
28698 + * @__ctx: Start of private context data
28699 + */
28700 +struct acomp_req {
28701 + struct crypto_async_request base;
28702 + struct scatterlist *src;
28703 + struct scatterlist *dst;
28704 + unsigned int slen;
28705 + unsigned int dlen;
28706 + u32 flags;
28707 + void *__ctx[] CRYPTO_MINALIGN_ATTR;
28708 +};
28709 +
28710 +/**
28711 + * struct crypto_acomp - user-instantiated objects which encapsulate
28712 + * algorithms and core processing logic
28713 + *
28714 + * @compress: Function performs a compress operation
28715 + * @decompress: Function performs a de-compress operation
28716 + * @dst_free: Frees destination buffer if allocated inside the
28717 + * algorithm
28718 + * @reqsize: Context size for (de)compression requests
28719 + * @base: Common crypto API algorithm data structure
28720 + */
28721 +struct crypto_acomp {
28722 + int (*compress)(struct acomp_req *req);
28723 + int (*decompress)(struct acomp_req *req);
28724 + void (*dst_free)(struct scatterlist *dst);
28725 + unsigned int reqsize;
28726 + struct crypto_tfm base;
28727 +};
28728 +
28729 +/**
28730 + * struct acomp_alg - asynchronous compression algorithm
28731 + *
28732 + * @compress: Function performs a compress operation
28733 + * @decompress: Function performs a de-compress operation
28734 + * @dst_free: Frees destination buffer if allocated inside the algorithm
28735 + * @init: Initialize the cryptographic transformation object.
28736 + * This function is used to initialize the cryptographic
28737 + * transformation object. This function is called only once at
28738 + * the instantiation time, right after the transformation context
28739 + * was allocated. In case the cryptographic hardware has some
28740 + * special requirements which need to be handled by software, this
28741 + * function shall check for the precise requirement of the
28742 + * transformation and put any software fallbacks in place.
28743 + * @exit: Deinitialize the cryptographic transformation object. This is a
28744 + * counterpart to @init, used to remove various changes set in
28745 + * @init.
28746 + *
28747 + * @reqsize: Context size for (de)compression requests
28748 + * @base: Common crypto API algorithm data structure
28749 + */
28750 +struct acomp_alg {
28751 + int (*compress)(struct acomp_req *req);
28752 + int (*decompress)(struct acomp_req *req);
28753 + void (*dst_free)(struct scatterlist *dst);
28754 + int (*init)(struct crypto_acomp *tfm);
28755 + void (*exit)(struct crypto_acomp *tfm);
28756 + unsigned int reqsize;
28757 + struct crypto_alg base;
28758 +};
28759 +
28760 +/**
28761 + * DOC: Asynchronous Compression API
28762 + *
28763 + * The Asynchronous Compression API is used with the algorithms of type
28764 + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
28765 + */
28766 +
28767 +/**
28768 + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
28769 + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
28770 + * compression algorithm e.g. "deflate"
28771 + * @type: specifies the type of the algorithm
28772 + * @mask: specifies the mask for the algorithm
28773 + *
28774 + * Allocate a handle for a compression algorithm. The returned struct
28775 + * crypto_acomp is the handle that is required for any subsequent
28776 + * API invocation for the compression operations.
28777 + *
28778 + * Return: allocated handle in case of success; IS_ERR() is true in case
28779 + * of an error, PTR_ERR() returns the error code.
28780 + */
28781 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
28782 + u32 mask);
28783 +
28784 +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
28785 +{
28786 + return &tfm->base;
28787 +}
28788 +
28789 +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
28790 +{
28791 + return container_of(alg, struct acomp_alg, base);
28792 +}
28793 +
28794 +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
28795 +{
28796 + return container_of(tfm, struct crypto_acomp, base);
28797 +}
28798 +
28799 +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
28800 +{
28801 + return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
28802 +}
28803 +
28804 +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
28805 +{
28806 + return tfm->reqsize;
28807 +}
28808 +
28809 +static inline void acomp_request_set_tfm(struct acomp_req *req,
28810 + struct crypto_acomp *tfm)
28811 +{
28812 + req->base.tfm = crypto_acomp_tfm(tfm);
28813 +}
28814 +
28815 +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
28816 +{
28817 + return __crypto_acomp_tfm(req->base.tfm);
28818 +}
28819 +
28820 +/**
28821 + * crypto_free_acomp() -- free ACOMPRESS tfm handle
28822 + *
28823 + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
28824 + */
28825 +static inline void crypto_free_acomp(struct crypto_acomp *tfm)
28826 +{
28827 + crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
28828 +}
28829 +
28830 +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
28831 +{
28832 + type &= ~CRYPTO_ALG_TYPE_MASK;
28833 + type |= CRYPTO_ALG_TYPE_ACOMPRESS;
28834 + mask |= CRYPTO_ALG_TYPE_MASK;
28835 +
28836 + return crypto_has_alg(alg_name, type, mask);
28837 +}
28838 +
28839 +/**
28840 + * acomp_request_alloc() -- allocates asynchronous (de)compression request
28841 + *
28842 + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
28843 + *
28844 + * Return: allocated handle in case of success or NULL in case of an error
28845 + */
28846 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
28847 +
28848 +/**
28849 + * acomp_request_free() -- zeroize and free asynchronous (de)compression
28850 + * request as well as the output buffer if allocated
28851 + * inside the algorithm
28852 + *
28853 + * @req: request to free
28854 + */
28855 +void acomp_request_free(struct acomp_req *req);
28856 +
28857 +/**
28858 + * acomp_request_set_callback() -- Sets an asynchronous callback
28859 + *
28860 + * Callback will be called when an asynchronous operation on a given
28861 + * request is finished.
28862 + *
28863 + * @req: request that the callback will be set for
28864 + * @flgs: specify for instance if the operation may backlog
28865 + * @cmpl: callback which will be called
28866 + * @data: private data used by the caller
28867 + */
28868 +static inline void acomp_request_set_callback(struct acomp_req *req,
28869 + u32 flgs,
28870 + crypto_completion_t cmpl,
28871 + void *data)
28872 +{
28873 + req->base.complete = cmpl;
28874 + req->base.data = data;
28875 + req->base.flags = flgs;
28876 +}
28877 +
28878 +/**
28879 + * acomp_request_set_params() -- Sets request parameters
28880 + *
28881 + * Sets parameters required by an acomp operation
28882 + *
28883 + * @req: asynchronous compress request
28884 + * @src: pointer to input buffer scatterlist
28885 + * @dst: pointer to output buffer scatterlist. If this is NULL, the
28886 + * acomp layer will allocate the output memory
28887 + * @slen: size of the input buffer
28888 + * @dlen: size of the output buffer. If dst is NULL, this can be used by
28889 + * the user to specify the maximum amount of memory to allocate
28890 + */
28891 +static inline void acomp_request_set_params(struct acomp_req *req,
28892 + struct scatterlist *src,
28893 + struct scatterlist *dst,
28894 + unsigned int slen,
28895 + unsigned int dlen)
28896 +{
28897 + req->src = src;
28898 + req->dst = dst;
28899 + req->slen = slen;
28900 + req->dlen = dlen;
28901 +
28902 + if (!req->dst)
28903 + req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
28904 +}
28905 +
28906 +/**
28907 + * crypto_acomp_compress() -- Invoke asynchronous compress operation
28908 + *
28909 + * Function invokes the asynchronous compress operation
28910 + *
28911 + * @req: asynchronous compress request
28912 + *
28913 + * Return: zero on success; error code in case of error
28914 + */
28915 +static inline int crypto_acomp_compress(struct acomp_req *req)
28916 +{
28917 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
28918 +
28919 + return tfm->compress(req);
28920 +}
28921 +
28922 +/**
28923 + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
28924 + *
28925 + * Function invokes the asynchronous decompress operation
28926 + *
28927 + * @req: asynchronous compress request
28928 + *
28929 + * Return: zero on success; error code in case of error
28930 + */
28931 +static inline int crypto_acomp_decompress(struct acomp_req *req)
28932 +{
28933 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
28934 +
28935 + return tfm->decompress(req);
28936 +}
28937 +
28938 +#endif
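For orientation, a minimal sketch of a consumer of this API (hypothetical: the "deflate" algorithm name and the my_* identifiers are assumptions, and since this kernel predates the generic crypto wait helpers, a struct completion is used to wait for the asynchronous result):

#include <crypto/acompress.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct my_acomp_result {
	struct completion completion;
	int err;
};

static void my_acomp_done(struct crypto_async_request *req, int err)
{
	struct my_acomp_result *res = req->data;

	if (err == -EINPROGRESS)
		return;		/* request moved off the backlog, keep waiting */
	res->err = err;
	complete(&res->completion);
}

static int my_compress(struct scatterlist *src, unsigned int slen,
		       struct scatterlist *dst, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct my_acomp_result res;
	int err;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	init_completion(&res.completion);
	acomp_request_set_params(req, src, dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   my_acomp_done, &res);

	err = crypto_acomp_compress(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.completion);
		err = res.err;
	}

	/* On success req->dlen holds the number of bytes actually produced */
	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return err;
}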
28939 --- /dev/null
28940 +++ b/include/crypto/internal/acompress.h
28941 @@ -0,0 +1,81 @@
28942 +/*
28943 + * Asynchronous Compression operations
28944 + *
28945 + * Copyright (c) 2016, Intel Corporation
28946 + * Authors: Weigang Li <weigang.li@intel.com>
28947 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
28948 + *
28949 + * This program is free software; you can redistribute it and/or modify it
28950 + * under the terms of the GNU General Public License as published by the Free
28951 + * Software Foundation; either version 2 of the License, or (at your option)
28952 + * any later version.
28953 + *
28954 + */
28955 +#ifndef _CRYPTO_ACOMP_INT_H
28956 +#define _CRYPTO_ACOMP_INT_H
28957 +#include <crypto/acompress.h>
28958 +
28959 +/*
28960 + * Transform internal helpers.
28961 + */
28962 +static inline void *acomp_request_ctx(struct acomp_req *req)
28963 +{
28964 + return req->__ctx;
28965 +}
28966 +
28967 +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
28968 +{
28969 + return tfm->base.__crt_ctx;
28970 +}
28971 +
28972 +static inline void acomp_request_complete(struct acomp_req *req,
28973 + int err)
28974 +{
28975 + req->base.complete(&req->base, err);
28976 +}
28977 +
28978 +static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
28979 +{
28980 + return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
28981 +}
28982 +
28983 +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
28984 +{
28985 + struct acomp_req *req;
28986 +
28987 + req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
28988 + if (likely(req))
28989 + acomp_request_set_tfm(req, tfm);
28990 + return req;
28991 +}
28992 +
28993 +static inline void __acomp_request_free(struct acomp_req *req)
28994 +{
28995 + kzfree(req);
28996 +}
28997 +
28998 +/**
28999 + * crypto_register_acomp() -- Register asynchronous compression algorithm
29000 + *
29001 + * Function registers an implementation of an asynchronous
29002 + * compression algorithm
29003 + *
29004 + * @alg: algorithm definition
29005 + *
29006 + * Return: zero on success; error code in case of error
29007 + */
29008 +int crypto_register_acomp(struct acomp_alg *alg);
29009 +
29010 +/**
29011 + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
29012 + *
29013 + * Function unregisters an implementation of an asynchronous
29014 + * compression algorithm
29015 + *
29016 + * @alg: algorithm definition
29017 + *
29018 + * Return: zero on success; error code in case of error
29019 + */
29020 +int crypto_unregister_acomp(struct acomp_alg *alg);
29021 +
29022 +#endif
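On the provider side, registration through this internal header is a plain crypto_register_acomp() call on a filled-in struct acomp_alg. A skeletal, hypothetical module (stub compress/decompress, made-up names) might look like:

#include <crypto/internal/acompress.h>
#include <linux/module.h>

static int my_acomp_compress(struct acomp_req *req)
{
	/* Stub: a real driver queues req to hardware and completes it later */
	return -EOPNOTSUPP;
}

static int my_acomp_decompress(struct acomp_req *req)
{
	return -EOPNOTSUPP;
}

static struct acomp_alg my_acomp_alg = {
	.compress	= my_acomp_compress,
	.decompress	= my_acomp_decompress,
	.reqsize	= 0,	/* per-request private context size, if any */
	.base		= {
		.cra_name		= "deflate",
		.cra_driver_name	= "deflate-my-hw",
		.cra_priority		= 100,
		.cra_module		= THIS_MODULE,
	},
};

static int __init my_acomp_mod_init(void)
{
	return crypto_register_acomp(&my_acomp_alg);
}

static void __exit my_acomp_mod_exit(void)
{
	crypto_unregister_acomp(&my_acomp_alg);
}

module_init(my_acomp_mod_init);
module_exit(my_acomp_mod_exit);
MODULE_LICENSE("GPL");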
29023 --- /dev/null
29024 +++ b/include/crypto/internal/scompress.h
29025 @@ -0,0 +1,136 @@
29026 +/*
29027 + * Synchronous Compression operations
29028 + *
29029 + * Copyright 2015 LG Electronics Inc.
29030 + * Copyright (c) 2016, Intel Corporation
29031 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
29032 + *
29033 + * This program is free software; you can redistribute it and/or modify it
29034 + * under the terms of the GNU General Public License as published by the Free
29035 + * Software Foundation; either version 2 of the License, or (at your option)
29036 + * any later version.
29037 + *
29038 + */
29039 +#ifndef _CRYPTO_SCOMP_INT_H
29040 +#define _CRYPTO_SCOMP_INT_H
29041 +#include <linux/crypto.h>
29042 +
29043 +#define SCOMP_SCRATCH_SIZE 131072
29044 +
29045 +struct crypto_scomp {
29046 + struct crypto_tfm base;
29047 +};
29048 +
29049 +/**
29050 + * struct scomp_alg - synchronous compression algorithm
29051 + *
29052 + * @alloc_ctx: Function allocates algorithm specific context
29053 + * @free_ctx: Function frees context allocated with alloc_ctx
29054 + * @compress: Function performs a compress operation
29055 + * @decompress: Function performs a de-compress operation
29056 + * @init: Initialize the cryptographic transformation object.
29057 + * This function is used to initialize the cryptographic
29058 + * transformation object. This function is called only once at
29059 + * the instantiation time, right after the transformation context
29060 + * was allocated. In case the cryptographic hardware has some
29061 + * special requirements which need to be handled by software, this
29062 + * function shall check for the precise requirement of the
29063 + * transformation and put any software fallbacks in place.
29064 + * @exit: Deinitialize the cryptographic transformation object. This is a
29065 + * counterpart to @init, used to remove various changes set in
29066 + * @init.
29067 + * @base: Common crypto API algorithm data structure
29068 + */
29069 +struct scomp_alg {
29070 + void *(*alloc_ctx)(struct crypto_scomp *tfm);
29071 + void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
29072 + int (*compress)(struct crypto_scomp *tfm, const u8 *src,
29073 + unsigned int slen, u8 *dst, unsigned int *dlen,
29074 + void *ctx);
29075 + int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
29076 + unsigned int slen, u8 *dst, unsigned int *dlen,
29077 + void *ctx);
29078 + struct crypto_alg base;
29079 +};
29080 +
29081 +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
29082 +{
29083 + return container_of(alg, struct scomp_alg, base);
29084 +}
29085 +
29086 +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
29087 +{
29088 + return container_of(tfm, struct crypto_scomp, base);
29089 +}
29090 +
29091 +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
29092 +{
29093 + return &tfm->base;
29094 +}
29095 +
29096 +static inline void crypto_free_scomp(struct crypto_scomp *tfm)
29097 +{
29098 + crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
29099 +}
29100 +
29101 +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
29102 +{
29103 + return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
29104 +}
29105 +
29106 +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
29107 +{
29108 + return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
29109 +}
29110 +
29111 +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
29112 + void *ctx)
29113 +{
29114 + return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
29115 +}
29116 +
29117 +static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
29118 + const u8 *src, unsigned int slen,
29119 + u8 *dst, unsigned int *dlen, void *ctx)
29120 +{
29121 + return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
29122 +}
29123 +
29124 +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
29125 + const u8 *src, unsigned int slen,
29126 + u8 *dst, unsigned int *dlen,
29127 + void *ctx)
29128 +{
29129 + return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
29130 + ctx);
29131 +}
29132 +
29133 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
29134 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
29135 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
29136 +
29137 +/**
29138 + * crypto_register_scomp() -- Register synchronous compression algorithm
29139 + *
29140 + * Function registers an implementation of a synchronous
29141 + * compression algorithm
29142 + *
29143 + * @alg: algorithm definition
29144 + *
29145 + * Return: zero on success; error code in case of error
29146 + */
29147 +int crypto_register_scomp(struct scomp_alg *alg);
29148 +
29149 +/**
29150 + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
29151 + *
29152 + * Function unregisters an implementation of a synchronous
29153 + * compression algorithm
29154 + *
29155 + * @alg: algorithm definition
29156 + *
29157 + * Return: zero on success; error code in case of error
29158 + */
29159 +int crypto_unregister_scomp(struct scomp_alg *alg);
29160 +
29161 +#endif
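The scomp contract is synchronous and context-based: a wrapper (such as crypto/scompress.c) allocates per-request working memory with alloc_ctx, runs the operation, then frees the context. A compact, hypothetical round trip using only the inline helpers declared above:

/* Hypothetical: one synchronous compression pass over an already-allocated tfm. */
static int my_scomp_roundtrip(struct crypto_scomp *tfm,
			      const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int *dlen)
{
	void *ctx = crypto_scomp_alloc_ctx(tfm);	/* per-request working memory */
	int ret;

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = crypto_scomp_compress(tfm, src, slen, dst, dlen, ctx);

	crypto_scomp_free_ctx(tfm, ctx);
	return ret;
}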
29162 --- a/include/linux/crypto.h
29163 +++ b/include/linux/crypto.h
29164 @@ -50,6 +50,8 @@
29165 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
29166 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
29167 #define CRYPTO_ALG_TYPE_KPP 0x00000008
29168 +#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
29169 +#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
29170 #define CRYPTO_ALG_TYPE_RNG 0x0000000c
29171 #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
29172 #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
29173 @@ -60,6 +62,7 @@
29174 #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
29175 #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
29176 #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
29177 +#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
29178
29179 #define CRYPTO_ALG_LARVAL 0x00000010
29180 #define CRYPTO_ALG_DEAD 0x00000020
29181 --- a/include/uapi/linux/cryptouser.h
29182 +++ b/include/uapi/linux/cryptouser.h
29183 @@ -46,6 +46,7 @@ enum crypto_attr_type_t {
29184 CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
29185 CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
29186 CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
29187 + CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
29188 __CRYPTOCFGA_MAX
29189
29190 #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
29191 @@ -112,5 +113,9 @@ struct crypto_report_kpp {
29192 char type[CRYPTO_MAX_NAME];
29193 };
29194
29195 +struct crypto_report_acomp {
29196 + char type[CRYPTO_MAX_NAME];
29197 +};
29198 +
29199 #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
29200 sizeof(struct crypto_report_blkcipher))
29201 --- a/scripts/spelling.txt
29202 +++ b/scripts/spelling.txt
29203 @@ -305,6 +305,9 @@ defintion||definition
29204 defintions||definitions
29205 defualt||default
29206 defult||default
29207 +deintializing||deinitializing
29208 +deintialize||deinitialize
29209 +deintialized||deinitialized
29210 deivce||device
29211 delared||declared
29212 delare||declare
29213 --- a/sound/soc/amd/acp-pcm-dma.c
29214 +++ b/sound/soc/amd/acp-pcm-dma.c
29215 @@ -506,7 +506,7 @@ static int acp_init(void __iomem *acp_mm
29216 return 0;
29217 }
29218
29219 -/* Deintialize ACP */
29220 +/* Deinitialize ACP */
29221 static int acp_deinit(void __iomem *acp_mmio)
29222 {
29223 u32 val;