target/linux/layerscape/patches-4.9/804-crypto-support-layerscape.patch
1 From a3310d64d7cb1ba0f9279e77d21f13a75fa66ab5 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 17 Jan 2018 15:29:23 +0800
4 Subject: [PATCH 16/30] crypto: support layerscape
5 MIME-Version: 1.0
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
8
9 This is an integrated patch for layerscape sec support.
10
11 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
12 Signed-off-by: Fabio Estevam <festevam@gmail.com>
13 Signed-off-by: Arnd Bergmann <arnd@arndb.de>
14 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
15 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
16 Signed-off-by: Eric Biggers <ebiggers@google.com>
17 Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
18 Signed-off-by: Xulin Sun <xulin.sun@windriver.com>
19 Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
20 Signed-off-by: Marcus Folkesson <marcus.folkesson@gmail.com>
21 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
22 Signed-off-by: Andrew Lutomirski <luto@kernel.org>
23 Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
24 Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
25 Signed-off-by: Marcelo Cerri <marcelo.cerri@canonical.com>
26 Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
27 Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
28 Signed-off-by: Laura Abbott <labbott@redhat.com>
29 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
30 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
31 ---
32 crypto/Kconfig | 30 +
33 crypto/Makefile | 4 +
34 crypto/acompress.c | 169 +
35 crypto/algboss.c | 12 +-
36 crypto/crypto_user.c | 19 +
37 crypto/scompress.c | 356 ++
38 crypto/tcrypt.c | 17 +-
39 crypto/testmgr.c | 1708 +++---
40 crypto/testmgr.h | 1125 ++--
41 crypto/tls.c | 607 +++
42 drivers/crypto/caam/Kconfig | 77 +-
43 drivers/crypto/caam/Makefile | 16 +-
44 drivers/crypto/caam/caamalg.c | 2171 ++------
45 drivers/crypto/caam/caamalg_desc.c | 1961 +++++++
46 drivers/crypto/caam/caamalg_desc.h | 127 +
47 drivers/crypto/caam/caamalg_qi.c | 2929 ++++++++++
48 drivers/crypto/caam/caamalg_qi2.c | 5920 +++++++++++++++++++++
49 drivers/crypto/caam/caamalg_qi2.h | 281 +
50 drivers/crypto/caam/caamhash.c | 550 +-
51 drivers/crypto/caam/caamhash_desc.c | 108 +
52 drivers/crypto/caam/caamhash_desc.h | 49 +
53 drivers/crypto/caam/caampkc.c | 471 +-
54 drivers/crypto/caam/caampkc.h | 58 +
55 drivers/crypto/caam/caamrng.c | 16 +-
56 drivers/crypto/caam/compat.h | 1 +
57 drivers/crypto/caam/ctrl.c | 358 +-
58 drivers/crypto/caam/ctrl.h | 2 +
59 drivers/crypto/caam/desc.h | 84 +-
60 drivers/crypto/caam/desc_constr.h | 180 +-
61 drivers/crypto/caam/dpseci.c | 859 +++
62 drivers/crypto/caam/dpseci.h | 395 ++
63 drivers/crypto/caam/dpseci_cmd.h | 261 +
64 drivers/crypto/caam/error.c | 127 +-
65 drivers/crypto/caam/error.h | 10 +-
66 drivers/crypto/caam/intern.h | 31 +-
67 drivers/crypto/caam/jr.c | 72 +-
68 drivers/crypto/caam/jr.h | 2 +
69 drivers/crypto/caam/key_gen.c | 32 +-
70 drivers/crypto/caam/key_gen.h | 36 +-
71 drivers/crypto/caam/pdb.h | 62 +
72 drivers/crypto/caam/pkc_desc.c | 36 +
73 drivers/crypto/caam/qi.c | 797 +++
74 drivers/crypto/caam/qi.h | 204 +
75 drivers/crypto/caam/regs.h | 63 +-
76 drivers/crypto/caam/sg_sw_qm.h | 126 +
77 drivers/crypto/caam/sg_sw_qm2.h | 81 +
78 drivers/crypto/caam/sg_sw_sec4.h | 60 +-
79 drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +-
80 drivers/staging/wilc1000/linux_wlan.c | 2 +-
81 drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +-
82 include/crypto/acompress.h | 269 +
83 include/crypto/internal/acompress.h | 81 +
84 include/crypto/internal/scompress.h | 136 +
85 include/linux/crypto.h | 3 +
86 include/uapi/linux/cryptouser.h | 5 +
87 scripts/spelling.txt | 3 +
88 sound/soc/amd/acp-pcm-dma.c | 2 +-
89 57 files changed, 19177 insertions(+), 3988 deletions(-)
90 create mode 100644 crypto/acompress.c
91 create mode 100644 crypto/scompress.c
92 create mode 100644 crypto/tls.c
93 create mode 100644 drivers/crypto/caam/caamalg_desc.c
94 create mode 100644 drivers/crypto/caam/caamalg_desc.h
95 create mode 100644 drivers/crypto/caam/caamalg_qi.c
96 create mode 100644 drivers/crypto/caam/caamalg_qi2.c
97 create mode 100644 drivers/crypto/caam/caamalg_qi2.h
98 create mode 100644 drivers/crypto/caam/caamhash_desc.c
99 create mode 100644 drivers/crypto/caam/caamhash_desc.h
100 create mode 100644 drivers/crypto/caam/dpseci.c
101 create mode 100644 drivers/crypto/caam/dpseci.h
102 create mode 100644 drivers/crypto/caam/dpseci_cmd.h
103 create mode 100644 drivers/crypto/caam/qi.c
104 create mode 100644 drivers/crypto/caam/qi.h
105 create mode 100644 drivers/crypto/caam/sg_sw_qm.h
106 create mode 100644 drivers/crypto/caam/sg_sw_qm2.h
107 create mode 100644 include/crypto/acompress.h
108 create mode 100644 include/crypto/internal/acompress.h
109 create mode 100644 include/crypto/internal/scompress.h
110
111 --- a/crypto/Kconfig
112 +++ b/crypto/Kconfig
113 @@ -102,6 +102,15 @@ config CRYPTO_KPP
114 select CRYPTO_ALGAPI
115 select CRYPTO_KPP2
116
117 +config CRYPTO_ACOMP2
118 + tristate
119 + select CRYPTO_ALGAPI2
120 +
121 +config CRYPTO_ACOMP
122 + tristate
123 + select CRYPTO_ALGAPI
124 + select CRYPTO_ACOMP2
125 +
126 config CRYPTO_RSA
127 tristate "RSA algorithm"
128 select CRYPTO_AKCIPHER
129 @@ -138,6 +147,7 @@ config CRYPTO_MANAGER2
130 select CRYPTO_BLKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
131 select CRYPTO_AKCIPHER2 if !CRYPTO_MANAGER_DISABLE_TESTS
132 select CRYPTO_KPP2 if !CRYPTO_MANAGER_DISABLE_TESTS
133 + select CRYPTO_ACOMP2 if !CRYPTO_MANAGER_DISABLE_TESTS
134
135 config CRYPTO_USER
136 tristate "Userspace cryptographic algorithm configuration"
137 @@ -295,6 +305,26 @@ config CRYPTO_ECHAINIV
138 a sequence number xored with a salt. This is the default
139 algorithm for CBC.
140
141 +config CRYPTO_TLS
142 + tristate "TLS support"
143 + select CRYPTO_AEAD
144 + select CRYPTO_BLKCIPHER
145 + select CRYPTO_MANAGER
146 + select CRYPTO_HASH
147 + select CRYPTO_NULL
148 + select CRYPTO_AUTHENC
149 + help
150 + Support for TLS 1.0 record encryption and decryption
151 +
152 + This module adds support for encryption/decryption of TLS 1.0 frames
153 + using blockcipher algorithms. The name of the resulting algorithm is
154 + "tls10(hmac(<digest>),cbc(<cipher>))". By default, the generic base
155 + algorithms are used (e.g. aes-generic, sha1-generic), but hardware
156 + accelerated versions will be used automatically if available.
157 +
158 + User-space applications (OpenSSL, GnuTLS) can offload TLS 1.0
159 + operations through AF_ALG or cryptodev interfaces
160 +
161 comment "Block modes"
162
163 config CRYPTO_CBC
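
For orientation, here is a minimal kernel-side sketch (not part of the patch) of how the tls10 AEAD enabled by CRYPTO_TLS above would be requested through the normal AEAD API. The algorithm name and the 20-byte HMAC-SHA1 authsize match what the tcrypt/testmgr changes later in this patch exercise; the key blob layout is assumed to follow the usual authenc rtattr convention, and the scatterlist/IV setup is elided. The same algorithm can also be reached from user space through the AF_ALG "aead" socket type, as the help text notes.

/* Illustrative sketch only -- not part of this patch. */
#include <crypto/aead.h>
#include <linux/err.h>

static int tls10_example(const u8 *key, unsigned int keylen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int err;

	/* Same name that tcrypt mode 191 and testmgr use below. */
	tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Key is assumed to be packed like an authenc key blob. */
	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	/* 20-byte ICV, matching HMAC-SHA1 as in testmgr's __test_tls(). */
	err = crypto_aead_setauthsize(tfm, 20);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/*
	 * aead_request_set_crypt()/aead_request_set_ad() would be called
	 * here with the record scatterlists, assoclen and IV, followed by
	 * crypto_aead_encrypt(req) or crypto_aead_decrypt(req).
	 */

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}
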
164 --- a/crypto/Makefile
165 +++ b/crypto/Makefile
166 @@ -51,6 +51,9 @@ rsa_generic-y += rsa_helper.o
167 rsa_generic-y += rsa-pkcs1pad.o
168 obj-$(CONFIG_CRYPTO_RSA) += rsa_generic.o
169
170 +obj-$(CONFIG_CRYPTO_ACOMP2) += acompress.o
171 +obj-$(CONFIG_CRYPTO_ACOMP2) += scompress.o
172 +
173 cryptomgr-y := algboss.o testmgr.o
174
175 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
176 @@ -115,6 +118,7 @@ obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_ge
177 obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
178 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
179 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
180 +obj-$(CONFIG_CRYPTO_TLS) += tls.o
181 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
182 obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
183 obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
184 --- /dev/null
185 +++ b/crypto/acompress.c
186 @@ -0,0 +1,169 @@
187 +/*
188 + * Asynchronous Compression operations
189 + *
190 + * Copyright (c) 2016, Intel Corporation
191 + * Authors: Weigang Li <weigang.li@intel.com>
192 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
193 + *
194 + * This program is free software; you can redistribute it and/or modify it
195 + * under the terms of the GNU General Public License as published by the Free
196 + * Software Foundation; either version 2 of the License, or (at your option)
197 + * any later version.
198 + *
199 + */
200 +#include <linux/errno.h>
201 +#include <linux/kernel.h>
202 +#include <linux/module.h>
203 +#include <linux/seq_file.h>
204 +#include <linux/slab.h>
205 +#include <linux/string.h>
206 +#include <linux/crypto.h>
207 +#include <crypto/algapi.h>
208 +#include <linux/cryptouser.h>
209 +#include <net/netlink.h>
210 +#include <crypto/internal/acompress.h>
211 +#include <crypto/internal/scompress.h>
212 +#include "internal.h"
213 +
214 +static const struct crypto_type crypto_acomp_type;
215 +
216 +#ifdef CONFIG_NET
217 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
218 +{
219 + struct crypto_report_acomp racomp;
220 +
221 + strncpy(racomp.type, "acomp", sizeof(racomp.type));
222 +
223 + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
224 + sizeof(struct crypto_report_acomp), &racomp))
225 + goto nla_put_failure;
226 + return 0;
227 +
228 +nla_put_failure:
229 + return -EMSGSIZE;
230 +}
231 +#else
232 +static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
233 +{
234 + return -ENOSYS;
235 +}
236 +#endif
237 +
238 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
239 + __attribute__ ((unused));
240 +
241 +static void crypto_acomp_show(struct seq_file *m, struct crypto_alg *alg)
242 +{
243 + seq_puts(m, "type : acomp\n");
244 +}
245 +
246 +static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
247 +{
248 + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
249 + struct acomp_alg *alg = crypto_acomp_alg(acomp);
250 +
251 + alg->exit(acomp);
252 +}
253 +
254 +static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
255 +{
256 + struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
257 + struct acomp_alg *alg = crypto_acomp_alg(acomp);
258 +
259 + if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
260 + return crypto_init_scomp_ops_async(tfm);
261 +
262 + acomp->compress = alg->compress;
263 + acomp->decompress = alg->decompress;
264 + acomp->dst_free = alg->dst_free;
265 + acomp->reqsize = alg->reqsize;
266 +
267 + if (alg->exit)
268 + acomp->base.exit = crypto_acomp_exit_tfm;
269 +
270 + if (alg->init)
271 + return alg->init(acomp);
272 +
273 + return 0;
274 +}
275 +
276 +static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
277 +{
278 + int extsize = crypto_alg_extsize(alg);
279 +
280 + if (alg->cra_type != &crypto_acomp_type)
281 + extsize += sizeof(struct crypto_scomp *);
282 +
283 + return extsize;
284 +}
285 +
286 +static const struct crypto_type crypto_acomp_type = {
287 + .extsize = crypto_acomp_extsize,
288 + .init_tfm = crypto_acomp_init_tfm,
289 +#ifdef CONFIG_PROC_FS
290 + .show = crypto_acomp_show,
291 +#endif
292 + .report = crypto_acomp_report,
293 + .maskclear = ~CRYPTO_ALG_TYPE_MASK,
294 + .maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
295 + .type = CRYPTO_ALG_TYPE_ACOMPRESS,
296 + .tfmsize = offsetof(struct crypto_acomp, base),
297 +};
298 +
299 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
300 + u32 mask)
301 +{
302 + return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
303 +}
304 +EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
305 +
306 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
307 +{
308 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
309 + struct acomp_req *req;
310 +
311 + req = __acomp_request_alloc(acomp);
312 + if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type))
313 + return crypto_acomp_scomp_alloc_ctx(req);
314 +
315 + return req;
316 +}
317 +EXPORT_SYMBOL_GPL(acomp_request_alloc);
318 +
319 +void acomp_request_free(struct acomp_req *req)
320 +{
321 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
322 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
323 +
324 + if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
325 + crypto_acomp_scomp_free_ctx(req);
326 +
327 + if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) {
328 + acomp->dst_free(req->dst);
329 + req->dst = NULL;
330 + }
331 +
332 + __acomp_request_free(req);
333 +}
334 +EXPORT_SYMBOL_GPL(acomp_request_free);
335 +
336 +int crypto_register_acomp(struct acomp_alg *alg)
337 +{
338 + struct crypto_alg *base = &alg->base;
339 +
340 + base->cra_type = &crypto_acomp_type;
341 + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
342 + base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
343 +
344 + return crypto_register_alg(base);
345 +}
346 +EXPORT_SYMBOL_GPL(crypto_register_acomp);
347 +
348 +int crypto_unregister_acomp(struct acomp_alg *alg)
349 +{
350 + return crypto_unregister_alg(&alg->base);
351 +}
352 +EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
353 +
354 +MODULE_LICENSE("GPL");
355 +MODULE_DESCRIPTION("Asynchronous compression type");
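
As a usage illustration for the interface just added (again, not part of the patch), the sketch below compresses one source scatterlist into a destination scatterlist with the exported crypto_alloc_acomp()/acomp_request_alloc() helpers. The completion wrapper mirrors the tcrypt_result pattern testmgr uses further down; "deflate" is only an assumed example algorithm name.

#include <crypto/acompress.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct acomp_example_result {
	struct completion done;
	int err;
};

static void acomp_example_complete(struct crypto_async_request *base, int err)
{
	struct acomp_example_result *res = base->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->done);
}

static int acomp_example(struct scatterlist *src, unsigned int slen,
			 struct scatterlist *dst, unsigned int dlen)
{
	struct acomp_example_result res;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int err;

	tfm = crypto_alloc_acomp("deflate", 0, 0);	/* example name only */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.done);
	acomp_request_set_params(req, src, dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   acomp_example_complete, &res);

	err = crypto_acomp_compress(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.done);
		err = res.err;
	}
	/* On success, req->dlen holds the number of bytes produced. */

	acomp_request_free(req);
out_free_tfm:
	crypto_free_acomp(tfm);
	return err;
}
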
356 --- a/crypto/algboss.c
357 +++ b/crypto/algboss.c
358 @@ -247,17 +247,9 @@ static int cryptomgr_schedule_test(struc
359 memcpy(param->alg, alg->cra_name, sizeof(param->alg));
360 type = alg->cra_flags;
361
362 - /* This piece of crap needs to disappear into per-type test hooks. */
363 -#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
364 - type |= CRYPTO_ALG_TESTED;
365 -#else
366 - if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
367 - CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
368 - ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
369 - CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
370 - alg->cra_ablkcipher.ivsize))
371 + /* Do not test internal algorithms. */
372 + if (type & CRYPTO_ALG_INTERNAL)
373 type |= CRYPTO_ALG_TESTED;
374 -#endif
375
376 param->type = type;
377
378 --- a/crypto/crypto_user.c
379 +++ b/crypto/crypto_user.c
380 @@ -115,6 +115,21 @@ nla_put_failure:
381 return -EMSGSIZE;
382 }
383
384 +static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
385 +{
386 + struct crypto_report_acomp racomp;
387 +
388 + strncpy(racomp.type, "acomp", sizeof(racomp.type));
389 +
390 + if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
391 + sizeof(struct crypto_report_acomp), &racomp))
392 + goto nla_put_failure;
393 + return 0;
394 +
395 +nla_put_failure:
396 + return -EMSGSIZE;
397 +}
398 +
399 static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
400 {
401 struct crypto_report_akcipher rakcipher;
402 @@ -189,7 +204,11 @@ static int crypto_report_one(struct cryp
403 goto nla_put_failure;
404
405 break;
406 + case CRYPTO_ALG_TYPE_ACOMPRESS:
407 + if (crypto_report_acomp(skb, alg))
408 + goto nla_put_failure;
409
410 + break;
411 case CRYPTO_ALG_TYPE_AKCIPHER:
412 if (crypto_report_akcipher(skb, alg))
413 goto nla_put_failure;
414 --- /dev/null
415 +++ b/crypto/scompress.c
416 @@ -0,0 +1,356 @@
417 +/*
418 + * Synchronous Compression operations
419 + *
420 + * Copyright 2015 LG Electronics Inc.
421 + * Copyright (c) 2016, Intel Corporation
422 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
423 + *
424 + * This program is free software; you can redistribute it and/or modify it
425 + * under the terms of the GNU General Public License as published by the Free
426 + * Software Foundation; either version 2 of the License, or (at your option)
427 + * any later version.
428 + *
429 + */
430 +#include <linux/errno.h>
431 +#include <linux/kernel.h>
432 +#include <linux/module.h>
433 +#include <linux/seq_file.h>
434 +#include <linux/slab.h>
435 +#include <linux/string.h>
436 +#include <linux/crypto.h>
437 +#include <linux/vmalloc.h>
438 +#include <crypto/algapi.h>
439 +#include <linux/cryptouser.h>
440 +#include <net/netlink.h>
441 +#include <linux/scatterlist.h>
442 +#include <crypto/scatterwalk.h>
443 +#include <crypto/internal/acompress.h>
444 +#include <crypto/internal/scompress.h>
445 +#include "internal.h"
446 +
447 +static const struct crypto_type crypto_scomp_type;
448 +static void * __percpu *scomp_src_scratches;
449 +static void * __percpu *scomp_dst_scratches;
450 +static int scomp_scratch_users;
451 +static DEFINE_MUTEX(scomp_lock);
452 +
453 +#ifdef CONFIG_NET
454 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
455 +{
456 + struct crypto_report_comp rscomp;
457 +
458 + strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
459 +
460 + if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
461 + sizeof(struct crypto_report_comp), &rscomp))
462 + goto nla_put_failure;
463 + return 0;
464 +
465 +nla_put_failure:
466 + return -EMSGSIZE;
467 +}
468 +#else
469 +static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
470 +{
471 + return -ENOSYS;
472 +}
473 +#endif
474 +
475 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
476 + __attribute__ ((unused));
477 +
478 +static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
479 +{
480 + seq_puts(m, "type : scomp\n");
481 +}
482 +
483 +static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
484 +{
485 + return 0;
486 +}
487 +
488 +static void crypto_scomp_free_scratches(void * __percpu *scratches)
489 +{
490 + int i;
491 +
492 + if (!scratches)
493 + return;
494 +
495 + for_each_possible_cpu(i)
496 + vfree(*per_cpu_ptr(scratches, i));
497 +
498 + free_percpu(scratches);
499 +}
500 +
501 +static void * __percpu *crypto_scomp_alloc_scratches(void)
502 +{
503 + void * __percpu *scratches;
504 + int i;
505 +
506 + scratches = alloc_percpu(void *);
507 + if (!scratches)
508 + return NULL;
509 +
510 + for_each_possible_cpu(i) {
511 + void *scratch;
512 +
513 + scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
514 + if (!scratch)
515 + goto error;
516 + *per_cpu_ptr(scratches, i) = scratch;
517 + }
518 +
519 + return scratches;
520 +
521 +error:
522 + crypto_scomp_free_scratches(scratches);
523 + return NULL;
524 +}
525 +
526 +static void crypto_scomp_free_all_scratches(void)
527 +{
528 + if (!--scomp_scratch_users) {
529 + crypto_scomp_free_scratches(scomp_src_scratches);
530 + crypto_scomp_free_scratches(scomp_dst_scratches);
531 + scomp_src_scratches = NULL;
532 + scomp_dst_scratches = NULL;
533 + }
534 +}
535 +
536 +static int crypto_scomp_alloc_all_scratches(void)
537 +{
538 + if (!scomp_scratch_users++) {
539 + scomp_src_scratches = crypto_scomp_alloc_scratches();
540 + if (!scomp_src_scratches)
541 + return -ENOMEM;
542 + scomp_dst_scratches = crypto_scomp_alloc_scratches();
543 + if (!scomp_dst_scratches)
544 + return -ENOMEM;
545 + }
546 + return 0;
547 +}
548 +
549 +static void crypto_scomp_sg_free(struct scatterlist *sgl)
550 +{
551 + int i, n;
552 + struct page *page;
553 +
554 + if (!sgl)
555 + return;
556 +
557 + n = sg_nents(sgl);
558 + for_each_sg(sgl, sgl, n, i) {
559 + page = sg_page(sgl);
560 + if (page)
561 + __free_page(page);
562 + }
563 +
564 + kfree(sgl);
565 +}
566 +
567 +static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
568 +{
569 + struct scatterlist *sgl;
570 + struct page *page;
571 + int i, n;
572 +
573 + n = ((size - 1) >> PAGE_SHIFT) + 1;
574 +
575 + sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
576 + if (!sgl)
577 + return NULL;
578 +
579 + sg_init_table(sgl, n);
580 +
581 + for (i = 0; i < n; i++) {
582 + page = alloc_page(gfp);
583 + if (!page)
584 + goto err;
585 + sg_set_page(sgl + i, page, PAGE_SIZE, 0);
586 + }
587 +
588 + return sgl;
589 +
590 +err:
591 + sg_mark_end(sgl + i);
592 + crypto_scomp_sg_free(sgl);
593 + return NULL;
594 +}
595 +
596 +static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
597 +{
598 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
599 + void **tfm_ctx = acomp_tfm_ctx(tfm);
600 + struct crypto_scomp *scomp = *tfm_ctx;
601 + void **ctx = acomp_request_ctx(req);
602 + const int cpu = get_cpu();
603 + u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
604 + u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
605 + int ret;
606 +
607 + if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
608 + ret = -EINVAL;
609 + goto out;
610 + }
611 +
612 + if (req->dst && !req->dlen) {
613 + ret = -EINVAL;
614 + goto out;
615 + }
616 +
617 + if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
618 + req->dlen = SCOMP_SCRATCH_SIZE;
619 +
620 + scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
621 + if (dir)
622 + ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
623 + scratch_dst, &req->dlen, *ctx);
624 + else
625 + ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
626 + scratch_dst, &req->dlen, *ctx);
627 + if (!ret) {
628 + if (!req->dst) {
629 + req->dst = crypto_scomp_sg_alloc(req->dlen,
630 + req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
631 + GFP_KERNEL : GFP_ATOMIC);
632 + if (!req->dst)
633 + goto out;
634 + }
635 + scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
636 + 1);
637 + }
638 +out:
639 + put_cpu();
640 + return ret;
641 +}
642 +
643 +static int scomp_acomp_compress(struct acomp_req *req)
644 +{
645 + return scomp_acomp_comp_decomp(req, 1);
646 +}
647 +
648 +static int scomp_acomp_decompress(struct acomp_req *req)
649 +{
650 + return scomp_acomp_comp_decomp(req, 0);
651 +}
652 +
653 +static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
654 +{
655 + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
656 +
657 + crypto_free_scomp(*ctx);
658 +}
659 +
660 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
661 +{
662 + struct crypto_alg *calg = tfm->__crt_alg;
663 + struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
664 + struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
665 + struct crypto_scomp *scomp;
666 +
667 + if (!crypto_mod_get(calg))
668 + return -EAGAIN;
669 +
670 + scomp = crypto_create_tfm(calg, &crypto_scomp_type);
671 + if (IS_ERR(scomp)) {
672 + crypto_mod_put(calg);
673 + return PTR_ERR(scomp);
674 + }
675 +
676 + *ctx = scomp;
677 + tfm->exit = crypto_exit_scomp_ops_async;
678 +
679 + crt->compress = scomp_acomp_compress;
680 + crt->decompress = scomp_acomp_decompress;
681 + crt->dst_free = crypto_scomp_sg_free;
682 + crt->reqsize = sizeof(void *);
683 +
684 + return 0;
685 +}
686 +
687 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
688 +{
689 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
690 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
691 + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
692 + struct crypto_scomp *scomp = *tfm_ctx;
693 + void *ctx;
694 +
695 + ctx = crypto_scomp_alloc_ctx(scomp);
696 + if (IS_ERR(ctx)) {
697 + kfree(req);
698 + return NULL;
699 + }
700 +
701 + *req->__ctx = ctx;
702 +
703 + return req;
704 +}
705 +
706 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
707 +{
708 + struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
709 + struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
710 + struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
711 + struct crypto_scomp *scomp = *tfm_ctx;
712 + void *ctx = *req->__ctx;
713 +
714 + if (ctx)
715 + crypto_scomp_free_ctx(scomp, ctx);
716 +}
717 +
718 +static const struct crypto_type crypto_scomp_type = {
719 + .extsize = crypto_alg_extsize,
720 + .init_tfm = crypto_scomp_init_tfm,
721 +#ifdef CONFIG_PROC_FS
722 + .show = crypto_scomp_show,
723 +#endif
724 + .report = crypto_scomp_report,
725 + .maskclear = ~CRYPTO_ALG_TYPE_MASK,
726 + .maskset = CRYPTO_ALG_TYPE_MASK,
727 + .type = CRYPTO_ALG_TYPE_SCOMPRESS,
728 + .tfmsize = offsetof(struct crypto_scomp, base),
729 +};
730 +
731 +int crypto_register_scomp(struct scomp_alg *alg)
732 +{
733 + struct crypto_alg *base = &alg->base;
734 + int ret = -ENOMEM;
735 +
736 + mutex_lock(&scomp_lock);
737 + if (crypto_scomp_alloc_all_scratches())
738 + goto error;
739 +
740 + base->cra_type = &crypto_scomp_type;
741 + base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
742 + base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
743 +
744 + ret = crypto_register_alg(base);
745 + if (ret)
746 + goto error;
747 +
748 + mutex_unlock(&scomp_lock);
749 + return ret;
750 +
751 +error:
752 + crypto_scomp_free_all_scratches();
753 + mutex_unlock(&scomp_lock);
754 + return ret;
755 +}
756 +EXPORT_SYMBOL_GPL(crypto_register_scomp);
757 +
758 +int crypto_unregister_scomp(struct scomp_alg *alg)
759 +{
760 + int ret;
761 +
762 + mutex_lock(&scomp_lock);
763 + ret = crypto_unregister_alg(&alg->base);
764 + crypto_scomp_free_all_scratches();
765 + mutex_unlock(&scomp_lock);
766 +
767 + return ret;
768 +}
769 +EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
770 +
771 +MODULE_LICENSE("GPL");
772 +MODULE_DESCRIPTION("Synchronous compression type");
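
The file above is the glue that lets a purely synchronous compressor be driven through the acomp API. For the provider side, a rough sketch of registering such an algorithm follows; the struct scomp_alg field names and callback signatures are assumed from include/crypto/internal/scompress.h (created elsewhere in this patch), and the "example-copy" algorithm is a placeholder that merely copies bytes. This is illustrative only, not part of the patch.

#include <crypto/internal/scompress.h>
#include <linux/module.h>
#include <linux/string.h>

/* Placeholder callbacks: no per-request state, "compression" is a copy. */
static void *example_scomp_alloc_ctx(struct crypto_scomp *tfm)
{
	return NULL;
}

static void example_scomp_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
}

static int example_scomp_copy(struct crypto_scomp *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen,
			      void *ctx)
{
	if (*dlen < slen)
		return -ENOSPC;
	memcpy(dst, src, slen);
	*dlen = slen;
	return 0;
}

static struct scomp_alg example_scomp = {
	.alloc_ctx	= example_scomp_alloc_ctx,
	.free_ctx	= example_scomp_free_ctx,
	.compress	= example_scomp_copy,
	.decompress	= example_scomp_copy,
	.base		= {
		.cra_name	 = "example-copy",
		.cra_driver_name = "example-copy-generic",
		.cra_priority	 = 100,
		.cra_module	 = THIS_MODULE,
	},
};

static int __init example_scomp_init(void)
{
	return crypto_register_scomp(&example_scomp);
}

static void __exit example_scomp_exit(void)
{
	crypto_unregister_scomp(&example_scomp);
}

module_init(example_scomp_init);
module_exit(example_scomp_exit);
MODULE_LICENSE("GPL");
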
773 --- a/crypto/tcrypt.c
774 +++ b/crypto/tcrypt.c
775 @@ -74,7 +74,7 @@ static char *check[] = {
776 "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
777 "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
778 "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
779 - NULL
780 + "rsa", NULL
781 };
782
783 struct tcrypt_result {
784 @@ -1336,6 +1336,10 @@ static int do_test(const char *alg, u32
785 ret += tcrypt_test("hmac(sha3-512)");
786 break;
787
788 + case 115:
789 + ret += tcrypt_test("rsa");
790 + break;
791 +
792 case 150:
793 ret += tcrypt_test("ansi_cprng");
794 break;
795 @@ -1397,6 +1401,9 @@ static int do_test(const char *alg, u32
796 case 190:
797 ret += tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))");
798 break;
799 + case 191:
800 + ret += tcrypt_test("tls10(hmac(sha1),cbc(aes))");
801 + break;
802 case 200:
803 test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
804 speed_template_16_24_32);
805 @@ -1411,9 +1418,9 @@ static int do_test(const char *alg, u32
806 test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
807 speed_template_32_40_48);
808 test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
809 - speed_template_32_48_64);
810 + speed_template_32_64);
811 test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
812 - speed_template_32_48_64);
813 + speed_template_32_64);
814 test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
815 speed_template_16_24_32);
816 test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
817 @@ -1844,9 +1851,9 @@ static int do_test(const char *alg, u32
818 test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
819 speed_template_32_40_48);
820 test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
821 - speed_template_32_48_64);
822 + speed_template_32_64);
823 test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
824 - speed_template_32_48_64);
825 + speed_template_32_64);
826 test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
827 speed_template_16_24_32);
828 test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
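
(A practical note on the tcrypt hunks above, not part of the patch: with tcrypt built as a module, the new self-tests can be run in the usual way, e.g. "modprobe tcrypt mode=115" for the RSA vectors and "modprobe tcrypt mode=191" for tls10(hmac(sha1),cbc(aes)); tcrypt deliberately never stays loaded, so the results are read from the kernel log. The xts(aes) speed tests now use speed_template_32_64, i.e. 32- and 64-byte combined keys only, dropping the 48-byte case.)
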
829 --- a/crypto/testmgr.c
830 +++ b/crypto/testmgr.c
831 @@ -33,6 +33,7 @@
832 #include <crypto/drbg.h>
833 #include <crypto/akcipher.h>
834 #include <crypto/kpp.h>
835 +#include <crypto/acompress.h>
836
837 #include "internal.h"
838
839 @@ -62,7 +63,7 @@ int alg_test(const char *driver, const c
840 */
841 #define IDX1 32
842 #define IDX2 32400
843 -#define IDX3 1
844 +#define IDX3 1511
845 #define IDX4 8193
846 #define IDX5 22222
847 #define IDX6 17101
848 @@ -82,47 +83,54 @@ struct tcrypt_result {
849
850 struct aead_test_suite {
851 struct {
852 - struct aead_testvec *vecs;
853 + const struct aead_testvec *vecs;
854 unsigned int count;
855 } enc, dec;
856 };
857
858 struct cipher_test_suite {
859 struct {
860 - struct cipher_testvec *vecs;
861 + const struct cipher_testvec *vecs;
862 unsigned int count;
863 } enc, dec;
864 };
865
866 struct comp_test_suite {
867 struct {
868 - struct comp_testvec *vecs;
869 + const struct comp_testvec *vecs;
870 unsigned int count;
871 } comp, decomp;
872 };
873
874 struct hash_test_suite {
875 - struct hash_testvec *vecs;
876 + const struct hash_testvec *vecs;
877 unsigned int count;
878 };
879
880 struct cprng_test_suite {
881 - struct cprng_testvec *vecs;
882 + const struct cprng_testvec *vecs;
883 unsigned int count;
884 };
885
886 struct drbg_test_suite {
887 - struct drbg_testvec *vecs;
888 + const struct drbg_testvec *vecs;
889 unsigned int count;
890 };
891
892 +struct tls_test_suite {
893 + struct {
894 + struct tls_testvec *vecs;
895 + unsigned int count;
896 + } enc, dec;
897 +};
898 +
899 struct akcipher_test_suite {
900 - struct akcipher_testvec *vecs;
901 + const struct akcipher_testvec *vecs;
902 unsigned int count;
903 };
904
905 struct kpp_test_suite {
906 - struct kpp_testvec *vecs;
907 + const struct kpp_testvec *vecs;
908 unsigned int count;
909 };
910
911 @@ -139,12 +147,14 @@ struct alg_test_desc {
912 struct hash_test_suite hash;
913 struct cprng_test_suite cprng;
914 struct drbg_test_suite drbg;
915 + struct tls_test_suite tls;
916 struct akcipher_test_suite akcipher;
917 struct kpp_test_suite kpp;
918 } suite;
919 };
920
921 -static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
922 +static const unsigned int IDX[8] = {
923 + IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
924
925 static void hexdump(unsigned char *buf, unsigned int len)
926 {
927 @@ -202,7 +212,7 @@ static int wait_async_op(struct tcrypt_r
928 }
929
930 static int ahash_partial_update(struct ahash_request **preq,
931 - struct crypto_ahash *tfm, struct hash_testvec *template,
932 + struct crypto_ahash *tfm, const struct hash_testvec *template,
933 void *hash_buff, int k, int temp, struct scatterlist *sg,
934 const char *algo, char *result, struct tcrypt_result *tresult)
935 {
936 @@ -259,11 +269,12 @@ out_nostate:
937 return ret;
938 }
939
940 -static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
941 - unsigned int tcount, bool use_digest,
942 - const int align_offset)
943 +static int __test_hash(struct crypto_ahash *tfm,
944 + const struct hash_testvec *template, unsigned int tcount,
945 + bool use_digest, const int align_offset)
946 {
947 const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
948 + size_t digest_size = crypto_ahash_digestsize(tfm);
949 unsigned int i, j, k, temp;
950 struct scatterlist sg[8];
951 char *result;
952 @@ -274,7 +285,7 @@ static int __test_hash(struct crypto_aha
953 char *xbuf[XBUFSIZE];
954 int ret = -ENOMEM;
955
956 - result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
957 + result = kmalloc(digest_size, GFP_KERNEL);
958 if (!result)
959 return ret;
960 key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
961 @@ -304,7 +315,7 @@ static int __test_hash(struct crypto_aha
962 goto out;
963
964 j++;
965 - memset(result, 0, MAX_DIGEST_SIZE);
966 + memset(result, 0, digest_size);
967
968 hash_buff = xbuf[0];
969 hash_buff += align_offset;
970 @@ -379,7 +390,7 @@ static int __test_hash(struct crypto_aha
971 continue;
972
973 j++;
974 - memset(result, 0, MAX_DIGEST_SIZE);
975 + memset(result, 0, digest_size);
976
977 temp = 0;
978 sg_init_table(sg, template[i].np);
979 @@ -457,7 +468,7 @@ static int __test_hash(struct crypto_aha
980 continue;
981
982 j++;
983 - memset(result, 0, MAX_DIGEST_SIZE);
984 + memset(result, 0, digest_size);
985
986 ret = -EINVAL;
987 hash_buff = xbuf[0];
988 @@ -536,7 +547,8 @@ out_nobuf:
989 return ret;
990 }
991
992 -static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
993 +static int test_hash(struct crypto_ahash *tfm,
994 + const struct hash_testvec *template,
995 unsigned int tcount, bool use_digest)
996 {
997 unsigned int alignmask;
998 @@ -564,7 +576,7 @@ static int test_hash(struct crypto_ahash
999 }
1000
1001 static int __test_aead(struct crypto_aead *tfm, int enc,
1002 - struct aead_testvec *template, unsigned int tcount,
1003 + const struct aead_testvec *template, unsigned int tcount,
1004 const bool diff_dst, const int align_offset)
1005 {
1006 const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1007 @@ -955,7 +967,7 @@ out_noxbuf:
1008 }
1009
1010 static int test_aead(struct crypto_aead *tfm, int enc,
1011 - struct aead_testvec *template, unsigned int tcount)
1012 + const struct aead_testvec *template, unsigned int tcount)
1013 {
1014 unsigned int alignmask;
1015 int ret;
1016 @@ -987,8 +999,236 @@ static int test_aead(struct crypto_aead
1017 return 0;
1018 }
1019
1020 +static int __test_tls(struct crypto_aead *tfm, int enc,
1021 + struct tls_testvec *template, unsigned int tcount,
1022 + const bool diff_dst)
1023 +{
1024 + const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
1025 + unsigned int i, k, authsize;
1026 + char *q;
1027 + struct aead_request *req;
1028 + struct scatterlist *sg;
1029 + struct scatterlist *sgout;
1030 + const char *e, *d;
1031 + struct tcrypt_result result;
1032 + void *input;
1033 + void *output;
1034 + void *assoc;
1035 + char *iv;
1036 + char *key;
1037 + char *xbuf[XBUFSIZE];
1038 + char *xoutbuf[XBUFSIZE];
1039 + char *axbuf[XBUFSIZE];
1040 + int ret = -ENOMEM;
1041 +
1042 + if (testmgr_alloc_buf(xbuf))
1043 + goto out_noxbuf;
1044 +
1045 + if (diff_dst && testmgr_alloc_buf(xoutbuf))
1046 + goto out_nooutbuf;
1047 +
1048 + if (testmgr_alloc_buf(axbuf))
1049 + goto out_noaxbuf;
1050 +
1051 + iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
1052 + if (!iv)
1053 + goto out_noiv;
1054 +
1055 + key = kzalloc(MAX_KEYLEN, GFP_KERNEL);
1056 + if (!key)
1057 + goto out_nokey;
1058 +
1059 + sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 2 : 1), GFP_KERNEL);
1060 + if (!sg)
1061 + goto out_nosg;
1062 +
1063 + sgout = sg + 8;
1064 +
1065 + d = diff_dst ? "-ddst" : "";
1066 + e = enc ? "encryption" : "decryption";
1067 +
1068 + init_completion(&result.completion);
1069 +
1070 + req = aead_request_alloc(tfm, GFP_KERNEL);
1071 + if (!req) {
1072 + pr_err("alg: tls%s: Failed to allocate request for %s\n",
1073 + d, algo);
1074 + goto out;
1075 + }
1076 +
1077 + aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1078 + tcrypt_complete, &result);
1079 +
1080 + for (i = 0; i < tcount; i++) {
1081 + input = xbuf[0];
1082 + assoc = axbuf[0];
1083 +
1084 + ret = -EINVAL;
1085 + if (WARN_ON(template[i].ilen > PAGE_SIZE ||
1086 + template[i].alen > PAGE_SIZE))
1087 + goto out;
1088 +
1089 + memcpy(assoc, template[i].assoc, template[i].alen);
1090 + memcpy(input, template[i].input, template[i].ilen);
1091 +
1092 + if (template[i].iv)
1093 + memcpy(iv, template[i].iv, MAX_IVLEN);
1094 + else
1095 + memset(iv, 0, MAX_IVLEN);
1096 +
1097 + crypto_aead_clear_flags(tfm, ~0);
1098 +
1099 + if (template[i].klen > MAX_KEYLEN) {
1100 + pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
1101 + d, i, algo, template[i].klen, MAX_KEYLEN);
1102 + ret = -EINVAL;
1103 + goto out;
1104 + }
1105 + memcpy(key, template[i].key, template[i].klen);
1106 +
1107 + ret = crypto_aead_setkey(tfm, key, template[i].klen);
1108 + if (!ret == template[i].fail) {
1109 + pr_err("alg: tls%s: setkey failed on test %d for %s: flags=%x\n",
1110 + d, i, algo, crypto_aead_get_flags(tfm));
1111 + goto out;
1112 + } else if (ret)
1113 + continue;
1114 +
1115 + authsize = 20;
1116 + ret = crypto_aead_setauthsize(tfm, authsize);
1117 + if (ret) {
1118 + pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
1119 + d, authsize, i, algo);
1120 + goto out;
1121 + }
1122 +
1123 + k = !!template[i].alen;
1124 + sg_init_table(sg, k + 1);
1125 + sg_set_buf(&sg[0], assoc, template[i].alen);
1126 + sg_set_buf(&sg[k], input, (enc ? template[i].rlen :
1127 + template[i].ilen));
1128 + output = input;
1129 +
1130 + if (diff_dst) {
1131 + sg_init_table(sgout, k + 1);
1132 + sg_set_buf(&sgout[0], assoc, template[i].alen);
1133 +
1134 + output = xoutbuf[0];
1135 + sg_set_buf(&sgout[k], output,
1136 + (enc ? template[i].rlen : template[i].ilen));
1137 + }
1138 +
1139 + aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
1140 + template[i].ilen, iv);
1141 +
1142 + aead_request_set_ad(req, template[i].alen);
1143 +
1144 + ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
1145 +
1146 + switch (ret) {
1147 + case 0:
1148 + if (template[i].novrfy) {
1149 + /* verification was supposed to fail */
1150 + pr_err("alg: tls%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
1151 + d, e, i, algo);
1152 + /* so really, we got a bad message */
1153 + ret = -EBADMSG;
1154 + goto out;
1155 + }
1156 + break;
1157 + case -EINPROGRESS:
1158 + case -EBUSY:
1159 + wait_for_completion(&result.completion);
1160 + reinit_completion(&result.completion);
1161 + ret = result.err;
1162 + if (!ret)
1163 + break;
1164 + case -EBADMSG:
1165 + /* verification failure was expected */
1166 + if (template[i].novrfy)
1167 + continue;
1168 + /* fall through */
1169 + default:
1170 + pr_err("alg: tls%s: %s failed on test %d for %s: ret=%d\n",
1171 + d, e, i, algo, -ret);
1172 + goto out;
1173 + }
1174 +
1175 + q = output;
1176 + if (memcmp(q, template[i].result, template[i].rlen)) {
1177 + pr_err("alg: tls%s: Test %d failed on %s for %s\n",
1178 + d, i, e, algo);
1179 + hexdump(q, template[i].rlen);
1180 + pr_err("should be:\n");
1181 + hexdump(template[i].result, template[i].rlen);
1182 + ret = -EINVAL;
1183 + goto out;
1184 + }
1185 + }
1186 +
1187 +out:
1188 + aead_request_free(req);
1189 +
1190 + kfree(sg);
1191 +out_nosg:
1192 + kfree(key);
1193 +out_nokey:
1194 + kfree(iv);
1195 +out_noiv:
1196 + testmgr_free_buf(axbuf);
1197 +out_noaxbuf:
1198 + if (diff_dst)
1199 + testmgr_free_buf(xoutbuf);
1200 +out_nooutbuf:
1201 + testmgr_free_buf(xbuf);
1202 +out_noxbuf:
1203 + return ret;
1204 +}
1205 +
1206 +static int test_tls(struct crypto_aead *tfm, int enc,
1207 + struct tls_testvec *template, unsigned int tcount)
1208 +{
1209 + int ret;
1210 + /* test 'dst == src' case */
1211 + ret = __test_tls(tfm, enc, template, tcount, false);
1212 + if (ret)
1213 + return ret;
1214 + /* test 'dst != src' case */
1215 + return __test_tls(tfm, enc, template, tcount, true);
1216 +}
1217 +
1218 +static int alg_test_tls(const struct alg_test_desc *desc, const char *driver,
1219 + u32 type, u32 mask)
1220 +{
1221 + struct crypto_aead *tfm;
1222 + int err = 0;
1223 +
1224 + tfm = crypto_alloc_aead(driver, type, mask);
1225 + if (IS_ERR(tfm)) {
1226 + pr_err("alg: aead: Failed to load transform for %s: %ld\n",
1227 + driver, PTR_ERR(tfm));
1228 + return PTR_ERR(tfm);
1229 + }
1230 +
1231 + if (desc->suite.tls.enc.vecs) {
1232 + err = test_tls(tfm, ENCRYPT, desc->suite.tls.enc.vecs,
1233 + desc->suite.tls.enc.count);
1234 + if (err)
1235 + goto out;
1236 + }
1237 +
1238 + if (!err && desc->suite.tls.dec.vecs)
1239 + err = test_tls(tfm, DECRYPT, desc->suite.tls.dec.vecs,
1240 + desc->suite.tls.dec.count);
1241 +
1242 +out:
1243 + crypto_free_aead(tfm);
1244 + return err;
1245 +}
1246 +
1247 static int test_cipher(struct crypto_cipher *tfm, int enc,
1248 - struct cipher_testvec *template, unsigned int tcount)
1249 + const struct cipher_testvec *template,
1250 + unsigned int tcount)
1251 {
1252 const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
1253 unsigned int i, j, k;
1254 @@ -1066,7 +1306,8 @@ out_nobuf:
1255 }
1256
1257 static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
1258 - struct cipher_testvec *template, unsigned int tcount,
1259 + const struct cipher_testvec *template,
1260 + unsigned int tcount,
1261 const bool diff_dst, const int align_offset)
1262 {
1263 const char *algo =
1264 @@ -1079,12 +1320,16 @@ static int __test_skcipher(struct crypto
1265 const char *e, *d;
1266 struct tcrypt_result result;
1267 void *data;
1268 - char iv[MAX_IVLEN];
1269 + char *iv;
1270 char *xbuf[XBUFSIZE];
1271 char *xoutbuf[XBUFSIZE];
1272 int ret = -ENOMEM;
1273 unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1274
1275 + iv = kmalloc(MAX_IVLEN, GFP_KERNEL);
1276 + if (!iv)
1277 + return ret;
1278 +
1279 if (testmgr_alloc_buf(xbuf))
1280 goto out_nobuf;
1281
1282 @@ -1325,12 +1570,14 @@ out:
1283 testmgr_free_buf(xoutbuf);
1284 out_nooutbuf:
1285 testmgr_free_buf(xbuf);
1286 + kfree(iv);
1287 out_nobuf:
1288 return ret;
1289 }
1290
1291 static int test_skcipher(struct crypto_skcipher *tfm, int enc,
1292 - struct cipher_testvec *template, unsigned int tcount)
1293 + const struct cipher_testvec *template,
1294 + unsigned int tcount)
1295 {
1296 unsigned int alignmask;
1297 int ret;
1298 @@ -1362,8 +1609,10 @@ static int test_skcipher(struct crypto_s
1299 return 0;
1300 }
1301
1302 -static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
1303 - struct comp_testvec *dtemplate, int ctcount, int dtcount)
1304 +static int test_comp(struct crypto_comp *tfm,
1305 + const struct comp_testvec *ctemplate,
1306 + const struct comp_testvec *dtemplate,
1307 + int ctcount, int dtcount)
1308 {
1309 const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
1310 unsigned int i;
1311 @@ -1442,7 +1691,154 @@ out:
1312 return ret;
1313 }
1314
1315 -static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
1316 +static int test_acomp(struct crypto_acomp *tfm,
1317 + const struct comp_testvec *ctemplate,
1318 + const struct comp_testvec *dtemplate,
1319 + int ctcount, int dtcount)
1320 +{
1321 + const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm));
1322 + unsigned int i;
1323 + char *output;
1324 + int ret;
1325 + struct scatterlist src, dst;
1326 + struct acomp_req *req;
1327 + struct tcrypt_result result;
1328 +
1329 + output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL);
1330 + if (!output)
1331 + return -ENOMEM;
1332 +
1333 + for (i = 0; i < ctcount; i++) {
1334 + unsigned int dlen = COMP_BUF_SIZE;
1335 + int ilen = ctemplate[i].inlen;
1336 + void *input_vec;
1337 +
1338 + input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL);
1339 + if (!input_vec) {
1340 + ret = -ENOMEM;
1341 + goto out;
1342 + }
1343 +
1344 + memset(output, 0, dlen);
1345 + init_completion(&result.completion);
1346 + sg_init_one(&src, input_vec, ilen);
1347 + sg_init_one(&dst, output, dlen);
1348 +
1349 + req = acomp_request_alloc(tfm);
1350 + if (!req) {
1351 + pr_err("alg: acomp: request alloc failed for %s\n",
1352 + algo);
1353 + kfree(input_vec);
1354 + ret = -ENOMEM;
1355 + goto out;
1356 + }
1357 +
1358 + acomp_request_set_params(req, &src, &dst, ilen, dlen);
1359 + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1360 + tcrypt_complete, &result);
1361 +
1362 + ret = wait_async_op(&result, crypto_acomp_compress(req));
1363 + if (ret) {
1364 + pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
1365 + i + 1, algo, -ret);
1366 + kfree(input_vec);
1367 + acomp_request_free(req);
1368 + goto out;
1369 + }
1370 +
1371 + if (req->dlen != ctemplate[i].outlen) {
1372 + pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
1373 + i + 1, algo, req->dlen);
1374 + ret = -EINVAL;
1375 + kfree(input_vec);
1376 + acomp_request_free(req);
1377 + goto out;
1378 + }
1379 +
1380 + if (memcmp(output, ctemplate[i].output, req->dlen)) {
1381 + pr_err("alg: acomp: Compression test %d failed for %s\n",
1382 + i + 1, algo);
1383 + hexdump(output, req->dlen);
1384 + ret = -EINVAL;
1385 + kfree(input_vec);
1386 + acomp_request_free(req);
1387 + goto out;
1388 + }
1389 +
1390 + kfree(input_vec);
1391 + acomp_request_free(req);
1392 + }
1393 +
1394 + for (i = 0; i < dtcount; i++) {
1395 + unsigned int dlen = COMP_BUF_SIZE;
1396 + int ilen = dtemplate[i].inlen;
1397 + void *input_vec;
1398 +
1399 + input_vec = kmemdup(dtemplate[i].input, ilen, GFP_KERNEL);
1400 + if (!input_vec) {
1401 + ret = -ENOMEM;
1402 + goto out;
1403 + }
1404 +
1405 + memset(output, 0, dlen);
1406 + init_completion(&result.completion);
1407 + sg_init_one(&src, input_vec, ilen);
1408 + sg_init_one(&dst, output, dlen);
1409 +
1410 + req = acomp_request_alloc(tfm);
1411 + if (!req) {
1412 + pr_err("alg: acomp: request alloc failed for %s\n",
1413 + algo);
1414 + kfree(input_vec);
1415 + ret = -ENOMEM;
1416 + goto out;
1417 + }
1418 +
1419 + acomp_request_set_params(req, &src, &dst, ilen, dlen);
1420 + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1421 + tcrypt_complete, &result);
1422 +
1423 + ret = wait_async_op(&result, crypto_acomp_decompress(req));
1424 + if (ret) {
1425 + pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
1426 + i + 1, algo, -ret);
1427 + kfree(input_vec);
1428 + acomp_request_free(req);
1429 + goto out;
1430 + }
1431 +
1432 + if (req->dlen != dtemplate[i].outlen) {
1433 + pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
1434 + i + 1, algo, req->dlen);
1435 + ret = -EINVAL;
1436 + kfree(input_vec);
1437 + acomp_request_free(req);
1438 + goto out;
1439 + }
1440 +
1441 + if (memcmp(output, dtemplate[i].output, req->dlen)) {
1442 + pr_err("alg: acomp: Decompression test %d failed for %s\n",
1443 + i + 1, algo);
1444 + hexdump(output, req->dlen);
1445 + ret = -EINVAL;
1446 + kfree(input_vec);
1447 + acomp_request_free(req);
1448 + goto out;
1449 + }
1450 +
1451 + kfree(input_vec);
1452 + acomp_request_free(req);
1453 + }
1454 +
1455 + ret = 0;
1456 +
1457 +out:
1458 + kfree(output);
1459 + return ret;
1460 +}
1461 +
1462 +static int test_cprng(struct crypto_rng *tfm,
1463 + const struct cprng_testvec *template,
1464 unsigned int tcount)
1465 {
1466 const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
1467 @@ -1509,7 +1905,7 @@ static int alg_test_aead(const struct al
1468 struct crypto_aead *tfm;
1469 int err = 0;
1470
1471 - tfm = crypto_alloc_aead(driver, type | CRYPTO_ALG_INTERNAL, mask);
1472 + tfm = crypto_alloc_aead(driver, type, mask);
1473 if (IS_ERR(tfm)) {
1474 printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
1475 "%ld\n", driver, PTR_ERR(tfm));
1476 @@ -1538,7 +1934,7 @@ static int alg_test_cipher(const struct
1477 struct crypto_cipher *tfm;
1478 int err = 0;
1479
1480 - tfm = crypto_alloc_cipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1481 + tfm = crypto_alloc_cipher(driver, type, mask);
1482 if (IS_ERR(tfm)) {
1483 printk(KERN_ERR "alg: cipher: Failed to load transform for "
1484 "%s: %ld\n", driver, PTR_ERR(tfm));
1485 @@ -1567,7 +1963,7 @@ static int alg_test_skcipher(const struc
1486 struct crypto_skcipher *tfm;
1487 int err = 0;
1488
1489 - tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1490 + tfm = crypto_alloc_skcipher(driver, type, mask);
1491 if (IS_ERR(tfm)) {
1492 printk(KERN_ERR "alg: skcipher: Failed to load transform for "
1493 "%s: %ld\n", driver, PTR_ERR(tfm));
1494 @@ -1593,22 +1989,38 @@ out:
1495 static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
1496 u32 type, u32 mask)
1497 {
1498 - struct crypto_comp *tfm;
1499 + struct crypto_comp *comp;
1500 + struct crypto_acomp *acomp;
1501 int err;
1502 + u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK;
1503
1504 - tfm = crypto_alloc_comp(driver, type, mask);
1505 - if (IS_ERR(tfm)) {
1506 - printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
1507 - "%ld\n", driver, PTR_ERR(tfm));
1508 - return PTR_ERR(tfm);
1509 - }
1510 + if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) {
1511 + acomp = crypto_alloc_acomp(driver, type, mask);
1512 + if (IS_ERR(acomp)) {
1513 + pr_err("alg: acomp: Failed to load transform for %s: %ld\n",
1514 + driver, PTR_ERR(acomp));
1515 + return PTR_ERR(acomp);
1516 + }
1517 + err = test_acomp(acomp, desc->suite.comp.comp.vecs,
1518 + desc->suite.comp.decomp.vecs,
1519 + desc->suite.comp.comp.count,
1520 + desc->suite.comp.decomp.count);
1521 + crypto_free_acomp(acomp);
1522 + } else {
1523 + comp = crypto_alloc_comp(driver, type, mask);
1524 + if (IS_ERR(comp)) {
1525 + pr_err("alg: comp: Failed to load transform for %s: %ld\n",
1526 + driver, PTR_ERR(comp));
1527 + return PTR_ERR(comp);
1528 + }
1529
1530 - err = test_comp(tfm, desc->suite.comp.comp.vecs,
1531 - desc->suite.comp.decomp.vecs,
1532 - desc->suite.comp.comp.count,
1533 - desc->suite.comp.decomp.count);
1534 + err = test_comp(comp, desc->suite.comp.comp.vecs,
1535 + desc->suite.comp.decomp.vecs,
1536 + desc->suite.comp.comp.count,
1537 + desc->suite.comp.decomp.count);
1538
1539 - crypto_free_comp(tfm);
1540 + crypto_free_comp(comp);
1541 + }
1542 return err;
1543 }
1544
1545 @@ -1618,7 +2030,7 @@ static int alg_test_hash(const struct al
1546 struct crypto_ahash *tfm;
1547 int err;
1548
1549 - tfm = crypto_alloc_ahash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1550 + tfm = crypto_alloc_ahash(driver, type, mask);
1551 if (IS_ERR(tfm)) {
1552 printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
1553 "%ld\n", driver, PTR_ERR(tfm));
1554 @@ -1646,7 +2058,7 @@ static int alg_test_crc32c(const struct
1555 if (err)
1556 goto out;
1557
1558 - tfm = crypto_alloc_shash(driver, type | CRYPTO_ALG_INTERNAL, mask);
1559 + tfm = crypto_alloc_shash(driver, type, mask);
1560 if (IS_ERR(tfm)) {
1561 printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
1562 "%ld\n", driver, PTR_ERR(tfm));
1563 @@ -1688,7 +2100,7 @@ static int alg_test_cprng(const struct a
1564 struct crypto_rng *rng;
1565 int err;
1566
1567 - rng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1568 + rng = crypto_alloc_rng(driver, type, mask);
1569 if (IS_ERR(rng)) {
1570 printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
1571 "%ld\n", driver, PTR_ERR(rng));
1572 @@ -1703,7 +2115,7 @@ static int alg_test_cprng(const struct a
1573 }
1574
1575
1576 -static int drbg_cavs_test(struct drbg_testvec *test, int pr,
1577 +static int drbg_cavs_test(const struct drbg_testvec *test, int pr,
1578 const char *driver, u32 type, u32 mask)
1579 {
1580 int ret = -EAGAIN;
1581 @@ -1715,7 +2127,7 @@ static int drbg_cavs_test(struct drbg_te
1582 if (!buf)
1583 return -ENOMEM;
1584
1585 - drng = crypto_alloc_rng(driver, type | CRYPTO_ALG_INTERNAL, mask);
1586 + drng = crypto_alloc_rng(driver, type, mask);
1587 if (IS_ERR(drng)) {
1588 printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
1589 "%s\n", driver);
1590 @@ -1777,7 +2189,7 @@ static int alg_test_drbg(const struct al
1591 int err = 0;
1592 int pr = 0;
1593 int i = 0;
1594 - struct drbg_testvec *template = desc->suite.drbg.vecs;
1595 + const struct drbg_testvec *template = desc->suite.drbg.vecs;
1596 unsigned int tcount = desc->suite.drbg.count;
1597
1598 if (0 == memcmp(driver, "drbg_pr_", 8))
1599 @@ -1796,7 +2208,7 @@ static int alg_test_drbg(const struct al
1600
1601 }
1602
1603 -static int do_test_kpp(struct crypto_kpp *tfm, struct kpp_testvec *vec,
1604 +static int do_test_kpp(struct crypto_kpp *tfm, const struct kpp_testvec *vec,
1605 const char *alg)
1606 {
1607 struct kpp_request *req;
1608 @@ -1888,7 +2300,7 @@ free_req:
1609 }
1610
1611 static int test_kpp(struct crypto_kpp *tfm, const char *alg,
1612 - struct kpp_testvec *vecs, unsigned int tcount)
1613 + const struct kpp_testvec *vecs, unsigned int tcount)
1614 {
1615 int ret, i;
1616
1617 @@ -1909,7 +2321,7 @@ static int alg_test_kpp(const struct alg
1618 struct crypto_kpp *tfm;
1619 int err = 0;
1620
1621 - tfm = crypto_alloc_kpp(driver, type | CRYPTO_ALG_INTERNAL, mask);
1622 + tfm = crypto_alloc_kpp(driver, type, mask);
1623 if (IS_ERR(tfm)) {
1624 pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
1625 driver, PTR_ERR(tfm));
1626 @@ -1924,7 +2336,7 @@ static int alg_test_kpp(const struct alg
1627 }
1628
1629 static int test_akcipher_one(struct crypto_akcipher *tfm,
1630 - struct akcipher_testvec *vecs)
1631 + const struct akcipher_testvec *vecs)
1632 {
1633 char *xbuf[XBUFSIZE];
1634 struct akcipher_request *req;
1635 @@ -2044,7 +2456,8 @@ free_xbuf:
1636 }
1637
1638 static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
1639 - struct akcipher_testvec *vecs, unsigned int tcount)
1640 + const struct akcipher_testvec *vecs,
1641 + unsigned int tcount)
1642 {
1643 const char *algo =
1644 crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
1645 @@ -2068,7 +2481,7 @@ static int alg_test_akcipher(const struc
1646 struct crypto_akcipher *tfm;
1647 int err = 0;
1648
1649 - tfm = crypto_alloc_akcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
1650 + tfm = crypto_alloc_akcipher(driver, type, mask);
1651 if (IS_ERR(tfm)) {
1652 pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
1653 driver, PTR_ERR(tfm));
1654 @@ -2088,112 +2501,23 @@ static int alg_test_null(const struct al
1655 return 0;
1656 }
1657
1658 +#define __VECS(tv) { .vecs = tv, .count = ARRAY_SIZE(tv) }
1659 +
1660 /* Please keep this list sorted by algorithm name. */
1661 static const struct alg_test_desc alg_test_descs[] = {
1662 {
1663 - .alg = "__cbc-cast5-avx",
1664 - .test = alg_test_null,
1665 - }, {
1666 - .alg = "__cbc-cast6-avx",
1667 - .test = alg_test_null,
1668 - }, {
1669 - .alg = "__cbc-serpent-avx",
1670 - .test = alg_test_null,
1671 - }, {
1672 - .alg = "__cbc-serpent-avx2",
1673 - .test = alg_test_null,
1674 - }, {
1675 - .alg = "__cbc-serpent-sse2",
1676 - .test = alg_test_null,
1677 - }, {
1678 - .alg = "__cbc-twofish-avx",
1679 - .test = alg_test_null,
1680 - }, {
1681 - .alg = "__driver-cbc-aes-aesni",
1682 - .test = alg_test_null,
1683 - .fips_allowed = 1,
1684 - }, {
1685 - .alg = "__driver-cbc-camellia-aesni",
1686 - .test = alg_test_null,
1687 - }, {
1688 - .alg = "__driver-cbc-camellia-aesni-avx2",
1689 - .test = alg_test_null,
1690 - }, {
1691 - .alg = "__driver-cbc-cast5-avx",
1692 - .test = alg_test_null,
1693 - }, {
1694 - .alg = "__driver-cbc-cast6-avx",
1695 - .test = alg_test_null,
1696 - }, {
1697 - .alg = "__driver-cbc-serpent-avx",
1698 - .test = alg_test_null,
1699 - }, {
1700 - .alg = "__driver-cbc-serpent-avx2",
1701 - .test = alg_test_null,
1702 - }, {
1703 - .alg = "__driver-cbc-serpent-sse2",
1704 - .test = alg_test_null,
1705 - }, {
1706 - .alg = "__driver-cbc-twofish-avx",
1707 - .test = alg_test_null,
1708 - }, {
1709 - .alg = "__driver-ecb-aes-aesni",
1710 - .test = alg_test_null,
1711 - .fips_allowed = 1,
1712 - }, {
1713 - .alg = "__driver-ecb-camellia-aesni",
1714 - .test = alg_test_null,
1715 - }, {
1716 - .alg = "__driver-ecb-camellia-aesni-avx2",
1717 - .test = alg_test_null,
1718 - }, {
1719 - .alg = "__driver-ecb-cast5-avx",
1720 - .test = alg_test_null,
1721 - }, {
1722 - .alg = "__driver-ecb-cast6-avx",
1723 - .test = alg_test_null,
1724 - }, {
1725 - .alg = "__driver-ecb-serpent-avx",
1726 - .test = alg_test_null,
1727 - }, {
1728 - .alg = "__driver-ecb-serpent-avx2",
1729 - .test = alg_test_null,
1730 - }, {
1731 - .alg = "__driver-ecb-serpent-sse2",
1732 - .test = alg_test_null,
1733 - }, {
1734 - .alg = "__driver-ecb-twofish-avx",
1735 - .test = alg_test_null,
1736 - }, {
1737 - .alg = "__driver-gcm-aes-aesni",
1738 - .test = alg_test_null,
1739 - .fips_allowed = 1,
1740 - }, {
1741 - .alg = "__ghash-pclmulqdqni",
1742 - .test = alg_test_null,
1743 - .fips_allowed = 1,
1744 - }, {
1745 .alg = "ansi_cprng",
1746 .test = alg_test_cprng,
1747 .suite = {
1748 - .cprng = {
1749 - .vecs = ansi_cprng_aes_tv_template,
1750 - .count = ANSI_CPRNG_AES_TEST_VECTORS
1751 - }
1752 + .cprng = __VECS(ansi_cprng_aes_tv_template)
1753 }
1754 }, {
1755 .alg = "authenc(hmac(md5),ecb(cipher_null))",
1756 .test = alg_test_aead,
1757 .suite = {
1758 .aead = {
1759 - .enc = {
1760 - .vecs = hmac_md5_ecb_cipher_null_enc_tv_template,
1761 - .count = HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS
1762 - },
1763 - .dec = {
1764 - .vecs = hmac_md5_ecb_cipher_null_dec_tv_template,
1765 - .count = HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS
1766 - }
1767 + .enc = __VECS(hmac_md5_ecb_cipher_null_enc_tv_template),
1768 + .dec = __VECS(hmac_md5_ecb_cipher_null_dec_tv_template)
1769 }
1770 }
1771 }, {
1772 @@ -2201,12 +2525,7 @@ static const struct alg_test_desc alg_te
1773 .test = alg_test_aead,
1774 .suite = {
1775 .aead = {
1776 - .enc = {
1777 - .vecs =
1778 - hmac_sha1_aes_cbc_enc_tv_temp,
1779 - .count =
1780 - HMAC_SHA1_AES_CBC_ENC_TEST_VEC
1781 - }
1782 + .enc = __VECS(hmac_sha1_aes_cbc_enc_tv_temp)
1783 }
1784 }
1785 }, {
1786 @@ -2214,12 +2533,7 @@ static const struct alg_test_desc alg_te
1787 .test = alg_test_aead,
1788 .suite = {
1789 .aead = {
1790 - .enc = {
1791 - .vecs =
1792 - hmac_sha1_des_cbc_enc_tv_temp,
1793 - .count =
1794 - HMAC_SHA1_DES_CBC_ENC_TEST_VEC
1795 - }
1796 + .enc = __VECS(hmac_sha1_des_cbc_enc_tv_temp)
1797 }
1798 }
1799 }, {
1800 @@ -2228,12 +2542,7 @@ static const struct alg_test_desc alg_te
1801 .fips_allowed = 1,
1802 .suite = {
1803 .aead = {
1804 - .enc = {
1805 - .vecs =
1806 - hmac_sha1_des3_ede_cbc_enc_tv_temp,
1807 - .count =
1808 - HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC
1809 - }
1810 + .enc = __VECS(hmac_sha1_des3_ede_cbc_enc_tv_temp)
1811 }
1812 }
1813 }, {
1814 @@ -2245,18 +2554,8 @@ static const struct alg_test_desc alg_te
1815 .test = alg_test_aead,
1816 .suite = {
1817 .aead = {
1818 - .enc = {
1819 - .vecs =
1820 - hmac_sha1_ecb_cipher_null_enc_tv_temp,
1821 - .count =
1822 - HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC
1823 - },
1824 - .dec = {
1825 - .vecs =
1826 - hmac_sha1_ecb_cipher_null_dec_tv_temp,
1827 - .count =
1828 - HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC
1829 - }
1830 + .enc = __VECS(hmac_sha1_ecb_cipher_null_enc_tv_temp),
1831 + .dec = __VECS(hmac_sha1_ecb_cipher_null_dec_tv_temp)
1832 }
1833 }
1834 }, {
1835 @@ -2268,12 +2567,7 @@ static const struct alg_test_desc alg_te
1836 .test = alg_test_aead,
1837 .suite = {
1838 .aead = {
1839 - .enc = {
1840 - .vecs =
1841 - hmac_sha224_des_cbc_enc_tv_temp,
1842 - .count =
1843 - HMAC_SHA224_DES_CBC_ENC_TEST_VEC
1844 - }
1845 + .enc = __VECS(hmac_sha224_des_cbc_enc_tv_temp)
1846 }
1847 }
1848 }, {
1849 @@ -2282,12 +2576,7 @@ static const struct alg_test_desc alg_te
1850 .fips_allowed = 1,
1851 .suite = {
1852 .aead = {
1853 - .enc = {
1854 - .vecs =
1855 - hmac_sha224_des3_ede_cbc_enc_tv_temp,
1856 - .count =
1857 - HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC
1858 - }
1859 + .enc = __VECS(hmac_sha224_des3_ede_cbc_enc_tv_temp)
1860 }
1861 }
1862 }, {
1863 @@ -2296,12 +2585,7 @@ static const struct alg_test_desc alg_te
1864 .fips_allowed = 1,
1865 .suite = {
1866 .aead = {
1867 - .enc = {
1868 - .vecs =
1869 - hmac_sha256_aes_cbc_enc_tv_temp,
1870 - .count =
1871 - HMAC_SHA256_AES_CBC_ENC_TEST_VEC
1872 - }
1873 + .enc = __VECS(hmac_sha256_aes_cbc_enc_tv_temp)
1874 }
1875 }
1876 }, {
1877 @@ -2309,12 +2593,7 @@ static const struct alg_test_desc alg_te
1878 .test = alg_test_aead,
1879 .suite = {
1880 .aead = {
1881 - .enc = {
1882 - .vecs =
1883 - hmac_sha256_des_cbc_enc_tv_temp,
1884 - .count =
1885 - HMAC_SHA256_DES_CBC_ENC_TEST_VEC
1886 - }
1887 + .enc = __VECS(hmac_sha256_des_cbc_enc_tv_temp)
1888 }
1889 }
1890 }, {
1891 @@ -2323,12 +2602,7 @@ static const struct alg_test_desc alg_te
1892 .fips_allowed = 1,
1893 .suite = {
1894 .aead = {
1895 - .enc = {
1896 - .vecs =
1897 - hmac_sha256_des3_ede_cbc_enc_tv_temp,
1898 - .count =
1899 - HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC
1900 - }
1901 + .enc = __VECS(hmac_sha256_des3_ede_cbc_enc_tv_temp)
1902 }
1903 }
1904 }, {
1905 @@ -2344,12 +2618,7 @@ static const struct alg_test_desc alg_te
1906 .test = alg_test_aead,
1907 .suite = {
1908 .aead = {
1909 - .enc = {
1910 - .vecs =
1911 - hmac_sha384_des_cbc_enc_tv_temp,
1912 - .count =
1913 - HMAC_SHA384_DES_CBC_ENC_TEST_VEC
1914 - }
1915 + .enc = __VECS(hmac_sha384_des_cbc_enc_tv_temp)
1916 }
1917 }
1918 }, {
1919 @@ -2358,12 +2627,7 @@ static const struct alg_test_desc alg_te
1920 .fips_allowed = 1,
1921 .suite = {
1922 .aead = {
1923 - .enc = {
1924 - .vecs =
1925 - hmac_sha384_des3_ede_cbc_enc_tv_temp,
1926 - .count =
1927 - HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC
1928 - }
1929 + .enc = __VECS(hmac_sha384_des3_ede_cbc_enc_tv_temp)
1930 }
1931 }
1932 }, {
1933 @@ -2380,12 +2644,7 @@ static const struct alg_test_desc alg_te
1934 .test = alg_test_aead,
1935 .suite = {
1936 .aead = {
1937 - .enc = {
1938 - .vecs =
1939 - hmac_sha512_aes_cbc_enc_tv_temp,
1940 - .count =
1941 - HMAC_SHA512_AES_CBC_ENC_TEST_VEC
1942 - }
1943 + .enc = __VECS(hmac_sha512_aes_cbc_enc_tv_temp)
1944 }
1945 }
1946 }, {
1947 @@ -2393,12 +2652,7 @@ static const struct alg_test_desc alg_te
1948 .test = alg_test_aead,
1949 .suite = {
1950 .aead = {
1951 - .enc = {
1952 - .vecs =
1953 - hmac_sha512_des_cbc_enc_tv_temp,
1954 - .count =
1955 - HMAC_SHA512_DES_CBC_ENC_TEST_VEC
1956 - }
1957 + .enc = __VECS(hmac_sha512_des_cbc_enc_tv_temp)
1958 }
1959 }
1960 }, {
1961 @@ -2407,12 +2661,7 @@ static const struct alg_test_desc alg_te
1962 .fips_allowed = 1,
1963 .suite = {
1964 .aead = {
1965 - .enc = {
1966 - .vecs =
1967 - hmac_sha512_des3_ede_cbc_enc_tv_temp,
1968 - .count =
1969 - HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC
1970 - }
1971 + .enc = __VECS(hmac_sha512_des3_ede_cbc_enc_tv_temp)
1972 }
1973 }
1974 }, {
1975 @@ -2429,14 +2678,8 @@ static const struct alg_test_desc alg_te
1976 .fips_allowed = 1,
1977 .suite = {
1978 .cipher = {
1979 - .enc = {
1980 - .vecs = aes_cbc_enc_tv_template,
1981 - .count = AES_CBC_ENC_TEST_VECTORS
1982 - },
1983 - .dec = {
1984 - .vecs = aes_cbc_dec_tv_template,
1985 - .count = AES_CBC_DEC_TEST_VECTORS
1986 - }
1987 + .enc = __VECS(aes_cbc_enc_tv_template),
1988 + .dec = __VECS(aes_cbc_dec_tv_template)
1989 }
1990 }
1991 }, {
1992 @@ -2444,14 +2687,8 @@ static const struct alg_test_desc alg_te
1993 .test = alg_test_skcipher,
1994 .suite = {
1995 .cipher = {
1996 - .enc = {
1997 - .vecs = anubis_cbc_enc_tv_template,
1998 - .count = ANUBIS_CBC_ENC_TEST_VECTORS
1999 - },
2000 - .dec = {
2001 - .vecs = anubis_cbc_dec_tv_template,
2002 - .count = ANUBIS_CBC_DEC_TEST_VECTORS
2003 - }
2004 + .enc = __VECS(anubis_cbc_enc_tv_template),
2005 + .dec = __VECS(anubis_cbc_dec_tv_template)
2006 }
2007 }
2008 }, {
2009 @@ -2459,14 +2696,8 @@ static const struct alg_test_desc alg_te
2010 .test = alg_test_skcipher,
2011 .suite = {
2012 .cipher = {
2013 - .enc = {
2014 - .vecs = bf_cbc_enc_tv_template,
2015 - .count = BF_CBC_ENC_TEST_VECTORS
2016 - },
2017 - .dec = {
2018 - .vecs = bf_cbc_dec_tv_template,
2019 - .count = BF_CBC_DEC_TEST_VECTORS
2020 - }
2021 + .enc = __VECS(bf_cbc_enc_tv_template),
2022 + .dec = __VECS(bf_cbc_dec_tv_template)
2023 }
2024 }
2025 }, {
2026 @@ -2474,14 +2705,8 @@ static const struct alg_test_desc alg_te
2027 .test = alg_test_skcipher,
2028 .suite = {
2029 .cipher = {
2030 - .enc = {
2031 - .vecs = camellia_cbc_enc_tv_template,
2032 - .count = CAMELLIA_CBC_ENC_TEST_VECTORS
2033 - },
2034 - .dec = {
2035 - .vecs = camellia_cbc_dec_tv_template,
2036 - .count = CAMELLIA_CBC_DEC_TEST_VECTORS
2037 - }
2038 + .enc = __VECS(camellia_cbc_enc_tv_template),
2039 + .dec = __VECS(camellia_cbc_dec_tv_template)
2040 }
2041 }
2042 }, {
2043 @@ -2489,14 +2714,8 @@ static const struct alg_test_desc alg_te
2044 .test = alg_test_skcipher,
2045 .suite = {
2046 .cipher = {
2047 - .enc = {
2048 - .vecs = cast5_cbc_enc_tv_template,
2049 - .count = CAST5_CBC_ENC_TEST_VECTORS
2050 - },
2051 - .dec = {
2052 - .vecs = cast5_cbc_dec_tv_template,
2053 - .count = CAST5_CBC_DEC_TEST_VECTORS
2054 - }
2055 + .enc = __VECS(cast5_cbc_enc_tv_template),
2056 + .dec = __VECS(cast5_cbc_dec_tv_template)
2057 }
2058 }
2059 }, {
2060 @@ -2504,14 +2723,8 @@ static const struct alg_test_desc alg_te
2061 .test = alg_test_skcipher,
2062 .suite = {
2063 .cipher = {
2064 - .enc = {
2065 - .vecs = cast6_cbc_enc_tv_template,
2066 - .count = CAST6_CBC_ENC_TEST_VECTORS
2067 - },
2068 - .dec = {
2069 - .vecs = cast6_cbc_dec_tv_template,
2070 - .count = CAST6_CBC_DEC_TEST_VECTORS
2071 - }
2072 + .enc = __VECS(cast6_cbc_enc_tv_template),
2073 + .dec = __VECS(cast6_cbc_dec_tv_template)
2074 }
2075 }
2076 }, {
2077 @@ -2519,14 +2732,8 @@ static const struct alg_test_desc alg_te
2078 .test = alg_test_skcipher,
2079 .suite = {
2080 .cipher = {
2081 - .enc = {
2082 - .vecs = des_cbc_enc_tv_template,
2083 - .count = DES_CBC_ENC_TEST_VECTORS
2084 - },
2085 - .dec = {
2086 - .vecs = des_cbc_dec_tv_template,
2087 - .count = DES_CBC_DEC_TEST_VECTORS
2088 - }
2089 + .enc = __VECS(des_cbc_enc_tv_template),
2090 + .dec = __VECS(des_cbc_dec_tv_template)
2091 }
2092 }
2093 }, {
2094 @@ -2535,14 +2742,8 @@ static const struct alg_test_desc alg_te
2095 .fips_allowed = 1,
2096 .suite = {
2097 .cipher = {
2098 - .enc = {
2099 - .vecs = des3_ede_cbc_enc_tv_template,
2100 - .count = DES3_EDE_CBC_ENC_TEST_VECTORS
2101 - },
2102 - .dec = {
2103 - .vecs = des3_ede_cbc_dec_tv_template,
2104 - .count = DES3_EDE_CBC_DEC_TEST_VECTORS
2105 - }
2106 + .enc = __VECS(des3_ede_cbc_enc_tv_template),
2107 + .dec = __VECS(des3_ede_cbc_dec_tv_template)
2108 }
2109 }
2110 }, {
2111 @@ -2550,14 +2751,8 @@ static const struct alg_test_desc alg_te
2112 .test = alg_test_skcipher,
2113 .suite = {
2114 .cipher = {
2115 - .enc = {
2116 - .vecs = serpent_cbc_enc_tv_template,
2117 - .count = SERPENT_CBC_ENC_TEST_VECTORS
2118 - },
2119 - .dec = {
2120 - .vecs = serpent_cbc_dec_tv_template,
2121 - .count = SERPENT_CBC_DEC_TEST_VECTORS
2122 - }
2123 + .enc = __VECS(serpent_cbc_enc_tv_template),
2124 + .dec = __VECS(serpent_cbc_dec_tv_template)
2125 }
2126 }
2127 }, {
2128 @@ -2565,30 +2760,25 @@ static const struct alg_test_desc alg_te
2129 .test = alg_test_skcipher,
2130 .suite = {
2131 .cipher = {
2132 - .enc = {
2133 - .vecs = tf_cbc_enc_tv_template,
2134 - .count = TF_CBC_ENC_TEST_VECTORS
2135 - },
2136 - .dec = {
2137 - .vecs = tf_cbc_dec_tv_template,
2138 - .count = TF_CBC_DEC_TEST_VECTORS
2139 - }
2140 + .enc = __VECS(tf_cbc_enc_tv_template),
2141 + .dec = __VECS(tf_cbc_dec_tv_template)
2142 }
2143 }
2144 }, {
2145 + .alg = "cbcmac(aes)",
2146 + .fips_allowed = 1,
2147 + .test = alg_test_hash,
2148 + .suite = {
2149 + .hash = __VECS(aes_cbcmac_tv_template)
2150 + }
2151 + }, {
2152 .alg = "ccm(aes)",
2153 .test = alg_test_aead,
2154 .fips_allowed = 1,
2155 .suite = {
2156 .aead = {
2157 - .enc = {
2158 - .vecs = aes_ccm_enc_tv_template,
2159 - .count = AES_CCM_ENC_TEST_VECTORS
2160 - },
2161 - .dec = {
2162 - .vecs = aes_ccm_dec_tv_template,
2163 - .count = AES_CCM_DEC_TEST_VECTORS
2164 - }
2165 + .enc = __VECS(aes_ccm_enc_tv_template),
2166 + .dec = __VECS(aes_ccm_dec_tv_template)
2167 }
2168 }
2169 }, {
2170 @@ -2596,14 +2786,8 @@ static const struct alg_test_desc alg_te
2171 .test = alg_test_skcipher,
2172 .suite = {
2173 .cipher = {
2174 - .enc = {
2175 - .vecs = chacha20_enc_tv_template,
2176 - .count = CHACHA20_ENC_TEST_VECTORS
2177 - },
2178 - .dec = {
2179 - .vecs = chacha20_enc_tv_template,
2180 - .count = CHACHA20_ENC_TEST_VECTORS
2181 - },
2182 + .enc = __VECS(chacha20_enc_tv_template),
2183 + .dec = __VECS(chacha20_enc_tv_template),
2184 }
2185 }
2186 }, {
2187 @@ -2611,20 +2795,14 @@ static const struct alg_test_desc alg_te
2188 .fips_allowed = 1,
2189 .test = alg_test_hash,
2190 .suite = {
2191 - .hash = {
2192 - .vecs = aes_cmac128_tv_template,
2193 - .count = CMAC_AES_TEST_VECTORS
2194 - }
2195 + .hash = __VECS(aes_cmac128_tv_template)
2196 }
2197 }, {
2198 .alg = "cmac(des3_ede)",
2199 .fips_allowed = 1,
2200 .test = alg_test_hash,
2201 .suite = {
2202 - .hash = {
2203 - .vecs = des3_ede_cmac64_tv_template,
2204 - .count = CMAC_DES3_EDE_TEST_VECTORS
2205 - }
2206 + .hash = __VECS(des3_ede_cmac64_tv_template)
2207 }
2208 }, {
2209 .alg = "compress_null",
2210 @@ -2633,94 +2811,30 @@ static const struct alg_test_desc alg_te
2211 .alg = "crc32",
2212 .test = alg_test_hash,
2213 .suite = {
2214 - .hash = {
2215 - .vecs = crc32_tv_template,
2216 - .count = CRC32_TEST_VECTORS
2217 - }
2218 + .hash = __VECS(crc32_tv_template)
2219 }
2220 }, {
2221 .alg = "crc32c",
2222 .test = alg_test_crc32c,
2223 .fips_allowed = 1,
2224 .suite = {
2225 - .hash = {
2226 - .vecs = crc32c_tv_template,
2227 - .count = CRC32C_TEST_VECTORS
2228 - }
2229 + .hash = __VECS(crc32c_tv_template)
2230 }
2231 }, {
2232 .alg = "crct10dif",
2233 .test = alg_test_hash,
2234 .fips_allowed = 1,
2235 .suite = {
2236 - .hash = {
2237 - .vecs = crct10dif_tv_template,
2238 - .count = CRCT10DIF_TEST_VECTORS
2239 - }
2240 + .hash = __VECS(crct10dif_tv_template)
2241 }
2242 }, {
2243 - .alg = "cryptd(__driver-cbc-aes-aesni)",
2244 - .test = alg_test_null,
2245 - .fips_allowed = 1,
2246 - }, {
2247 - .alg = "cryptd(__driver-cbc-camellia-aesni)",
2248 - .test = alg_test_null,
2249 - }, {
2250 - .alg = "cryptd(__driver-cbc-camellia-aesni-avx2)",
2251 - .test = alg_test_null,
2252 - }, {
2253 - .alg = "cryptd(__driver-cbc-serpent-avx2)",
2254 - .test = alg_test_null,
2255 - }, {
2256 - .alg = "cryptd(__driver-ecb-aes-aesni)",
2257 - .test = alg_test_null,
2258 - .fips_allowed = 1,
2259 - }, {
2260 - .alg = "cryptd(__driver-ecb-camellia-aesni)",
2261 - .test = alg_test_null,
2262 - }, {
2263 - .alg = "cryptd(__driver-ecb-camellia-aesni-avx2)",
2264 - .test = alg_test_null,
2265 - }, {
2266 - .alg = "cryptd(__driver-ecb-cast5-avx)",
2267 - .test = alg_test_null,
2268 - }, {
2269 - .alg = "cryptd(__driver-ecb-cast6-avx)",
2270 - .test = alg_test_null,
2271 - }, {
2272 - .alg = "cryptd(__driver-ecb-serpent-avx)",
2273 - .test = alg_test_null,
2274 - }, {
2275 - .alg = "cryptd(__driver-ecb-serpent-avx2)",
2276 - .test = alg_test_null,
2277 - }, {
2278 - .alg = "cryptd(__driver-ecb-serpent-sse2)",
2279 - .test = alg_test_null,
2280 - }, {
2281 - .alg = "cryptd(__driver-ecb-twofish-avx)",
2282 - .test = alg_test_null,
2283 - }, {
2284 - .alg = "cryptd(__driver-gcm-aes-aesni)",
2285 - .test = alg_test_null,
2286 - .fips_allowed = 1,
2287 - }, {
2288 - .alg = "cryptd(__ghash-pclmulqdqni)",
2289 - .test = alg_test_null,
2290 - .fips_allowed = 1,
2291 - }, {
2292 .alg = "ctr(aes)",
2293 .test = alg_test_skcipher,
2294 .fips_allowed = 1,
2295 .suite = {
2296 .cipher = {
2297 - .enc = {
2298 - .vecs = aes_ctr_enc_tv_template,
2299 - .count = AES_CTR_ENC_TEST_VECTORS
2300 - },
2301 - .dec = {
2302 - .vecs = aes_ctr_dec_tv_template,
2303 - .count = AES_CTR_DEC_TEST_VECTORS
2304 - }
2305 + .enc = __VECS(aes_ctr_enc_tv_template),
2306 + .dec = __VECS(aes_ctr_dec_tv_template)
2307 }
2308 }
2309 }, {
2310 @@ -2728,14 +2842,8 @@ static const struct alg_test_desc alg_te
2311 .test = alg_test_skcipher,
2312 .suite = {
2313 .cipher = {
2314 - .enc = {
2315 - .vecs = bf_ctr_enc_tv_template,
2316 - .count = BF_CTR_ENC_TEST_VECTORS
2317 - },
2318 - .dec = {
2319 - .vecs = bf_ctr_dec_tv_template,
2320 - .count = BF_CTR_DEC_TEST_VECTORS
2321 - }
2322 + .enc = __VECS(bf_ctr_enc_tv_template),
2323 + .dec = __VECS(bf_ctr_dec_tv_template)
2324 }
2325 }
2326 }, {
2327 @@ -2743,14 +2851,8 @@ static const struct alg_test_desc alg_te
2328 .test = alg_test_skcipher,
2329 .suite = {
2330 .cipher = {
2331 - .enc = {
2332 - .vecs = camellia_ctr_enc_tv_template,
2333 - .count = CAMELLIA_CTR_ENC_TEST_VECTORS
2334 - },
2335 - .dec = {
2336 - .vecs = camellia_ctr_dec_tv_template,
2337 - .count = CAMELLIA_CTR_DEC_TEST_VECTORS
2338 - }
2339 + .enc = __VECS(camellia_ctr_enc_tv_template),
2340 + .dec = __VECS(camellia_ctr_dec_tv_template)
2341 }
2342 }
2343 }, {
2344 @@ -2758,14 +2860,8 @@ static const struct alg_test_desc alg_te
2345 .test = alg_test_skcipher,
2346 .suite = {
2347 .cipher = {
2348 - .enc = {
2349 - .vecs = cast5_ctr_enc_tv_template,
2350 - .count = CAST5_CTR_ENC_TEST_VECTORS
2351 - },
2352 - .dec = {
2353 - .vecs = cast5_ctr_dec_tv_template,
2354 - .count = CAST5_CTR_DEC_TEST_VECTORS
2355 - }
2356 + .enc = __VECS(cast5_ctr_enc_tv_template),
2357 + .dec = __VECS(cast5_ctr_dec_tv_template)
2358 }
2359 }
2360 }, {
2361 @@ -2773,14 +2869,8 @@ static const struct alg_test_desc alg_te
2362 .test = alg_test_skcipher,
2363 .suite = {
2364 .cipher = {
2365 - .enc = {
2366 - .vecs = cast6_ctr_enc_tv_template,
2367 - .count = CAST6_CTR_ENC_TEST_VECTORS
2368 - },
2369 - .dec = {
2370 - .vecs = cast6_ctr_dec_tv_template,
2371 - .count = CAST6_CTR_DEC_TEST_VECTORS
2372 - }
2373 + .enc = __VECS(cast6_ctr_enc_tv_template),
2374 + .dec = __VECS(cast6_ctr_dec_tv_template)
2375 }
2376 }
2377 }, {
2378 @@ -2788,29 +2878,18 @@ static const struct alg_test_desc alg_te
2379 .test = alg_test_skcipher,
2380 .suite = {
2381 .cipher = {
2382 - .enc = {
2383 - .vecs = des_ctr_enc_tv_template,
2384 - .count = DES_CTR_ENC_TEST_VECTORS
2385 - },
2386 - .dec = {
2387 - .vecs = des_ctr_dec_tv_template,
2388 - .count = DES_CTR_DEC_TEST_VECTORS
2389 - }
2390 + .enc = __VECS(des_ctr_enc_tv_template),
2391 + .dec = __VECS(des_ctr_dec_tv_template)
2392 }
2393 }
2394 }, {
2395 .alg = "ctr(des3_ede)",
2396 .test = alg_test_skcipher,
2397 + .fips_allowed = 1,
2398 .suite = {
2399 .cipher = {
2400 - .enc = {
2401 - .vecs = des3_ede_ctr_enc_tv_template,
2402 - .count = DES3_EDE_CTR_ENC_TEST_VECTORS
2403 - },
2404 - .dec = {
2405 - .vecs = des3_ede_ctr_dec_tv_template,
2406 - .count = DES3_EDE_CTR_DEC_TEST_VECTORS
2407 - }
2408 + .enc = __VECS(des3_ede_ctr_enc_tv_template),
2409 + .dec = __VECS(des3_ede_ctr_dec_tv_template)
2410 }
2411 }
2412 }, {
2413 @@ -2818,14 +2897,8 @@ static const struct alg_test_desc alg_te
2414 .test = alg_test_skcipher,
2415 .suite = {
2416 .cipher = {
2417 - .enc = {
2418 - .vecs = serpent_ctr_enc_tv_template,
2419 - .count = SERPENT_CTR_ENC_TEST_VECTORS
2420 - },
2421 - .dec = {
2422 - .vecs = serpent_ctr_dec_tv_template,
2423 - .count = SERPENT_CTR_DEC_TEST_VECTORS
2424 - }
2425 + .enc = __VECS(serpent_ctr_enc_tv_template),
2426 + .dec = __VECS(serpent_ctr_dec_tv_template)
2427 }
2428 }
2429 }, {
2430 @@ -2833,14 +2906,8 @@ static const struct alg_test_desc alg_te
2431 .test = alg_test_skcipher,
2432 .suite = {
2433 .cipher = {
2434 - .enc = {
2435 - .vecs = tf_ctr_enc_tv_template,
2436 - .count = TF_CTR_ENC_TEST_VECTORS
2437 - },
2438 - .dec = {
2439 - .vecs = tf_ctr_dec_tv_template,
2440 - .count = TF_CTR_DEC_TEST_VECTORS
2441 - }
2442 + .enc = __VECS(tf_ctr_enc_tv_template),
2443 + .dec = __VECS(tf_ctr_dec_tv_template)
2444 }
2445 }
2446 }, {
2447 @@ -2848,14 +2915,8 @@ static const struct alg_test_desc alg_te
2448 .test = alg_test_skcipher,
2449 .suite = {
2450 .cipher = {
2451 - .enc = {
2452 - .vecs = cts_mode_enc_tv_template,
2453 - .count = CTS_MODE_ENC_TEST_VECTORS
2454 - },
2455 - .dec = {
2456 - .vecs = cts_mode_dec_tv_template,
2457 - .count = CTS_MODE_DEC_TEST_VECTORS
2458 - }
2459 + .enc = __VECS(cts_mode_enc_tv_template),
2460 + .dec = __VECS(cts_mode_dec_tv_template)
2461 }
2462 }
2463 }, {
2464 @@ -2864,14 +2925,8 @@ static const struct alg_test_desc alg_te
2465 .fips_allowed = 1,
2466 .suite = {
2467 .comp = {
2468 - .comp = {
2469 - .vecs = deflate_comp_tv_template,
2470 - .count = DEFLATE_COMP_TEST_VECTORS
2471 - },
2472 - .decomp = {
2473 - .vecs = deflate_decomp_tv_template,
2474 - .count = DEFLATE_DECOMP_TEST_VECTORS
2475 - }
2476 + .comp = __VECS(deflate_comp_tv_template),
2477 + .decomp = __VECS(deflate_decomp_tv_template)
2478 }
2479 }
2480 }, {
2481 @@ -2879,10 +2934,7 @@ static const struct alg_test_desc alg_te
2482 .test = alg_test_kpp,
2483 .fips_allowed = 1,
2484 .suite = {
2485 - .kpp = {
2486 - .vecs = dh_tv_template,
2487 - .count = DH_TEST_VECTORS
2488 - }
2489 + .kpp = __VECS(dh_tv_template)
2490 }
2491 }, {
2492 .alg = "digest_null",
2493 @@ -2892,30 +2944,21 @@ static const struct alg_test_desc alg_te
2494 .test = alg_test_drbg,
2495 .fips_allowed = 1,
2496 .suite = {
2497 - .drbg = {
2498 - .vecs = drbg_nopr_ctr_aes128_tv_template,
2499 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
2500 - }
2501 + .drbg = __VECS(drbg_nopr_ctr_aes128_tv_template)
2502 }
2503 }, {
2504 .alg = "drbg_nopr_ctr_aes192",
2505 .test = alg_test_drbg,
2506 .fips_allowed = 1,
2507 .suite = {
2508 - .drbg = {
2509 - .vecs = drbg_nopr_ctr_aes192_tv_template,
2510 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
2511 - }
2512 + .drbg = __VECS(drbg_nopr_ctr_aes192_tv_template)
2513 }
2514 }, {
2515 .alg = "drbg_nopr_ctr_aes256",
2516 .test = alg_test_drbg,
2517 .fips_allowed = 1,
2518 .suite = {
2519 - .drbg = {
2520 - .vecs = drbg_nopr_ctr_aes256_tv_template,
2521 - .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
2522 - }
2523 + .drbg = __VECS(drbg_nopr_ctr_aes256_tv_template)
2524 }
2525 }, {
2526 /*
2527 @@ -2930,11 +2973,7 @@ static const struct alg_test_desc alg_te
2528 .test = alg_test_drbg,
2529 .fips_allowed = 1,
2530 .suite = {
2531 - .drbg = {
2532 - .vecs = drbg_nopr_hmac_sha256_tv_template,
2533 - .count =
2534 - ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
2535 - }
2536 + .drbg = __VECS(drbg_nopr_hmac_sha256_tv_template)
2537 }
2538 }, {
2539 /* covered by drbg_nopr_hmac_sha256 test */
2540 @@ -2954,10 +2993,7 @@ static const struct alg_test_desc alg_te
2541 .test = alg_test_drbg,
2542 .fips_allowed = 1,
2543 .suite = {
2544 - .drbg = {
2545 - .vecs = drbg_nopr_sha256_tv_template,
2546 - .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
2547 - }
2548 + .drbg = __VECS(drbg_nopr_sha256_tv_template)
2549 }
2550 }, {
2551 /* covered by drbg_nopr_sha256 test */
2552 @@ -2973,10 +3009,7 @@ static const struct alg_test_desc alg_te
2553 .test = alg_test_drbg,
2554 .fips_allowed = 1,
2555 .suite = {
2556 - .drbg = {
2557 - .vecs = drbg_pr_ctr_aes128_tv_template,
2558 - .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
2559 - }
2560 + .drbg = __VECS(drbg_pr_ctr_aes128_tv_template)
2561 }
2562 }, {
2563 /* covered by drbg_pr_ctr_aes128 test */
2564 @@ -2996,10 +3029,7 @@ static const struct alg_test_desc alg_te
2565 .test = alg_test_drbg,
2566 .fips_allowed = 1,
2567 .suite = {
2568 - .drbg = {
2569 - .vecs = drbg_pr_hmac_sha256_tv_template,
2570 - .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
2571 - }
2572 + .drbg = __VECS(drbg_pr_hmac_sha256_tv_template)
2573 }
2574 }, {
2575 /* covered by drbg_pr_hmac_sha256 test */
2576 @@ -3019,10 +3049,7 @@ static const struct alg_test_desc alg_te
2577 .test = alg_test_drbg,
2578 .fips_allowed = 1,
2579 .suite = {
2580 - .drbg = {
2581 - .vecs = drbg_pr_sha256_tv_template,
2582 - .count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
2583 - }
2584 + .drbg = __VECS(drbg_pr_sha256_tv_template)
2585 }
2586 }, {
2587 /* covered by drbg_pr_sha256 test */
2588 @@ -3034,23 +3061,13 @@ static const struct alg_test_desc alg_te
2589 .fips_allowed = 1,
2590 .test = alg_test_null,
2591 }, {
2592 - .alg = "ecb(__aes-aesni)",
2593 - .test = alg_test_null,
2594 - .fips_allowed = 1,
2595 - }, {
2596 .alg = "ecb(aes)",
2597 .test = alg_test_skcipher,
2598 .fips_allowed = 1,
2599 .suite = {
2600 .cipher = {
2601 - .enc = {
2602 - .vecs = aes_enc_tv_template,
2603 - .count = AES_ENC_TEST_VECTORS
2604 - },
2605 - .dec = {
2606 - .vecs = aes_dec_tv_template,
2607 - .count = AES_DEC_TEST_VECTORS
2608 - }
2609 + .enc = __VECS(aes_enc_tv_template),
2610 + .dec = __VECS(aes_dec_tv_template)
2611 }
2612 }
2613 }, {
2614 @@ -3058,14 +3075,8 @@ static const struct alg_test_desc alg_te
2615 .test = alg_test_skcipher,
2616 .suite = {
2617 .cipher = {
2618 - .enc = {
2619 - .vecs = anubis_enc_tv_template,
2620 - .count = ANUBIS_ENC_TEST_VECTORS
2621 - },
2622 - .dec = {
2623 - .vecs = anubis_dec_tv_template,
2624 - .count = ANUBIS_DEC_TEST_VECTORS
2625 - }
2626 + .enc = __VECS(anubis_enc_tv_template),
2627 + .dec = __VECS(anubis_dec_tv_template)
2628 }
2629 }
2630 }, {
2631 @@ -3073,14 +3084,8 @@ static const struct alg_test_desc alg_te
2632 .test = alg_test_skcipher,
2633 .suite = {
2634 .cipher = {
2635 - .enc = {
2636 - .vecs = arc4_enc_tv_template,
2637 - .count = ARC4_ENC_TEST_VECTORS
2638 - },
2639 - .dec = {
2640 - .vecs = arc4_dec_tv_template,
2641 - .count = ARC4_DEC_TEST_VECTORS
2642 - }
2643 + .enc = __VECS(arc4_enc_tv_template),
2644 + .dec = __VECS(arc4_dec_tv_template)
2645 }
2646 }
2647 }, {
2648 @@ -3088,14 +3093,8 @@ static const struct alg_test_desc alg_te
2649 .test = alg_test_skcipher,
2650 .suite = {
2651 .cipher = {
2652 - .enc = {
2653 - .vecs = bf_enc_tv_template,
2654 - .count = BF_ENC_TEST_VECTORS
2655 - },
2656 - .dec = {
2657 - .vecs = bf_dec_tv_template,
2658 - .count = BF_DEC_TEST_VECTORS
2659 - }
2660 + .enc = __VECS(bf_enc_tv_template),
2661 + .dec = __VECS(bf_dec_tv_template)
2662 }
2663 }
2664 }, {
2665 @@ -3103,14 +3102,8 @@ static const struct alg_test_desc alg_te
2666 .test = alg_test_skcipher,
2667 .suite = {
2668 .cipher = {
2669 - .enc = {
2670 - .vecs = camellia_enc_tv_template,
2671 - .count = CAMELLIA_ENC_TEST_VECTORS
2672 - },
2673 - .dec = {
2674 - .vecs = camellia_dec_tv_template,
2675 - .count = CAMELLIA_DEC_TEST_VECTORS
2676 - }
2677 + .enc = __VECS(camellia_enc_tv_template),
2678 + .dec = __VECS(camellia_dec_tv_template)
2679 }
2680 }
2681 }, {
2682 @@ -3118,14 +3111,8 @@ static const struct alg_test_desc alg_te
2683 .test = alg_test_skcipher,
2684 .suite = {
2685 .cipher = {
2686 - .enc = {
2687 - .vecs = cast5_enc_tv_template,
2688 - .count = CAST5_ENC_TEST_VECTORS
2689 - },
2690 - .dec = {
2691 - .vecs = cast5_dec_tv_template,
2692 - .count = CAST5_DEC_TEST_VECTORS
2693 - }
2694 + .enc = __VECS(cast5_enc_tv_template),
2695 + .dec = __VECS(cast5_dec_tv_template)
2696 }
2697 }
2698 }, {
2699 @@ -3133,14 +3120,8 @@ static const struct alg_test_desc alg_te
2700 .test = alg_test_skcipher,
2701 .suite = {
2702 .cipher = {
2703 - .enc = {
2704 - .vecs = cast6_enc_tv_template,
2705 - .count = CAST6_ENC_TEST_VECTORS
2706 - },
2707 - .dec = {
2708 - .vecs = cast6_dec_tv_template,
2709 - .count = CAST6_DEC_TEST_VECTORS
2710 - }
2711 + .enc = __VECS(cast6_enc_tv_template),
2712 + .dec = __VECS(cast6_dec_tv_template)
2713 }
2714 }
2715 }, {
2716 @@ -3151,14 +3132,8 @@ static const struct alg_test_desc alg_te
2717 .test = alg_test_skcipher,
2718 .suite = {
2719 .cipher = {
2720 - .enc = {
2721 - .vecs = des_enc_tv_template,
2722 - .count = DES_ENC_TEST_VECTORS
2723 - },
2724 - .dec = {
2725 - .vecs = des_dec_tv_template,
2726 - .count = DES_DEC_TEST_VECTORS
2727 - }
2728 + .enc = __VECS(des_enc_tv_template),
2729 + .dec = __VECS(des_dec_tv_template)
2730 }
2731 }
2732 }, {
2733 @@ -3167,14 +3142,8 @@ static const struct alg_test_desc alg_te
2734 .fips_allowed = 1,
2735 .suite = {
2736 .cipher = {
2737 - .enc = {
2738 - .vecs = des3_ede_enc_tv_template,
2739 - .count = DES3_EDE_ENC_TEST_VECTORS
2740 - },
2741 - .dec = {
2742 - .vecs = des3_ede_dec_tv_template,
2743 - .count = DES3_EDE_DEC_TEST_VECTORS
2744 - }
2745 + .enc = __VECS(des3_ede_enc_tv_template),
2746 + .dec = __VECS(des3_ede_dec_tv_template)
2747 }
2748 }
2749 }, {
2750 @@ -3197,14 +3166,8 @@ static const struct alg_test_desc alg_te
2751 .test = alg_test_skcipher,
2752 .suite = {
2753 .cipher = {
2754 - .enc = {
2755 - .vecs = khazad_enc_tv_template,
2756 - .count = KHAZAD_ENC_TEST_VECTORS
2757 - },
2758 - .dec = {
2759 - .vecs = khazad_dec_tv_template,
2760 - .count = KHAZAD_DEC_TEST_VECTORS
2761 - }
2762 + .enc = __VECS(khazad_enc_tv_template),
2763 + .dec = __VECS(khazad_dec_tv_template)
2764 }
2765 }
2766 }, {
2767 @@ -3212,14 +3175,8 @@ static const struct alg_test_desc alg_te
2768 .test = alg_test_skcipher,
2769 .suite = {
2770 .cipher = {
2771 - .enc = {
2772 - .vecs = seed_enc_tv_template,
2773 - .count = SEED_ENC_TEST_VECTORS
2774 - },
2775 - .dec = {
2776 - .vecs = seed_dec_tv_template,
2777 - .count = SEED_DEC_TEST_VECTORS
2778 - }
2779 + .enc = __VECS(seed_enc_tv_template),
2780 + .dec = __VECS(seed_dec_tv_template)
2781 }
2782 }
2783 }, {
2784 @@ -3227,14 +3184,8 @@ static const struct alg_test_desc alg_te
2785 .test = alg_test_skcipher,
2786 .suite = {
2787 .cipher = {
2788 - .enc = {
2789 - .vecs = serpent_enc_tv_template,
2790 - .count = SERPENT_ENC_TEST_VECTORS
2791 - },
2792 - .dec = {
2793 - .vecs = serpent_dec_tv_template,
2794 - .count = SERPENT_DEC_TEST_VECTORS
2795 - }
2796 + .enc = __VECS(serpent_enc_tv_template),
2797 + .dec = __VECS(serpent_dec_tv_template)
2798 }
2799 }
2800 }, {
2801 @@ -3242,14 +3193,8 @@ static const struct alg_test_desc alg_te
2802 .test = alg_test_skcipher,
2803 .suite = {
2804 .cipher = {
2805 - .enc = {
2806 - .vecs = tea_enc_tv_template,
2807 - .count = TEA_ENC_TEST_VECTORS
2808 - },
2809 - .dec = {
2810 - .vecs = tea_dec_tv_template,
2811 - .count = TEA_DEC_TEST_VECTORS
2812 - }
2813 + .enc = __VECS(tea_enc_tv_template),
2814 + .dec = __VECS(tea_dec_tv_template)
2815 }
2816 }
2817 }, {
2818 @@ -3257,14 +3202,8 @@ static const struct alg_test_desc alg_te
2819 .test = alg_test_skcipher,
2820 .suite = {
2821 .cipher = {
2822 - .enc = {
2823 - .vecs = tnepres_enc_tv_template,
2824 - .count = TNEPRES_ENC_TEST_VECTORS
2825 - },
2826 - .dec = {
2827 - .vecs = tnepres_dec_tv_template,
2828 - .count = TNEPRES_DEC_TEST_VECTORS
2829 - }
2830 + .enc = __VECS(tnepres_enc_tv_template),
2831 + .dec = __VECS(tnepres_dec_tv_template)
2832 }
2833 }
2834 }, {
2835 @@ -3272,14 +3211,8 @@ static const struct alg_test_desc alg_te
2836 .test = alg_test_skcipher,
2837 .suite = {
2838 .cipher = {
2839 - .enc = {
2840 - .vecs = tf_enc_tv_template,
2841 - .count = TF_ENC_TEST_VECTORS
2842 - },
2843 - .dec = {
2844 - .vecs = tf_dec_tv_template,
2845 - .count = TF_DEC_TEST_VECTORS
2846 - }
2847 + .enc = __VECS(tf_enc_tv_template),
2848 + .dec = __VECS(tf_dec_tv_template)
2849 }
2850 }
2851 }, {
2852 @@ -3287,14 +3220,8 @@ static const struct alg_test_desc alg_te
2853 .test = alg_test_skcipher,
2854 .suite = {
2855 .cipher = {
2856 - .enc = {
2857 - .vecs = xeta_enc_tv_template,
2858 - .count = XETA_ENC_TEST_VECTORS
2859 - },
2860 - .dec = {
2861 - .vecs = xeta_dec_tv_template,
2862 - .count = XETA_DEC_TEST_VECTORS
2863 - }
2864 + .enc = __VECS(xeta_enc_tv_template),
2865 + .dec = __VECS(xeta_dec_tv_template)
2866 }
2867 }
2868 }, {
2869 @@ -3302,14 +3229,8 @@ static const struct alg_test_desc alg_te
2870 .test = alg_test_skcipher,
2871 .suite = {
2872 .cipher = {
2873 - .enc = {
2874 - .vecs = xtea_enc_tv_template,
2875 - .count = XTEA_ENC_TEST_VECTORS
2876 - },
2877 - .dec = {
2878 - .vecs = xtea_dec_tv_template,
2879 - .count = XTEA_DEC_TEST_VECTORS
2880 - }
2881 + .enc = __VECS(xtea_enc_tv_template),
2882 + .dec = __VECS(xtea_dec_tv_template)
2883 }
2884 }
2885 }, {
2886 @@ -3317,10 +3238,7 @@ static const struct alg_test_desc alg_te
2887 .test = alg_test_kpp,
2888 .fips_allowed = 1,
2889 .suite = {
2890 - .kpp = {
2891 - .vecs = ecdh_tv_template,
2892 - .count = ECDH_TEST_VECTORS
2893 - }
2894 + .kpp = __VECS(ecdh_tv_template)
2895 }
2896 }, {
2897 .alg = "gcm(aes)",
2898 @@ -3328,14 +3246,8 @@ static const struct alg_test_desc alg_te
2899 .fips_allowed = 1,
2900 .suite = {
2901 .aead = {
2902 - .enc = {
2903 - .vecs = aes_gcm_enc_tv_template,
2904 - .count = AES_GCM_ENC_TEST_VECTORS
2905 - },
2906 - .dec = {
2907 - .vecs = aes_gcm_dec_tv_template,
2908 - .count = AES_GCM_DEC_TEST_VECTORS
2909 - }
2910 + .enc = __VECS(aes_gcm_enc_tv_template),
2911 + .dec = __VECS(aes_gcm_dec_tv_template)
2912 }
2913 }
2914 }, {
2915 @@ -3343,136 +3255,94 @@ static const struct alg_test_desc alg_te
2916 .test = alg_test_hash,
2917 .fips_allowed = 1,
2918 .suite = {
2919 - .hash = {
2920 - .vecs = ghash_tv_template,
2921 - .count = GHASH_TEST_VECTORS
2922 - }
2923 + .hash = __VECS(ghash_tv_template)
2924 }
2925 }, {
2926 .alg = "hmac(crc32)",
2927 .test = alg_test_hash,
2928 .suite = {
2929 - .hash = {
2930 - .vecs = bfin_crc_tv_template,
2931 - .count = BFIN_CRC_TEST_VECTORS
2932 - }
2933 + .hash = __VECS(bfin_crc_tv_template)
2934 }
2935 }, {
2936 .alg = "hmac(md5)",
2937 .test = alg_test_hash,
2938 .suite = {
2939 - .hash = {
2940 - .vecs = hmac_md5_tv_template,
2941 - .count = HMAC_MD5_TEST_VECTORS
2942 - }
2943 + .hash = __VECS(hmac_md5_tv_template)
2944 }
2945 }, {
2946 .alg = "hmac(rmd128)",
2947 .test = alg_test_hash,
2948 .suite = {
2949 - .hash = {
2950 - .vecs = hmac_rmd128_tv_template,
2951 - .count = HMAC_RMD128_TEST_VECTORS
2952 - }
2953 + .hash = __VECS(hmac_rmd128_tv_template)
2954 }
2955 }, {
2956 .alg = "hmac(rmd160)",
2957 .test = alg_test_hash,
2958 .suite = {
2959 - .hash = {
2960 - .vecs = hmac_rmd160_tv_template,
2961 - .count = HMAC_RMD160_TEST_VECTORS
2962 - }
2963 + .hash = __VECS(hmac_rmd160_tv_template)
2964 }
2965 }, {
2966 .alg = "hmac(sha1)",
2967 .test = alg_test_hash,
2968 .fips_allowed = 1,
2969 .suite = {
2970 - .hash = {
2971 - .vecs = hmac_sha1_tv_template,
2972 - .count = HMAC_SHA1_TEST_VECTORS
2973 - }
2974 + .hash = __VECS(hmac_sha1_tv_template)
2975 }
2976 }, {
2977 .alg = "hmac(sha224)",
2978 .test = alg_test_hash,
2979 .fips_allowed = 1,
2980 .suite = {
2981 - .hash = {
2982 - .vecs = hmac_sha224_tv_template,
2983 - .count = HMAC_SHA224_TEST_VECTORS
2984 - }
2985 + .hash = __VECS(hmac_sha224_tv_template)
2986 }
2987 }, {
2988 .alg = "hmac(sha256)",
2989 .test = alg_test_hash,
2990 .fips_allowed = 1,
2991 .suite = {
2992 - .hash = {
2993 - .vecs = hmac_sha256_tv_template,
2994 - .count = HMAC_SHA256_TEST_VECTORS
2995 - }
2996 + .hash = __VECS(hmac_sha256_tv_template)
2997 }
2998 }, {
2999 .alg = "hmac(sha3-224)",
3000 .test = alg_test_hash,
3001 .fips_allowed = 1,
3002 .suite = {
3003 - .hash = {
3004 - .vecs = hmac_sha3_224_tv_template,
3005 - .count = HMAC_SHA3_224_TEST_VECTORS
3006 - }
3007 + .hash = __VECS(hmac_sha3_224_tv_template)
3008 }
3009 }, {
3010 .alg = "hmac(sha3-256)",
3011 .test = alg_test_hash,
3012 .fips_allowed = 1,
3013 .suite = {
3014 - .hash = {
3015 - .vecs = hmac_sha3_256_tv_template,
3016 - .count = HMAC_SHA3_256_TEST_VECTORS
3017 - }
3018 + .hash = __VECS(hmac_sha3_256_tv_template)
3019 }
3020 }, {
3021 .alg = "hmac(sha3-384)",
3022 .test = alg_test_hash,
3023 .fips_allowed = 1,
3024 .suite = {
3025 - .hash = {
3026 - .vecs = hmac_sha3_384_tv_template,
3027 - .count = HMAC_SHA3_384_TEST_VECTORS
3028 - }
3029 + .hash = __VECS(hmac_sha3_384_tv_template)
3030 }
3031 }, {
3032 .alg = "hmac(sha3-512)",
3033 .test = alg_test_hash,
3034 .fips_allowed = 1,
3035 .suite = {
3036 - .hash = {
3037 - .vecs = hmac_sha3_512_tv_template,
3038 - .count = HMAC_SHA3_512_TEST_VECTORS
3039 - }
3040 + .hash = __VECS(hmac_sha3_512_tv_template)
3041 }
3042 }, {
3043 .alg = "hmac(sha384)",
3044 .test = alg_test_hash,
3045 .fips_allowed = 1,
3046 .suite = {
3047 - .hash = {
3048 - .vecs = hmac_sha384_tv_template,
3049 - .count = HMAC_SHA384_TEST_VECTORS
3050 - }
3051 + .hash = __VECS(hmac_sha384_tv_template)
3052 }
3053 }, {
3054 .alg = "hmac(sha512)",
3055 .test = alg_test_hash,
3056 .fips_allowed = 1,
3057 .suite = {
3058 - .hash = {
3059 - .vecs = hmac_sha512_tv_template,
3060 - .count = HMAC_SHA512_TEST_VECTORS
3061 - }
3062 + .hash = __VECS(hmac_sha512_tv_template)
3063 }
3064 }, {
3065 .alg = "jitterentropy_rng",
3066 @@ -3484,14 +3354,8 @@ static const struct alg_test_desc alg_te
3067 .fips_allowed = 1,
3068 .suite = {
3069 .cipher = {
3070 - .enc = {
3071 - .vecs = aes_kw_enc_tv_template,
3072 - .count = ARRAY_SIZE(aes_kw_enc_tv_template)
3073 - },
3074 - .dec = {
3075 - .vecs = aes_kw_dec_tv_template,
3076 - .count = ARRAY_SIZE(aes_kw_dec_tv_template)
3077 - }
3078 + .enc = __VECS(aes_kw_enc_tv_template),
3079 + .dec = __VECS(aes_kw_dec_tv_template)
3080 }
3081 }
3082 }, {
3083 @@ -3499,14 +3363,8 @@ static const struct alg_test_desc alg_te
3084 .test = alg_test_skcipher,
3085 .suite = {
3086 .cipher = {
3087 - .enc = {
3088 - .vecs = aes_lrw_enc_tv_template,
3089 - .count = AES_LRW_ENC_TEST_VECTORS
3090 - },
3091 - .dec = {
3092 - .vecs = aes_lrw_dec_tv_template,
3093 - .count = AES_LRW_DEC_TEST_VECTORS
3094 - }
3095 + .enc = __VECS(aes_lrw_enc_tv_template),
3096 + .dec = __VECS(aes_lrw_dec_tv_template)
3097 }
3098 }
3099 }, {
3100 @@ -3514,14 +3372,8 @@ static const struct alg_test_desc alg_te
3101 .test = alg_test_skcipher,
3102 .suite = {
3103 .cipher = {
3104 - .enc = {
3105 - .vecs = camellia_lrw_enc_tv_template,
3106 - .count = CAMELLIA_LRW_ENC_TEST_VECTORS
3107 - },
3108 - .dec = {
3109 - .vecs = camellia_lrw_dec_tv_template,
3110 - .count = CAMELLIA_LRW_DEC_TEST_VECTORS
3111 - }
3112 + .enc = __VECS(camellia_lrw_enc_tv_template),
3113 + .dec = __VECS(camellia_lrw_dec_tv_template)
3114 }
3115 }
3116 }, {
3117 @@ -3529,14 +3381,8 @@ static const struct alg_test_desc alg_te
3118 .test = alg_test_skcipher,
3119 .suite = {
3120 .cipher = {
3121 - .enc = {
3122 - .vecs = cast6_lrw_enc_tv_template,
3123 - .count = CAST6_LRW_ENC_TEST_VECTORS
3124 - },
3125 - .dec = {
3126 - .vecs = cast6_lrw_dec_tv_template,
3127 - .count = CAST6_LRW_DEC_TEST_VECTORS
3128 - }
3129 + .enc = __VECS(cast6_lrw_enc_tv_template),
3130 + .dec = __VECS(cast6_lrw_dec_tv_template)
3131 }
3132 }
3133 }, {
3134 @@ -3544,14 +3390,8 @@ static const struct alg_test_desc alg_te
3135 .test = alg_test_skcipher,
3136 .suite = {
3137 .cipher = {
3138 - .enc = {
3139 - .vecs = serpent_lrw_enc_tv_template,
3140 - .count = SERPENT_LRW_ENC_TEST_VECTORS
3141 - },
3142 - .dec = {
3143 - .vecs = serpent_lrw_dec_tv_template,
3144 - .count = SERPENT_LRW_DEC_TEST_VECTORS
3145 - }
3146 + .enc = __VECS(serpent_lrw_enc_tv_template),
3147 + .dec = __VECS(serpent_lrw_dec_tv_template)
3148 }
3149 }
3150 }, {
3151 @@ -3559,14 +3399,8 @@ static const struct alg_test_desc alg_te
3152 .test = alg_test_skcipher,
3153 .suite = {
3154 .cipher = {
3155 - .enc = {
3156 - .vecs = tf_lrw_enc_tv_template,
3157 - .count = TF_LRW_ENC_TEST_VECTORS
3158 - },
3159 - .dec = {
3160 - .vecs = tf_lrw_dec_tv_template,
3161 - .count = TF_LRW_DEC_TEST_VECTORS
3162 - }
3163 + .enc = __VECS(tf_lrw_enc_tv_template),
3164 + .dec = __VECS(tf_lrw_dec_tv_template)
3165 }
3166 }
3167 }, {
3168 @@ -3575,14 +3409,8 @@ static const struct alg_test_desc alg_te
3169 .fips_allowed = 1,
3170 .suite = {
3171 .comp = {
3172 - .comp = {
3173 - .vecs = lz4_comp_tv_template,
3174 - .count = LZ4_COMP_TEST_VECTORS
3175 - },
3176 - .decomp = {
3177 - .vecs = lz4_decomp_tv_template,
3178 - .count = LZ4_DECOMP_TEST_VECTORS
3179 - }
3180 + .comp = __VECS(lz4_comp_tv_template),
3181 + .decomp = __VECS(lz4_decomp_tv_template)
3182 }
3183 }
3184 }, {
3185 @@ -3591,14 +3419,8 @@ static const struct alg_test_desc alg_te
3186 .fips_allowed = 1,
3187 .suite = {
3188 .comp = {
3189 - .comp = {
3190 - .vecs = lz4hc_comp_tv_template,
3191 - .count = LZ4HC_COMP_TEST_VECTORS
3192 - },
3193 - .decomp = {
3194 - .vecs = lz4hc_decomp_tv_template,
3195 - .count = LZ4HC_DECOMP_TEST_VECTORS
3196 - }
3197 + .comp = __VECS(lz4hc_comp_tv_template),
3198 + .decomp = __VECS(lz4hc_decomp_tv_template)
3199 }
3200 }
3201 }, {
3202 @@ -3607,42 +3429,27 @@ static const struct alg_test_desc alg_te
3203 .fips_allowed = 1,
3204 .suite = {
3205 .comp = {
3206 - .comp = {
3207 - .vecs = lzo_comp_tv_template,
3208 - .count = LZO_COMP_TEST_VECTORS
3209 - },
3210 - .decomp = {
3211 - .vecs = lzo_decomp_tv_template,
3212 - .count = LZO_DECOMP_TEST_VECTORS
3213 - }
3214 + .comp = __VECS(lzo_comp_tv_template),
3215 + .decomp = __VECS(lzo_decomp_tv_template)
3216 }
3217 }
3218 }, {
3219 .alg = "md4",
3220 .test = alg_test_hash,
3221 .suite = {
3222 - .hash = {
3223 - .vecs = md4_tv_template,
3224 - .count = MD4_TEST_VECTORS
3225 - }
3226 + .hash = __VECS(md4_tv_template)
3227 }
3228 }, {
3229 .alg = "md5",
3230 .test = alg_test_hash,
3231 .suite = {
3232 - .hash = {
3233 - .vecs = md5_tv_template,
3234 - .count = MD5_TEST_VECTORS
3235 - }
3236 + .hash = __VECS(md5_tv_template)
3237 }
3238 }, {
3239 .alg = "michael_mic",
3240 .test = alg_test_hash,
3241 .suite = {
3242 - .hash = {
3243 - .vecs = michael_mic_tv_template,
3244 - .count = MICHAEL_MIC_TEST_VECTORS
3245 - }
3246 + .hash = __VECS(michael_mic_tv_template)
3247 }
3248 }, {
3249 .alg = "ofb(aes)",
3250 @@ -3650,14 +3457,8 @@ static const struct alg_test_desc alg_te
3251 .fips_allowed = 1,
3252 .suite = {
3253 .cipher = {
3254 - .enc = {
3255 - .vecs = aes_ofb_enc_tv_template,
3256 - .count = AES_OFB_ENC_TEST_VECTORS
3257 - },
3258 - .dec = {
3259 - .vecs = aes_ofb_dec_tv_template,
3260 - .count = AES_OFB_DEC_TEST_VECTORS
3261 - }
3262 + .enc = __VECS(aes_ofb_enc_tv_template),
3263 + .dec = __VECS(aes_ofb_dec_tv_template)
3264 }
3265 }
3266 }, {
3267 @@ -3665,24 +3466,15 @@ static const struct alg_test_desc alg_te
3268 .test = alg_test_skcipher,
3269 .suite = {
3270 .cipher = {
3271 - .enc = {
3272 - .vecs = fcrypt_pcbc_enc_tv_template,
3273 - .count = FCRYPT_ENC_TEST_VECTORS
3274 - },
3275 - .dec = {
3276 - .vecs = fcrypt_pcbc_dec_tv_template,
3277 - .count = FCRYPT_DEC_TEST_VECTORS
3278 - }
3279 + .enc = __VECS(fcrypt_pcbc_enc_tv_template),
3280 + .dec = __VECS(fcrypt_pcbc_dec_tv_template)
3281 }
3282 }
3283 }, {
3284 .alg = "poly1305",
3285 .test = alg_test_hash,
3286 .suite = {
3287 - .hash = {
3288 - .vecs = poly1305_tv_template,
3289 - .count = POLY1305_TEST_VECTORS
3290 - }
3291 + .hash = __VECS(poly1305_tv_template)
3292 }
3293 }, {
3294 .alg = "rfc3686(ctr(aes))",
3295 @@ -3690,14 +3482,8 @@ static const struct alg_test_desc alg_te
3296 .fips_allowed = 1,
3297 .suite = {
3298 .cipher = {
3299 - .enc = {
3300 - .vecs = aes_ctr_rfc3686_enc_tv_template,
3301 - .count = AES_CTR_3686_ENC_TEST_VECTORS
3302 - },
3303 - .dec = {
3304 - .vecs = aes_ctr_rfc3686_dec_tv_template,
3305 - .count = AES_CTR_3686_DEC_TEST_VECTORS
3306 - }
3307 + .enc = __VECS(aes_ctr_rfc3686_enc_tv_template),
3308 + .dec = __VECS(aes_ctr_rfc3686_dec_tv_template)
3309 }
3310 }
3311 }, {
3312 @@ -3706,14 +3492,8 @@ static const struct alg_test_desc alg_te
3313 .fips_allowed = 1,
3314 .suite = {
3315 .aead = {
3316 - .enc = {
3317 - .vecs = aes_gcm_rfc4106_enc_tv_template,
3318 - .count = AES_GCM_4106_ENC_TEST_VECTORS
3319 - },
3320 - .dec = {
3321 - .vecs = aes_gcm_rfc4106_dec_tv_template,
3322 - .count = AES_GCM_4106_DEC_TEST_VECTORS
3323 - }
3324 + .enc = __VECS(aes_gcm_rfc4106_enc_tv_template),
3325 + .dec = __VECS(aes_gcm_rfc4106_dec_tv_template)
3326 }
3327 }
3328 }, {
3329 @@ -3722,14 +3502,8 @@ static const struct alg_test_desc alg_te
3330 .fips_allowed = 1,
3331 .suite = {
3332 .aead = {
3333 - .enc = {
3334 - .vecs = aes_ccm_rfc4309_enc_tv_template,
3335 - .count = AES_CCM_4309_ENC_TEST_VECTORS
3336 - },
3337 - .dec = {
3338 - .vecs = aes_ccm_rfc4309_dec_tv_template,
3339 - .count = AES_CCM_4309_DEC_TEST_VECTORS
3340 - }
3341 + .enc = __VECS(aes_ccm_rfc4309_enc_tv_template),
3342 + .dec = __VECS(aes_ccm_rfc4309_dec_tv_template)
3343 }
3344 }
3345 }, {
3346 @@ -3737,14 +3511,8 @@ static const struct alg_test_desc alg_te
3347 .test = alg_test_aead,
3348 .suite = {
3349 .aead = {
3350 - .enc = {
3351 - .vecs = aes_gcm_rfc4543_enc_tv_template,
3352 - .count = AES_GCM_4543_ENC_TEST_VECTORS
3353 - },
3354 - .dec = {
3355 - .vecs = aes_gcm_rfc4543_dec_tv_template,
3356 - .count = AES_GCM_4543_DEC_TEST_VECTORS
3357 - },
3358 + .enc = __VECS(aes_gcm_rfc4543_enc_tv_template),
3359 + .dec = __VECS(aes_gcm_rfc4543_dec_tv_template),
3360 }
3361 }
3362 }, {
3363 @@ -3752,14 +3520,8 @@ static const struct alg_test_desc alg_te
3364 .test = alg_test_aead,
3365 .suite = {
3366 .aead = {
3367 - .enc = {
3368 - .vecs = rfc7539_enc_tv_template,
3369 - .count = RFC7539_ENC_TEST_VECTORS
3370 - },
3371 - .dec = {
3372 - .vecs = rfc7539_dec_tv_template,
3373 - .count = RFC7539_DEC_TEST_VECTORS
3374 - },
3375 + .enc = __VECS(rfc7539_enc_tv_template),
3376 + .dec = __VECS(rfc7539_dec_tv_template),
3377 }
3378 }
3379 }, {
3380 @@ -3767,71 +3529,47 @@ static const struct alg_test_desc alg_te
3381 .test = alg_test_aead,
3382 .suite = {
3383 .aead = {
3384 - .enc = {
3385 - .vecs = rfc7539esp_enc_tv_template,
3386 - .count = RFC7539ESP_ENC_TEST_VECTORS
3387 - },
3388 - .dec = {
3389 - .vecs = rfc7539esp_dec_tv_template,
3390 - .count = RFC7539ESP_DEC_TEST_VECTORS
3391 - },
3392 + .enc = __VECS(rfc7539esp_enc_tv_template),
3393 + .dec = __VECS(rfc7539esp_dec_tv_template),
3394 }
3395 }
3396 }, {
3397 .alg = "rmd128",
3398 .test = alg_test_hash,
3399 .suite = {
3400 - .hash = {
3401 - .vecs = rmd128_tv_template,
3402 - .count = RMD128_TEST_VECTORS
3403 - }
3404 + .hash = __VECS(rmd128_tv_template)
3405 }
3406 }, {
3407 .alg = "rmd160",
3408 .test = alg_test_hash,
3409 .suite = {
3410 - .hash = {
3411 - .vecs = rmd160_tv_template,
3412 - .count = RMD160_TEST_VECTORS
3413 - }
3414 + .hash = __VECS(rmd160_tv_template)
3415 }
3416 }, {
3417 .alg = "rmd256",
3418 .test = alg_test_hash,
3419 .suite = {
3420 - .hash = {
3421 - .vecs = rmd256_tv_template,
3422 - .count = RMD256_TEST_VECTORS
3423 - }
3424 + .hash = __VECS(rmd256_tv_template)
3425 }
3426 }, {
3427 .alg = "rmd320",
3428 .test = alg_test_hash,
3429 .suite = {
3430 - .hash = {
3431 - .vecs = rmd320_tv_template,
3432 - .count = RMD320_TEST_VECTORS
3433 - }
3434 + .hash = __VECS(rmd320_tv_template)
3435 }
3436 }, {
3437 .alg = "rsa",
3438 .test = alg_test_akcipher,
3439 .fips_allowed = 1,
3440 .suite = {
3441 - .akcipher = {
3442 - .vecs = rsa_tv_template,
3443 - .count = RSA_TEST_VECTORS
3444 - }
3445 + .akcipher = __VECS(rsa_tv_template)
3446 }
3447 }, {
3448 .alg = "salsa20",
3449 .test = alg_test_skcipher,
3450 .suite = {
3451 .cipher = {
3452 - .enc = {
3453 - .vecs = salsa20_stream_enc_tv_template,
3454 - .count = SALSA20_STREAM_ENC_TEST_VECTORS
3455 - }
3456 + .enc = __VECS(salsa20_stream_enc_tv_template)
3457 }
3458 }
3459 }, {
3460 @@ -3839,162 +3577,120 @@ static const struct alg_test_desc alg_te
3461 .test = alg_test_hash,
3462 .fips_allowed = 1,
3463 .suite = {
3464 - .hash = {
3465 - .vecs = sha1_tv_template,
3466 - .count = SHA1_TEST_VECTORS
3467 - }
3468 + .hash = __VECS(sha1_tv_template)
3469 }
3470 }, {
3471 .alg = "sha224",
3472 .test = alg_test_hash,
3473 .fips_allowed = 1,
3474 .suite = {
3475 - .hash = {
3476 - .vecs = sha224_tv_template,
3477 - .count = SHA224_TEST_VECTORS
3478 - }
3479 + .hash = __VECS(sha224_tv_template)
3480 }
3481 }, {
3482 .alg = "sha256",
3483 .test = alg_test_hash,
3484 .fips_allowed = 1,
3485 .suite = {
3486 - .hash = {
3487 - .vecs = sha256_tv_template,
3488 - .count = SHA256_TEST_VECTORS
3489 - }
3490 + .hash = __VECS(sha256_tv_template)
3491 }
3492 }, {
3493 .alg = "sha3-224",
3494 .test = alg_test_hash,
3495 .fips_allowed = 1,
3496 .suite = {
3497 - .hash = {
3498 - .vecs = sha3_224_tv_template,
3499 - .count = SHA3_224_TEST_VECTORS
3500 - }
3501 + .hash = __VECS(sha3_224_tv_template)
3502 }
3503 }, {
3504 .alg = "sha3-256",
3505 .test = alg_test_hash,
3506 .fips_allowed = 1,
3507 .suite = {
3508 - .hash = {
3509 - .vecs = sha3_256_tv_template,
3510 - .count = SHA3_256_TEST_VECTORS
3511 - }
3512 + .hash = __VECS(sha3_256_tv_template)
3513 }
3514 }, {
3515 .alg = "sha3-384",
3516 .test = alg_test_hash,
3517 .fips_allowed = 1,
3518 .suite = {
3519 - .hash = {
3520 - .vecs = sha3_384_tv_template,
3521 - .count = SHA3_384_TEST_VECTORS
3522 - }
3523 + .hash = __VECS(sha3_384_tv_template)
3524 }
3525 }, {
3526 .alg = "sha3-512",
3527 .test = alg_test_hash,
3528 .fips_allowed = 1,
3529 .suite = {
3530 - .hash = {
3531 - .vecs = sha3_512_tv_template,
3532 - .count = SHA3_512_TEST_VECTORS
3533 - }
3534 + .hash = __VECS(sha3_512_tv_template)
3535 }
3536 }, {
3537 .alg = "sha384",
3538 .test = alg_test_hash,
3539 .fips_allowed = 1,
3540 .suite = {
3541 - .hash = {
3542 - .vecs = sha384_tv_template,
3543 - .count = SHA384_TEST_VECTORS
3544 - }
3545 + .hash = __VECS(sha384_tv_template)
3546 }
3547 }, {
3548 .alg = "sha512",
3549 .test = alg_test_hash,
3550 .fips_allowed = 1,
3551 .suite = {
3552 - .hash = {
3553 - .vecs = sha512_tv_template,
3554 - .count = SHA512_TEST_VECTORS
3555 - }
3556 + .hash = __VECS(sha512_tv_template)
3557 }
3558 }, {
3559 .alg = "tgr128",
3560 .test = alg_test_hash,
3561 .suite = {
3562 - .hash = {
3563 - .vecs = tgr128_tv_template,
3564 - .count = TGR128_TEST_VECTORS
3565 - }
3566 + .hash = __VECS(tgr128_tv_template)
3567 }
3568 }, {
3569 .alg = "tgr160",
3570 .test = alg_test_hash,
3571 .suite = {
3572 - .hash = {
3573 - .vecs = tgr160_tv_template,
3574 - .count = TGR160_TEST_VECTORS
3575 - }
3576 + .hash = __VECS(tgr160_tv_template)
3577 }
3578 }, {
3579 .alg = "tgr192",
3580 .test = alg_test_hash,
3581 .suite = {
3582 - .hash = {
3583 - .vecs = tgr192_tv_template,
3584 - .count = TGR192_TEST_VECTORS
3585 + .hash = __VECS(tgr192_tv_template)
3586 + }
3587 + }, {
3588 + .alg = "tls10(hmac(sha1),cbc(aes))",
3589 + .test = alg_test_tls,
3590 + .suite = {
3591 + .tls = {
3592 + .enc = __VECS(tls_enc_tv_template),
3593 + .dec = __VECS(tls_dec_tv_template)
3594 }
3595 }
3596 }, {
3597 .alg = "vmac(aes)",
3598 .test = alg_test_hash,
3599 .suite = {
3600 - .hash = {
3601 - .vecs = aes_vmac128_tv_template,
3602 - .count = VMAC_AES_TEST_VECTORS
3603 - }
3604 + .hash = __VECS(aes_vmac128_tv_template)
3605 }
3606 }, {
3607 .alg = "wp256",
3608 .test = alg_test_hash,
3609 .suite = {
3610 - .hash = {
3611 - .vecs = wp256_tv_template,
3612 - .count = WP256_TEST_VECTORS
3613 - }
3614 + .hash = __VECS(wp256_tv_template)
3615 }
3616 }, {
3617 .alg = "wp384",
3618 .test = alg_test_hash,
3619 .suite = {
3620 - .hash = {
3621 - .vecs = wp384_tv_template,
3622 - .count = WP384_TEST_VECTORS
3623 - }
3624 + .hash = __VECS(wp384_tv_template)
3625 }
3626 }, {
3627 .alg = "wp512",
3628 .test = alg_test_hash,
3629 .suite = {
3630 - .hash = {
3631 - .vecs = wp512_tv_template,
3632 - .count = WP512_TEST_VECTORS
3633 - }
3634 + .hash = __VECS(wp512_tv_template)
3635 }
3636 }, {
3637 .alg = "xcbc(aes)",
3638 .test = alg_test_hash,
3639 .suite = {
3640 - .hash = {
3641 - .vecs = aes_xcbc128_tv_template,
3642 - .count = XCBC_AES_TEST_VECTORS
3643 - }
3644 + .hash = __VECS(aes_xcbc128_tv_template)
3645 }
3646 }, {
3647 .alg = "xts(aes)",
3648 @@ -4002,14 +3698,8 @@ static const struct alg_test_desc alg_te
3649 .fips_allowed = 1,
3650 .suite = {
3651 .cipher = {
3652 - .enc = {
3653 - .vecs = aes_xts_enc_tv_template,
3654 - .count = AES_XTS_ENC_TEST_VECTORS
3655 - },
3656 - .dec = {
3657 - .vecs = aes_xts_dec_tv_template,
3658 - .count = AES_XTS_DEC_TEST_VECTORS
3659 - }
3660 + .enc = __VECS(aes_xts_enc_tv_template),
3661 + .dec = __VECS(aes_xts_dec_tv_template)
3662 }
3663 }
3664 }, {
3665 @@ -4017,14 +3707,8 @@ static const struct alg_test_desc alg_te
3666 .test = alg_test_skcipher,
3667 .suite = {
3668 .cipher = {
3669 - .enc = {
3670 - .vecs = camellia_xts_enc_tv_template,
3671 - .count = CAMELLIA_XTS_ENC_TEST_VECTORS
3672 - },
3673 - .dec = {
3674 - .vecs = camellia_xts_dec_tv_template,
3675 - .count = CAMELLIA_XTS_DEC_TEST_VECTORS
3676 - }
3677 + .enc = __VECS(camellia_xts_enc_tv_template),
3678 + .dec = __VECS(camellia_xts_dec_tv_template)
3679 }
3680 }
3681 }, {
3682 @@ -4032,14 +3716,8 @@ static const struct alg_test_desc alg_te
3683 .test = alg_test_skcipher,
3684 .suite = {
3685 .cipher = {
3686 - .enc = {
3687 - .vecs = cast6_xts_enc_tv_template,
3688 - .count = CAST6_XTS_ENC_TEST_VECTORS
3689 - },
3690 - .dec = {
3691 - .vecs = cast6_xts_dec_tv_template,
3692 - .count = CAST6_XTS_DEC_TEST_VECTORS
3693 - }
3694 + .enc = __VECS(cast6_xts_enc_tv_template),
3695 + .dec = __VECS(cast6_xts_dec_tv_template)
3696 }
3697 }
3698 }, {
3699 @@ -4047,14 +3725,8 @@ static const struct alg_test_desc alg_te
3700 .test = alg_test_skcipher,
3701 .suite = {
3702 .cipher = {
3703 - .enc = {
3704 - .vecs = serpent_xts_enc_tv_template,
3705 - .count = SERPENT_XTS_ENC_TEST_VECTORS
3706 - },
3707 - .dec = {
3708 - .vecs = serpent_xts_dec_tv_template,
3709 - .count = SERPENT_XTS_DEC_TEST_VECTORS
3710 - }
3711 + .enc = __VECS(serpent_xts_enc_tv_template),
3712 + .dec = __VECS(serpent_xts_dec_tv_template)
3713 }
3714 }
3715 }, {
3716 @@ -4062,14 +3734,8 @@ static const struct alg_test_desc alg_te
3717 .test = alg_test_skcipher,
3718 .suite = {
3719 .cipher = {
3720 - .enc = {
3721 - .vecs = tf_xts_enc_tv_template,
3722 - .count = TF_XTS_ENC_TEST_VECTORS
3723 - },
3724 - .dec = {
3725 - .vecs = tf_xts_dec_tv_template,
3726 - .count = TF_XTS_DEC_TEST_VECTORS
3727 - }
3728 + .enc = __VECS(tf_xts_enc_tv_template),
3729 + .dec = __VECS(tf_xts_dec_tv_template)
3730 }
3731 }
3732 }
3733 --- a/crypto/testmgr.h
3734 +++ b/crypto/testmgr.h
3735 @@ -34,9 +34,9 @@
3736
3737 struct hash_testvec {
3738 /* only used with keyed hash algorithms */
3739 - char *key;
3740 - char *plaintext;
3741 - char *digest;
3742 + const char *key;
3743 + const char *plaintext;
3744 + const char *digest;
3745 unsigned char tap[MAX_TAP];
3746 unsigned short psize;
3747 unsigned char np;
3748 @@ -63,11 +63,11 @@ struct hash_testvec {
3749 */
3750
3751 struct cipher_testvec {
3752 - char *key;
3753 - char *iv;
3754 - char *iv_out;
3755 - char *input;
3756 - char *result;
3757 + const char *key;
3758 + const char *iv;
3759 + const char *iv_out;
3760 + const char *input;
3761 + const char *result;
3762 unsigned short tap[MAX_TAP];
3763 int np;
3764 unsigned char also_non_np;
3765 @@ -80,11 +80,11 @@ struct cipher_testvec {
3766 };
3767
3768 struct aead_testvec {
3769 - char *key;
3770 - char *iv;
3771 - char *input;
3772 - char *assoc;
3773 - char *result;
3774 + const char *key;
3775 + const char *iv;
3776 + const char *input;
3777 + const char *assoc;
3778 + const char *result;
3779 unsigned char tap[MAX_TAP];
3780 unsigned char atap[MAX_TAP];
3781 int np;
3782 @@ -99,10 +99,10 @@ struct aead_testvec {
3783 };
3784
3785 struct cprng_testvec {
3786 - char *key;
3787 - char *dt;
3788 - char *v;
3789 - char *result;
3790 + const char *key;
3791 + const char *dt;
3792 + const char *v;
3793 + const char *result;
3794 unsigned char klen;
3795 unsigned short dtlen;
3796 unsigned short vlen;
3797 @@ -111,24 +111,38 @@ struct cprng_testvec {
3798 };
3799
3800 struct drbg_testvec {
3801 - unsigned char *entropy;
3802 + const unsigned char *entropy;
3803 size_t entropylen;
3804 - unsigned char *entpra;
3805 - unsigned char *entprb;
3806 + const unsigned char *entpra;
3807 + const unsigned char *entprb;
3808 size_t entprlen;
3809 - unsigned char *addtla;
3810 - unsigned char *addtlb;
3811 + const unsigned char *addtla;
3812 + const unsigned char *addtlb;
3813 size_t addtllen;
3814 - unsigned char *pers;
3815 + const unsigned char *pers;
3816 size_t perslen;
3817 - unsigned char *expected;
3818 + const unsigned char *expected;
3819 size_t expectedlen;
3820 };
3821
3822 +struct tls_testvec {
3823 + char *key; /* wrapped keys for encryption and authentication */
3824 + char *iv; /* initialization vector */
3825 + char *input; /* input data */
3826 + char *assoc; /* associated data: seq num, type, version, input len */
3827 + char *result; /* result data */
3828 + unsigned char fail; /* the test failure is expected */
3829 + unsigned char novrfy; /* dec verification failure expected */
3830 + unsigned char klen; /* key length */
3831 + unsigned short ilen; /* input data length */
3832 + unsigned short alen; /* associated data length */
3833 + unsigned short rlen; /* result length */
3834 +};
3835 +
3836 struct akcipher_testvec {
3837 - unsigned char *key;
3838 - unsigned char *m;
3839 - unsigned char *c;
3840 + const unsigned char *key;
3841 + const unsigned char *m;
3842 + const unsigned char *c;
3843 unsigned int key_len;
3844 unsigned int m_size;
3845 unsigned int c_size;
3846 @@ -136,27 +150,227 @@ struct akcipher_testvec {
3847 };
3848
3849 struct kpp_testvec {
3850 - unsigned char *secret;
3851 - unsigned char *b_public;
3852 - unsigned char *expected_a_public;
3853 - unsigned char *expected_ss;
3854 + const unsigned char *secret;
3855 + const unsigned char *b_public;
3856 + const unsigned char *expected_a_public;
3857 + const unsigned char *expected_ss;
3858 unsigned short secret_size;
3859 unsigned short b_public_size;
3860 unsigned short expected_a_public_size;
3861 unsigned short expected_ss_size;
3862 };
3863
3864 -static char zeroed_string[48];
3865 +static const char zeroed_string[48];
3866
3867 /*
3868 - * RSA test vectors. Borrowed from openSSL.
3869 + * TLS1.0 synthetic test vectors
3870 */
3871 -#ifdef CONFIG_CRYPTO_FIPS
3872 -#define RSA_TEST_VECTORS 2
3873 +static struct tls_testvec tls_enc_tv_template[] = {
3874 + {
3875 +#ifdef __LITTLE_ENDIAN
3876 + .key = "\x08\x00" /* rta length */
3877 + "\x01\x00" /* rta type */
3878 +#else
3879 + .key = "\x00\x08" /* rta length */
3880 + "\x00\x01" /* rta type */
3881 +#endif
3882 + "\x00\x00\x00\x10" /* enc key length */
3883 + "authenticationkey20benckeyis16_bytes",
3884 + .klen = 8 + 20 + 16,
3885 + .iv = "iv0123456789abcd",
3886 + .input = "Single block msg",
3887 + .ilen = 16,
3888 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3889 + "\x00\x03\x01\x00\x10",
3890 + .alen = 13,
3891 + .result = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3892 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3893 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3894 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3895 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3896 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3897 + .rlen = 16 + 20 + 12,
3898 + }, {
3899 +#ifdef __LITTLE_ENDIAN
3900 + .key = "\x08\x00" /* rta length */
3901 + "\x01\x00" /* rta type */
3902 +#else
3903 + .key = "\x00\x08" /* rta length */
3904 + "\x00\x01" /* rta type */
3905 +#endif
3906 + "\x00\x00\x00\x10" /* enc key length */
3907 + "authenticationkey20benckeyis16_bytes",
3908 + .klen = 8 + 20 + 16,
3909 + .iv = "iv0123456789abcd",
3910 + .input = "",
3911 + .ilen = 0,
3912 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3913 + "\x00\x03\x01\x00\x00",
3914 + .alen = 13,
3915 + .result = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
3916 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
3917 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
3918 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
3919 + .rlen = 20 + 12,
3920 + }, {
3921 +#ifdef __LITTLE_ENDIAN
3922 + .key = "\x08\x00" /* rta length */
3923 + "\x01\x00" /* rta type */
3924 +#else
3925 + .key = "\x00\x08" /* rta length */
3926 + "\x00\x01" /* rta type */
3927 +#endif
3928 + "\x00\x00\x00\x10" /* enc key length */
3929 + "authenticationkey20benckeyis16_bytes",
3930 + .klen = 8 + 20 + 16,
3931 + .iv = "iv0123456789abcd",
3932 + .input = "285 bytes plaintext285 bytes plaintext285 bytes"
3933 + " plaintext285 bytes plaintext285 bytes plaintext285"
3934 + " bytes plaintext285 bytes plaintext285 bytes"
3935 + " plaintext285 bytes plaintext285 bytes plaintext285"
3936 + " bytes plaintext285 bytes plaintext285 bytes"
3937 + " plaintext285 bytes plaintext285 bytes plaintext285"
3938 + " bytes plaintext285 bytes plaintext",
3939 + .ilen = 285,
3940 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3941 + "\x00\x03\x01\x01\x1d",
3942 + .alen = 13,
3943 + .result = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
3944 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
3945 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
3946 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
3947 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
3948 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
3949 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
3950 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
3951 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
3952 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
3953 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
3954 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
3955 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
3956 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
3957 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
3958 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
3959 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
3960 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
3961 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
3962 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
3963 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
3964 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
3965 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
3966 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
3967 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
3968 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
3969 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
3970 + .rlen = 285 + 20 + 15,
3971 + }
3972 +};
3973 +
3974 +static struct tls_testvec tls_dec_tv_template[] = {
3975 + {
3976 +#ifdef __LITTLE_ENDIAN
3977 + .key = "\x08\x00" /* rta length */
3978 + "\x01\x00" /* rta type */
3979 +#else
3980 + .key = "\x00\x08" /* rta length */
3981 + "\x00\x01" /* rta type */
3982 +#endif
3983 + "\x00\x00\x00\x10" /* enc key length */
3984 + "authenticationkey20benckeyis16_bytes",
3985 + .klen = 8 + 20 + 16,
3986 + .iv = "iv0123456789abcd",
3987 + .input = "\xd5\xac\xb\xd2\xac\xad\x3f\xb1"
3988 + "\x59\x79\x1e\x91\x5f\x52\x14\x9c"
3989 + "\xc0\x75\xd8\x4c\x97\x0f\x07\x73"
3990 + "\xdc\x89\x47\x49\x49\xcb\x30\x6b"
3991 + "\x1b\x45\x23\xa1\xd0\x51\xcf\x02"
3992 + "\x2e\xa8\x5d\xa0\xfe\xca\x82\x61",
3993 + .ilen = 16 + 20 + 12,
3994 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
3995 + "\x00\x03\x01\x00\x30",
3996 + .alen = 13,
3997 + .result = "Single block msg",
3998 + .rlen = 16,
3999 + }, {
4000 +#ifdef __LITTLE_ENDIAN
4001 + .key = "\x08\x00" /* rta length */
4002 + "\x01\x00" /* rta type */
4003 #else
4004 -#define RSA_TEST_VECTORS 5
4005 + .key = "\x00\x08" /* rta length */
4006 + "\x00\x01" /* rta type */
4007 #endif
4008 -static struct akcipher_testvec rsa_tv_template[] = {
4009 + "\x00\x00\x00\x10" /* enc key length */
4010 + "authenticationkey20benckeyis16_bytes",
4011 + .klen = 8 + 20 + 16,
4012 + .iv = "iv0123456789abcd",
4013 + .input = "\x58\x2a\x11\xc\x86\x8e\x4b\x67"
4014 + "\x2d\x16\x26\x1a\xac\x4b\xe2\x1a"
4015 + "\xe9\x6a\xcc\x4d\x6f\x79\x8a\x45"
4016 + "\x1f\x4e\x27\xf2\xa7\x59\xb4\x5a",
4017 + .ilen = 20 + 12,
4018 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
4019 + "\x00\x03\x01\x00\x20",
4020 + .alen = 13,
4021 + .result = "",
4022 + .rlen = 0,
4023 + }, {
4024 +#ifdef __LITTLE_ENDIAN
4025 + .key = "\x08\x00" /* rta length */
4026 + "\x01\x00" /* rta type */
4027 +#else
4028 + .key = "\x00\x08" /* rta length */
4029 + "\x00\x01" /* rta type */
4030 +#endif
4031 + "\x00\x00\x00\x10" /* enc key length */
4032 + "authenticationkey20benckeyis16_bytes",
4033 + .klen = 8 + 20 + 16,
4034 + .iv = "iv0123456789abcd",
4035 + .input = "\x80\x23\x82\x44\x14\x2a\x1d\x94\xc\xc2\x1d\xd"
4036 + "\x3a\x32\x89\x4c\x57\x30\xa8\x89\x76\x46\xcc\x90"
4037 + "\x1d\x88\xb8\xa6\x1a\x58\xe\x2d\xeb\x2c\xc7\x3a"
4038 + "\x52\x4e\xdb\xb3\x1e\x83\x11\xf5\x3c\xce\x6e\x94"
4039 + "\xd3\x26\x6a\x9a\xd\xbd\xc7\x98\xb9\xb3\x3a\x51"
4040 + "\x1e\x4\x84\x8a\x8f\x54\x9a\x51\x69\x9c\xce\x31"
4041 + "\x8d\x5d\x8b\xee\x5f\x70\xc\xc9\xb8\x50\x54\xf8"
4042 + "\xb2\x4a\x7a\xcd\xeb\x7a\x82\x81\xc6\x41\xc8\x50"
4043 + "\x91\x8d\xc8\xed\xcd\x40\x8f\x55\xd1\xec\xc9\xac"
4044 + "\x15\x18\xf9\x20\xa0\xed\x18\xa1\xe3\x56\xe3\x14"
4045 + "\xe5\xe8\x66\x63\x20\xed\xe4\x62\x9d\xa3\xa4\x1d"
4046 + "\x81\x89\x18\xf2\x36\xae\xc8\x8a\x2b\xbc\xc3\xb8"
4047 + "\x80\xf\x97\x21\x36\x39\x8\x84\x23\x18\x9e\x9c"
4048 + "\x72\x32\x75\x2d\x2e\xf9\x60\xb\xe8\xcc\xd9\x74"
4049 + "\x4\x1b\x8e\x99\xc1\x94\xee\xd0\xac\x4e\xfc\x7e"
4050 + "\xf1\x96\xb3\xe7\x14\xb8\xf2\xc\x25\x97\x82\x6b"
4051 + "\xbd\x0\x65\xab\x5c\xe3\x16\xfb\x68\xef\xea\x9d"
4052 + "\xff\x44\x1d\x2a\x44\xf5\xc8\x56\x77\xb7\xbf\x13"
4053 + "\xc8\x54\xdb\x92\xfe\x16\x4c\xbe\x18\xe9\xb\x8d"
4054 + "\xb\xd4\x43\x58\x43\xaa\xf4\x3\x80\x97\x62\xd5"
4055 + "\xdf\x3c\x28\xaa\xee\x48\x4b\x55\x41\x1b\x31\x2"
4056 + "\xbe\xa0\x1c\xbd\xb7\x22\x2a\xe5\x53\x72\x73\x20"
4057 + "\x44\x4f\xe6\x1\x2b\x34\x33\x11\x7d\xfb\x10\xc1"
4058 + "\x66\x7c\xa6\xf4\x48\x36\x5e\x2\xda\x41\x4b\x3e"
4059 + "\xe7\x80\x17\x17\xce\xf1\x3e\x6a\x8e\x26\xf3\xb7"
4060 + "\x2b\x85\xd\x31\x8d\xba\x6c\x22\xb4\x28\x55\x7e"
4061 + "\x2a\x9e\x26\xf1\x3d\x21\xac\x65",
4062 +
4063 + .ilen = 285 + 20 + 15,
4064 + .assoc = "\x00\x01\x02\x03\x04\x05\x06\x07"
4065 + "\x00\x03\x01\x01\x40",
4066 + .alen = 13,
4067 + .result = "285 bytes plaintext285 bytes plaintext285 bytes"
4068 + " plaintext285 bytes plaintext285 bytes plaintext285"
4069 + " bytes plaintext285 bytes plaintext285 bytes"
4070 + " plaintext285 bytes plaintext285 bytes plaintext285"
4071 + " bytes plaintext285 bytes plaintext285 bytes"
4072 + " plaintext285 bytes plaintext285 bytes plaintext",
4073 + .rlen = 285,
4074 + }
4075 +};
4076 +
4077 +/*
4078 + * RSA test vectors. Borrowed from openSSL.
4079 + */
4080 +static const struct akcipher_testvec rsa_tv_template[] = {
4081 {
4082 #ifndef CONFIG_CRYPTO_FIPS
4083 .key =
4084 @@ -340,6 +554,7 @@ static struct akcipher_testvec rsa_tv_te
4085 .m_size = 8,
4086 .c_size = 256,
4087 .public_key_vec = true,
4088 +#ifndef CONFIG_CRYPTO_FIPS
4089 }, {
4090 .key =
4091 "\x30\x82\x09\x29" /* sequence of 2345 bytes */
4092 @@ -538,12 +753,11 @@ static struct akcipher_testvec rsa_tv_te
4093 .key_len = 2349,
4094 .m_size = 8,
4095 .c_size = 512,
4096 +#endif
4097 }
4098 };
4099
4100 -#define DH_TEST_VECTORS 2
4101 -
4102 -struct kpp_testvec dh_tv_template[] = {
4103 +static const struct kpp_testvec dh_tv_template[] = {
4104 {
4105 .secret =
4106 #ifdef __LITTLE_ENDIAN
4107 @@ -760,12 +974,7 @@ struct kpp_testvec dh_tv_template[] = {
4108 }
4109 };
4110
4111 -#ifdef CONFIG_CRYPTO_FIPS
4112 -#define ECDH_TEST_VECTORS 1
4113 -#else
4114 -#define ECDH_TEST_VECTORS 2
4115 -#endif
4116 -struct kpp_testvec ecdh_tv_template[] = {
4117 +static const struct kpp_testvec ecdh_tv_template[] = {
4118 {
4119 #ifndef CONFIG_CRYPTO_FIPS
4120 .secret =
4121 @@ -856,9 +1065,7 @@ struct kpp_testvec ecdh_tv_template[] =
4122 /*
4123 * MD4 test vectors from RFC1320
4124 */
4125 -#define MD4_TEST_VECTORS 7
4126 -
4127 -static struct hash_testvec md4_tv_template [] = {
4128 +static const struct hash_testvec md4_tv_template[] = {
4129 {
4130 .plaintext = "",
4131 .digest = "\x31\xd6\xcf\xe0\xd1\x6a\xe9\x31"
4132 @@ -899,8 +1106,7 @@ static struct hash_testvec md4_tv_templa
4133 },
4134 };
4135
4136 -#define SHA3_224_TEST_VECTORS 3
4137 -static struct hash_testvec sha3_224_tv_template[] = {
4138 +static const struct hash_testvec sha3_224_tv_template[] = {
4139 {
4140 .plaintext = "",
4141 .digest = "\x6b\x4e\x03\x42\x36\x67\xdb\xb7"
4142 @@ -925,8 +1131,7 @@ static struct hash_testvec sha3_224_tv_t
4143 },
4144 };
4145
4146 -#define SHA3_256_TEST_VECTORS 3
4147 -static struct hash_testvec sha3_256_tv_template[] = {
4148 +static const struct hash_testvec sha3_256_tv_template[] = {
4149 {
4150 .plaintext = "",
4151 .digest = "\xa7\xff\xc6\xf8\xbf\x1e\xd7\x66"
4152 @@ -952,8 +1157,7 @@ static struct hash_testvec sha3_256_tv_t
4153 };
4154
4155
4156 -#define SHA3_384_TEST_VECTORS 3
4157 -static struct hash_testvec sha3_384_tv_template[] = {
4158 +static const struct hash_testvec sha3_384_tv_template[] = {
4159 {
4160 .plaintext = "",
4161 .digest = "\x0c\x63\xa7\x5b\x84\x5e\x4f\x7d"
4162 @@ -985,8 +1189,7 @@ static struct hash_testvec sha3_384_tv_t
4163 };
4164
4165
4166 -#define SHA3_512_TEST_VECTORS 3
4167 -static struct hash_testvec sha3_512_tv_template[] = {
4168 +static const struct hash_testvec sha3_512_tv_template[] = {
4169 {
4170 .plaintext = "",
4171 .digest = "\xa6\x9f\x73\xcc\xa2\x3a\x9a\xc5"
4172 @@ -1027,9 +1230,7 @@ static struct hash_testvec sha3_512_tv_t
4173 /*
4174 * MD5 test vectors from RFC1321
4175 */
4176 -#define MD5_TEST_VECTORS 7
4177 -
4178 -static struct hash_testvec md5_tv_template[] = {
4179 +static const struct hash_testvec md5_tv_template[] = {
4180 {
4181 .digest = "\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04"
4182 "\xe9\x80\x09\x98\xec\xf8\x42\x7e",
4183 @@ -1073,9 +1274,7 @@ static struct hash_testvec md5_tv_templa
4184 /*
4185 * RIPEMD-128 test vectors from ISO/IEC 10118-3:2004(E)
4186 */
4187 -#define RMD128_TEST_VECTORS 10
4188 -
4189 -static struct hash_testvec rmd128_tv_template[] = {
4190 +static const struct hash_testvec rmd128_tv_template[] = {
4191 {
4192 .digest = "\xcd\xf2\x62\x13\xa1\x50\xdc\x3e"
4193 "\xcb\x61\x0f\x18\xf6\xb3\x8b\x46",
4194 @@ -1137,9 +1336,7 @@ static struct hash_testvec rmd128_tv_tem
4195 /*
4196 * RIPEMD-160 test vectors from ISO/IEC 10118-3:2004(E)
4197 */
4198 -#define RMD160_TEST_VECTORS 10
4199 -
4200 -static struct hash_testvec rmd160_tv_template[] = {
4201 +static const struct hash_testvec rmd160_tv_template[] = {
4202 {
4203 .digest = "\x9c\x11\x85\xa5\xc5\xe9\xfc\x54\x61\x28"
4204 "\x08\x97\x7e\xe8\xf5\x48\xb2\x25\x8d\x31",
4205 @@ -1201,9 +1398,7 @@ static struct hash_testvec rmd160_tv_tem
4206 /*
4207 * RIPEMD-256 test vectors
4208 */
4209 -#define RMD256_TEST_VECTORS 8
4210 -
4211 -static struct hash_testvec rmd256_tv_template[] = {
4212 +static const struct hash_testvec rmd256_tv_template[] = {
4213 {
4214 .digest = "\x02\xba\x4c\x4e\x5f\x8e\xcd\x18"
4215 "\x77\xfc\x52\xd6\x4d\x30\xe3\x7a"
4216 @@ -1269,9 +1464,7 @@ static struct hash_testvec rmd256_tv_tem
4217 /*
4218 * RIPEMD-320 test vectors
4219 */
4220 -#define RMD320_TEST_VECTORS 8
4221 -
4222 -static struct hash_testvec rmd320_tv_template[] = {
4223 +static const struct hash_testvec rmd320_tv_template[] = {
4224 {
4225 .digest = "\x22\xd6\x5d\x56\x61\x53\x6c\xdc\x75\xc1"
4226 "\xfd\xf5\xc6\xde\x7b\x41\xb9\xf2\x73\x25"
4227 @@ -1334,36 +1527,49 @@ static struct hash_testvec rmd320_tv_tem
4228 }
4229 };
4230
4231 -#define CRCT10DIF_TEST_VECTORS 3
4232 -static struct hash_testvec crct10dif_tv_template[] = {
4233 +static const struct hash_testvec crct10dif_tv_template[] = {
4234 {
4235 - .plaintext = "abc",
4236 - .psize = 3,
4237 -#ifdef __LITTLE_ENDIAN
4238 - .digest = "\x3b\x44",
4239 -#else
4240 - .digest = "\x44\x3b",
4241 -#endif
4242 - }, {
4243 - .plaintext = "1234567890123456789012345678901234567890"
4244 - "123456789012345678901234567890123456789",
4245 - .psize = 79,
4246 -#ifdef __LITTLE_ENDIAN
4247 - .digest = "\x70\x4b",
4248 -#else
4249 - .digest = "\x4b\x70",
4250 -#endif
4251 - }, {
4252 - .plaintext =
4253 - "abcddddddddddddddddddddddddddddddddddddddddddddddddddddd",
4254 - .psize = 56,
4255 -#ifdef __LITTLE_ENDIAN
4256 - .digest = "\xe3\x9c",
4257 -#else
4258 - .digest = "\x9c\xe3",
4259 -#endif
4260 - .np = 2,
4261 - .tap = { 28, 28 }
4262 + .plaintext = "abc",
4263 + .psize = 3,
4264 + .digest = (u8 *)(u16 []){ 0x443b },
4265 + }, {
4266 + .plaintext = "1234567890123456789012345678901234567890"
4267 + "123456789012345678901234567890123456789",
4268 + .psize = 79,
4269 + .digest = (u8 *)(u16 []){ 0x4b70 },
4270 + .np = 2,
4271 + .tap = { 63, 16 },
4272 + }, {
4273 + .plaintext = "abcdddddddddddddddddddddddddddddddddddddddd"
4274 + "ddddddddddddd",
4275 + .psize = 56,
4276 + .digest = (u8 *)(u16 []){ 0x9ce3 },
4277 + .np = 8,
4278 + .tap = { 1, 2, 28, 7, 6, 5, 4, 3 },
4279 + }, {
4280 + .plaintext = "1234567890123456789012345678901234567890"
4281 + "1234567890123456789012345678901234567890"
4282 + "1234567890123456789012345678901234567890"
4283 + "1234567890123456789012345678901234567890"
4284 + "1234567890123456789012345678901234567890"
4285 + "1234567890123456789012345678901234567890"
4286 + "1234567890123456789012345678901234567890"
4287 + "123456789012345678901234567890123456789",
4288 + .psize = 319,
4289 + .digest = (u8 *)(u16 []){ 0x44c6 },
4290 + }, {
4291 + .plaintext = "1234567890123456789012345678901234567890"
4292 + "1234567890123456789012345678901234567890"
4293 + "1234567890123456789012345678901234567890"
4294 + "1234567890123456789012345678901234567890"
4295 + "1234567890123456789012345678901234567890"
4296 + "1234567890123456789012345678901234567890"
4297 + "1234567890123456789012345678901234567890"
4298 + "123456789012345678901234567890123456789",
4299 + .psize = 319,
4300 + .digest = (u8 *)(u16 []){ 0x44c6 },
4301 + .np = 4,
4302 + .tap = { 1, 255, 57, 6 },
4303 }
4304 };
4305
4306 @@ -1371,9 +1577,7 @@ static struct hash_testvec crct10dif_tv_
4307 * SHA1 test vectors from from FIPS PUB 180-1
4308 * Long vector from CAVS 5.0
4309 */
4310 -#define SHA1_TEST_VECTORS 6
4311 -
4312 -static struct hash_testvec sha1_tv_template[] = {
4313 +static const struct hash_testvec sha1_tv_template[] = {
4314 {
4315 .plaintext = "",
4316 .psize = 0,
4317 @@ -1563,9 +1767,7 @@ static struct hash_testvec sha1_tv_templ
4318 /*
4319 * SHA224 test vectors from from FIPS PUB 180-2
4320 */
4321 -#define SHA224_TEST_VECTORS 5
4322 -
4323 -static struct hash_testvec sha224_tv_template[] = {
4324 +static const struct hash_testvec sha224_tv_template[] = {
4325 {
4326 .plaintext = "",
4327 .psize = 0,
4328 @@ -1737,9 +1939,7 @@ static struct hash_testvec sha224_tv_tem
4329 /*
4330 * SHA256 test vectors from from NIST
4331 */
4332 -#define SHA256_TEST_VECTORS 5
4333 -
4334 -static struct hash_testvec sha256_tv_template[] = {
4335 +static const struct hash_testvec sha256_tv_template[] = {
4336 {
4337 .plaintext = "",
4338 .psize = 0,
4339 @@ -1910,9 +2110,7 @@ static struct hash_testvec sha256_tv_tem
4340 /*
4341 * SHA384 test vectors from from NIST and kerneli
4342 */
4343 -#define SHA384_TEST_VECTORS 6
4344 -
4345 -static struct hash_testvec sha384_tv_template[] = {
4346 +static const struct hash_testvec sha384_tv_template[] = {
4347 {
4348 .plaintext = "",
4349 .psize = 0,
4350 @@ -2104,9 +2302,7 @@ static struct hash_testvec sha384_tv_tem
4351 /*
4352 * SHA512 test vectors from from NIST and kerneli
4353 */
4354 -#define SHA512_TEST_VECTORS 6
4355 -
4356 -static struct hash_testvec sha512_tv_template[] = {
4357 +static const struct hash_testvec sha512_tv_template[] = {
4358 {
4359 .plaintext = "",
4360 .psize = 0,
4361 @@ -2313,9 +2509,7 @@ static struct hash_testvec sha512_tv_tem
4362 * by Vincent Rijmen and Paulo S. L. M. Barreto as part of the NESSIE
4363 * submission
4364 */
4365 -#define WP512_TEST_VECTORS 8
4366 -
4367 -static struct hash_testvec wp512_tv_template[] = {
4368 +static const struct hash_testvec wp512_tv_template[] = {
4369 {
4370 .plaintext = "",
4371 .psize = 0,
4372 @@ -2411,9 +2605,7 @@ static struct hash_testvec wp512_tv_temp
4373 },
4374 };
4375
4376 -#define WP384_TEST_VECTORS 8
4377 -
4378 -static struct hash_testvec wp384_tv_template[] = {
4379 +static const struct hash_testvec wp384_tv_template[] = {
4380 {
4381 .plaintext = "",
4382 .psize = 0,
4383 @@ -2493,9 +2685,7 @@ static struct hash_testvec wp384_tv_temp
4384 },
4385 };
4386
4387 -#define WP256_TEST_VECTORS 8
4388 -
4389 -static struct hash_testvec wp256_tv_template[] = {
4390 +static const struct hash_testvec wp256_tv_template[] = {
4391 {
4392 .plaintext = "",
4393 .psize = 0,
4394 @@ -2562,9 +2752,7 @@ static struct hash_testvec wp256_tv_temp
4395 /*
4396 * TIGER test vectors from Tiger website
4397 */
4398 -#define TGR192_TEST_VECTORS 6
4399 -
4400 -static struct hash_testvec tgr192_tv_template[] = {
4401 +static const struct hash_testvec tgr192_tv_template[] = {
4402 {
4403 .plaintext = "",
4404 .psize = 0,
4405 @@ -2607,9 +2795,7 @@ static struct hash_testvec tgr192_tv_tem
4406 },
4407 };
4408
4409 -#define TGR160_TEST_VECTORS 6
4410 -
4411 -static struct hash_testvec tgr160_tv_template[] = {
4412 +static const struct hash_testvec tgr160_tv_template[] = {
4413 {
4414 .plaintext = "",
4415 .psize = 0,
4416 @@ -2652,9 +2838,7 @@ static struct hash_testvec tgr160_tv_tem
4417 },
4418 };
4419
4420 -#define TGR128_TEST_VECTORS 6
4421 -
4422 -static struct hash_testvec tgr128_tv_template[] = {
4423 +static const struct hash_testvec tgr128_tv_template[] = {
4424 {
4425 .plaintext = "",
4426 .psize = 0,
4427 @@ -2691,9 +2875,7 @@ static struct hash_testvec tgr128_tv_tem
4428 },
4429 };
4430
4431 -#define GHASH_TEST_VECTORS 6
4432 -
4433 -static struct hash_testvec ghash_tv_template[] =
4434 +static const struct hash_testvec ghash_tv_template[] =
4435 {
4436 {
4437 .key = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
4438 @@ -2808,9 +2990,7 @@ static struct hash_testvec ghash_tv_temp
4439 * HMAC-MD5 test vectors from RFC2202
4440 * (These need to be fixed to not use strlen).
4441 */
4442 -#define HMAC_MD5_TEST_VECTORS 7
4443 -
4444 -static struct hash_testvec hmac_md5_tv_template[] =
4445 +static const struct hash_testvec hmac_md5_tv_template[] =
4446 {
4447 {
4448 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4449 @@ -2890,9 +3070,7 @@ static struct hash_testvec hmac_md5_tv_t
4450 /*
4451 * HMAC-RIPEMD128 test vectors from RFC2286
4452 */
4453 -#define HMAC_RMD128_TEST_VECTORS 7
4454 -
4455 -static struct hash_testvec hmac_rmd128_tv_template[] = {
4456 +static const struct hash_testvec hmac_rmd128_tv_template[] = {
4457 {
4458 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4459 .ksize = 16,
4460 @@ -2971,9 +3149,7 @@ static struct hash_testvec hmac_rmd128_t
4461 /*
4462 * HMAC-RIPEMD160 test vectors from RFC2286
4463 */
4464 -#define HMAC_RMD160_TEST_VECTORS 7
4465 -
4466 -static struct hash_testvec hmac_rmd160_tv_template[] = {
4467 +static const struct hash_testvec hmac_rmd160_tv_template[] = {
4468 {
4469 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4470 .ksize = 20,
4471 @@ -3052,9 +3228,7 @@ static struct hash_testvec hmac_rmd160_t
4472 /*
4473 * HMAC-SHA1 test vectors from RFC2202
4474 */
4475 -#define HMAC_SHA1_TEST_VECTORS 7
4476 -
4477 -static struct hash_testvec hmac_sha1_tv_template[] = {
4478 +static const struct hash_testvec hmac_sha1_tv_template[] = {
4479 {
4480 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
4481 .ksize = 20,
4482 @@ -3135,9 +3309,7 @@ static struct hash_testvec hmac_sha1_tv_
4483 /*
4484 * SHA224 HMAC test vectors from RFC4231
4485 */
4486 -#define HMAC_SHA224_TEST_VECTORS 4
4487 -
4488 -static struct hash_testvec hmac_sha224_tv_template[] = {
4489 +static const struct hash_testvec hmac_sha224_tv_template[] = {
4490 {
4491 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4492 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4493 @@ -3250,9 +3422,7 @@ static struct hash_testvec hmac_sha224_t
4494 * HMAC-SHA256 test vectors from
4495 * draft-ietf-ipsec-ciph-sha-256-01.txt
4496 */
4497 -#define HMAC_SHA256_TEST_VECTORS 10
4498 -
4499 -static struct hash_testvec hmac_sha256_tv_template[] = {
4500 +static const struct hash_testvec hmac_sha256_tv_template[] = {
4501 {
4502 .key = "\x01\x02\x03\x04\x05\x06\x07\x08"
4503 "\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10"
4504 @@ -3387,9 +3557,7 @@ static struct hash_testvec hmac_sha256_t
4505 },
4506 };
4507
4508 -#define CMAC_AES_TEST_VECTORS 6
4509 -
4510 -static struct hash_testvec aes_cmac128_tv_template[] = {
4511 +static const struct hash_testvec aes_cmac128_tv_template[] = {
4512 { /* From NIST Special Publication 800-38B, AES-128 */
4513 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4514 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4515 @@ -3464,9 +3632,67 @@ static struct hash_testvec aes_cmac128_t
4516 }
4517 };
4518
4519 -#define CMAC_DES3_EDE_TEST_VECTORS 4
4520 +static const struct hash_testvec aes_cbcmac_tv_template[] = {
4521 + {
4522 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4523 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4524 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4525 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a",
4526 + .digest = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60"
4527 + "\xa8\x9e\xca\xf3\x24\x66\xef\x97",
4528 + .psize = 16,
4529 + .ksize = 16,
4530 + }, {
4531 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4532 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4533 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4534 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4535 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4536 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4537 + "\x30",
4538 + .digest = "\x9d\x0d\xd0\x63\xfb\xcb\x24\x43"
4539 + "\xf8\xf2\x76\x03\xac\x39\xb0\x9d",
4540 + .psize = 33,
4541 + .ksize = 16,
4542 + .np = 2,
4543 + .tap = { 7, 26 },
4544 + }, {
4545 + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
4546 + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
4547 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4548 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4549 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4550 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4551 + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4552 + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4553 + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4554 + "\xad\x2b\x41\x7b\xe6\x6c\x37",
4555 + .digest = "\xc0\x71\x73\xb8\xa0\x2c\x11\x7c"
4556 + "\xaf\xdc\xb2\xf8\x89\x32\xa3\x3a",
4557 + .psize = 63,
4558 + .ksize = 16,
4559 + }, {
4560 + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
4561 + "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
4562 + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
4563 + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
4564 + .plaintext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
4565 + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
4566 + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
4567 + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
4568 + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
4569 + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
4570 + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
4571 + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10"
4572 + "\x1c",
4573 + .digest = "\x6a\x4e\xdb\x21\x47\x51\xdf\x4f"
4574 + "\xa8\x4d\x4c\x10\x3b\x72\x7d\xd6",
4575 + .psize = 65,
4576 + .ksize = 32,
4577 + }
4578 +};
4579
4580 -static struct hash_testvec des3_ede_cmac64_tv_template[] = {
4581 +static const struct hash_testvec des3_ede_cmac64_tv_template[] = {
4582 /*
4583 * From NIST Special Publication 800-38B, Three Key TDEA
4584 * Corrected test vectors from:
4585 @@ -3512,9 +3738,7 @@ static struct hash_testvec des3_ede_cmac
4586 }
4587 };
4588
4589 -#define XCBC_AES_TEST_VECTORS 6
4590 -
4591 -static struct hash_testvec aes_xcbc128_tv_template[] = {
4592 +static const struct hash_testvec aes_xcbc128_tv_template[] = {
4593 {
4594 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4595 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4596 @@ -3580,36 +3804,35 @@ static struct hash_testvec aes_xcbc128_t
4597 }
4598 };
4599
4600 -#define VMAC_AES_TEST_VECTORS 11
4601 -static char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4602 - '\x02', '\x03', '\x02', '\x02',
4603 - '\x02', '\x04', '\x01', '\x07',
4604 - '\x04', '\x01', '\x04', '\x03',};
4605 -static char vmac_string2[128] = {'a', 'b', 'c',};
4606 -static char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4607 - 'a', 'b', 'c', 'a', 'b', 'c',
4608 - 'a', 'b', 'c', 'a', 'b', 'c',
4609 - 'a', 'b', 'c', 'a', 'b', 'c',
4610 - 'a', 'b', 'c', 'a', 'b', 'c',
4611 - 'a', 'b', 'c', 'a', 'b', 'c',
4612 - 'a', 'b', 'c', 'a', 'b', 'c',
4613 - 'a', 'b', 'c', 'a', 'b', 'c',
4614 - };
4615 -
4616 -static char vmac_string4[17] = {'b', 'c', 'e', 'f',
4617 - 'i', 'j', 'l', 'm',
4618 - 'o', 'p', 'r', 's',
4619 - 't', 'u', 'w', 'x', 'z'};
4620 -
4621 -static char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4622 - 'o', 'l', 'k', ']', '%',
4623 - '9', '2', '7', '!', 'A'};
4624 -
4625 -static char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4626 - 'i', '!', '#', 'w', '0',
4627 - 'z', '/', '4', 'A', 'n'};
4628 +static const char vmac_string1[128] = {'\x01', '\x01', '\x01', '\x01',
4629 + '\x02', '\x03', '\x02', '\x02',
4630 + '\x02', '\x04', '\x01', '\x07',
4631 + '\x04', '\x01', '\x04', '\x03',};
4632 +static const char vmac_string2[128] = {'a', 'b', 'c',};
4633 +static const char vmac_string3[128] = {'a', 'b', 'c', 'a', 'b', 'c',
4634 + 'a', 'b', 'c', 'a', 'b', 'c',
4635 + 'a', 'b', 'c', 'a', 'b', 'c',
4636 + 'a', 'b', 'c', 'a', 'b', 'c',
4637 + 'a', 'b', 'c', 'a', 'b', 'c',
4638 + 'a', 'b', 'c', 'a', 'b', 'c',
4639 + 'a', 'b', 'c', 'a', 'b', 'c',
4640 + 'a', 'b', 'c', 'a', 'b', 'c',
4641 + };
4642 +
4643 +static const char vmac_string4[17] = {'b', 'c', 'e', 'f',
4644 + 'i', 'j', 'l', 'm',
4645 + 'o', 'p', 'r', 's',
4646 + 't', 'u', 'w', 'x', 'z'};
4647 +
4648 +static const char vmac_string5[127] = {'r', 'm', 'b', 't', 'c',
4649 + 'o', 'l', 'k', ']', '%',
4650 + '9', '2', '7', '!', 'A'};
4651 +
4652 +static const char vmac_string6[129] = {'p', 't', '*', '7', 'l',
4653 + 'i', '!', '#', 'w', '0',
4654 + 'z', '/', '4', 'A', 'n'};
4655
4656 -static struct hash_testvec aes_vmac128_tv_template[] = {
4657 +static const struct hash_testvec aes_vmac128_tv_template[] = {
4658 {
4659 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
4660 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
4661 @@ -3687,9 +3910,7 @@ static struct hash_testvec aes_vmac128_t
4662 * SHA384 HMAC test vectors from RFC4231
4663 */
4664
4665 -#define HMAC_SHA384_TEST_VECTORS 4
4666 -
4667 -static struct hash_testvec hmac_sha384_tv_template[] = {
4668 +static const struct hash_testvec hmac_sha384_tv_template[] = {
4669 {
4670 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4671 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4672 @@ -3787,9 +4008,7 @@ static struct hash_testvec hmac_sha384_t
4673 * SHA512 HMAC test vectors from RFC4231
4674 */
4675
4676 -#define HMAC_SHA512_TEST_VECTORS 4
4677 -
4678 -static struct hash_testvec hmac_sha512_tv_template[] = {
4679 +static const struct hash_testvec hmac_sha512_tv_template[] = {
4680 {
4681 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4682 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4683 @@ -3894,9 +4113,7 @@ static struct hash_testvec hmac_sha512_t
4684 },
4685 };
4686
4687 -#define HMAC_SHA3_224_TEST_VECTORS 4
4688 -
4689 -static struct hash_testvec hmac_sha3_224_tv_template[] = {
4690 +static const struct hash_testvec hmac_sha3_224_tv_template[] = {
4691 {
4692 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4693 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4694 @@ -3985,9 +4202,7 @@ static struct hash_testvec hmac_sha3_224
4695 },
4696 };
4697
4698 -#define HMAC_SHA3_256_TEST_VECTORS 4
4699 -
4700 -static struct hash_testvec hmac_sha3_256_tv_template[] = {
4701 +static const struct hash_testvec hmac_sha3_256_tv_template[] = {
4702 {
4703 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4704 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4705 @@ -4076,9 +4291,7 @@ static struct hash_testvec hmac_sha3_256
4706 },
4707 };
4708
4709 -#define HMAC_SHA3_384_TEST_VECTORS 4
4710 -
4711 -static struct hash_testvec hmac_sha3_384_tv_template[] = {
4712 +static const struct hash_testvec hmac_sha3_384_tv_template[] = {
4713 {
4714 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4715 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4716 @@ -4175,9 +4388,7 @@ static struct hash_testvec hmac_sha3_384
4717 },
4718 };
4719
4720 -#define HMAC_SHA3_512_TEST_VECTORS 4
4721 -
4722 -static struct hash_testvec hmac_sha3_512_tv_template[] = {
4723 +static const struct hash_testvec hmac_sha3_512_tv_template[] = {
4724 {
4725 .key = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4726 "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
4727 @@ -4286,9 +4497,7 @@ static struct hash_testvec hmac_sha3_512
4728 * Poly1305 test vectors from RFC7539 A.3.
4729 */
4730
4731 -#define POLY1305_TEST_VECTORS 11
4732 -
4733 -static struct hash_testvec poly1305_tv_template[] = {
4734 +static const struct hash_testvec poly1305_tv_template[] = {
4735 { /* Test Vector #1 */
4736 .plaintext = "\x00\x00\x00\x00\x00\x00\x00\x00"
4737 "\x00\x00\x00\x00\x00\x00\x00\x00"
4738 @@ -4575,20 +4784,7 @@ static struct hash_testvec poly1305_tv_t
4739 /*
4740 * DES test vectors.
4741 */
4742 -#define DES_ENC_TEST_VECTORS 11
4743 -#define DES_DEC_TEST_VECTORS 5
4744 -#define DES_CBC_ENC_TEST_VECTORS 6
4745 -#define DES_CBC_DEC_TEST_VECTORS 5
4746 -#define DES_CTR_ENC_TEST_VECTORS 2
4747 -#define DES_CTR_DEC_TEST_VECTORS 2
4748 -#define DES3_EDE_ENC_TEST_VECTORS 4
4749 -#define DES3_EDE_DEC_TEST_VECTORS 4
4750 -#define DES3_EDE_CBC_ENC_TEST_VECTORS 2
4751 -#define DES3_EDE_CBC_DEC_TEST_VECTORS 2
4752 -#define DES3_EDE_CTR_ENC_TEST_VECTORS 2
4753 -#define DES3_EDE_CTR_DEC_TEST_VECTORS 2
4754 -
4755 -static struct cipher_testvec des_enc_tv_template[] = {
4756 +static const struct cipher_testvec des_enc_tv_template[] = {
4757 { /* From Applied Cryptography */
4758 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4759 .klen = 8,
4760 @@ -4762,7 +4958,7 @@ static struct cipher_testvec des_enc_tv_
4761 },
4762 };
4763
4764 -static struct cipher_testvec des_dec_tv_template[] = {
4765 +static const struct cipher_testvec des_dec_tv_template[] = {
4766 { /* From Applied Cryptography */
4767 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4768 .klen = 8,
4769 @@ -4872,7 +5068,7 @@ static struct cipher_testvec des_dec_tv_
4770 },
4771 };
4772
4773 -static struct cipher_testvec des_cbc_enc_tv_template[] = {
4774 +static const struct cipher_testvec des_cbc_enc_tv_template[] = {
4775 { /* From OpenSSL */
4776 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4777 .klen = 8,
4778 @@ -4998,7 +5194,7 @@ static struct cipher_testvec des_cbc_enc
4779 },
4780 };
4781
4782 -static struct cipher_testvec des_cbc_dec_tv_template[] = {
4783 +static const struct cipher_testvec des_cbc_dec_tv_template[] = {
4784 { /* FIPS Pub 81 */
4785 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
4786 .klen = 8,
4787 @@ -5107,7 +5303,7 @@ static struct cipher_testvec des_cbc_dec
4788 },
4789 };
4790
4791 -static struct cipher_testvec des_ctr_enc_tv_template[] = {
4792 +static const struct cipher_testvec des_ctr_enc_tv_template[] = {
4793 { /* Generated with Crypto++ */
4794 .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4795 .klen = 8,
4796 @@ -5253,7 +5449,7 @@ static struct cipher_testvec des_ctr_enc
4797 },
4798 };
4799
4800 -static struct cipher_testvec des_ctr_dec_tv_template[] = {
4801 +static const struct cipher_testvec des_ctr_dec_tv_template[] = {
4802 { /* Generated with Crypto++ */
4803 .key = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
4804 .klen = 8,
4805 @@ -5399,7 +5595,7 @@ static struct cipher_testvec des_ctr_dec
4806 },
4807 };
4808
4809 -static struct cipher_testvec des3_ede_enc_tv_template[] = {
4810 +static const struct cipher_testvec des3_ede_enc_tv_template[] = {
4811 { /* These are from openssl */
4812 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4813 "\x55\x55\x55\x55\x55\x55\x55\x55"
4814 @@ -5564,7 +5760,7 @@ static struct cipher_testvec des3_ede_en
4815 },
4816 };
4817
4818 -static struct cipher_testvec des3_ede_dec_tv_template[] = {
4819 +static const struct cipher_testvec des3_ede_dec_tv_template[] = {
4820 { /* These are from openssl */
4821 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4822 "\x55\x55\x55\x55\x55\x55\x55\x55"
4823 @@ -5729,7 +5925,7 @@ static struct cipher_testvec des3_ede_de
4824 },
4825 };
4826
4827 -static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4828 +static const struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
4829 { /* Generated from openssl */
4830 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4831 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4832 @@ -5909,7 +6105,7 @@ static struct cipher_testvec des3_ede_cb
4833 },
4834 };
4835
4836 -static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4837 +static const struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
4838 { /* Generated from openssl */
4839 .key = "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24"
4840 "\x44\x4D\x99\x5A\x12\xD6\x40\xC0"
4841 @@ -6089,7 +6285,7 @@ static struct cipher_testvec des3_ede_cb
4842 },
4843 };
4844
4845 -static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4846 +static const struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
4847 { /* Generated with Crypto++ */
4848 .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4849 "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4850 @@ -6367,7 +6563,7 @@ static struct cipher_testvec des3_ede_ct
4851 },
4852 };
4853
4854 -static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4855 +static const struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
4856 { /* Generated with Crypto++ */
4857 .key = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
4858 "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
4859 @@ -6648,14 +6844,7 @@ static struct cipher_testvec des3_ede_ct
4860 /*
4861 * Blowfish test vectors.
4862 */
4863 -#define BF_ENC_TEST_VECTORS 7
4864 -#define BF_DEC_TEST_VECTORS 7
4865 -#define BF_CBC_ENC_TEST_VECTORS 2
4866 -#define BF_CBC_DEC_TEST_VECTORS 2
4867 -#define BF_CTR_ENC_TEST_VECTORS 2
4868 -#define BF_CTR_DEC_TEST_VECTORS 2
4869 -
4870 -static struct cipher_testvec bf_enc_tv_template[] = {
4871 +static const struct cipher_testvec bf_enc_tv_template[] = {
4872 { /* DES test vectors from OpenSSL */
4873 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
4874 .klen = 8,
4875 @@ -6847,7 +7036,7 @@ static struct cipher_testvec bf_enc_tv_t
4876 },
4877 };
4878
4879 -static struct cipher_testvec bf_dec_tv_template[] = {
4880 +static const struct cipher_testvec bf_dec_tv_template[] = {
4881 { /* DES test vectors from OpenSSL */
4882 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
4883 .klen = 8,
4884 @@ -7039,7 +7228,7 @@ static struct cipher_testvec bf_dec_tv_t
4885 },
4886 };
4887
4888 -static struct cipher_testvec bf_cbc_enc_tv_template[] = {
4889 +static const struct cipher_testvec bf_cbc_enc_tv_template[] = {
4890 { /* From OpenSSL */
4891 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4892 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4893 @@ -7196,7 +7385,7 @@ static struct cipher_testvec bf_cbc_enc_
4894 },
4895 };
4896
4897 -static struct cipher_testvec bf_cbc_dec_tv_template[] = {
4898 +static const struct cipher_testvec bf_cbc_dec_tv_template[] = {
4899 { /* From OpenSSL */
4900 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
4901 "\xf0\xe1\xd2\xc3\xb4\xa5\x96\x87",
4902 @@ -7353,7 +7542,7 @@ static struct cipher_testvec bf_cbc_dec_
4903 },
4904 };
4905
4906 -static struct cipher_testvec bf_ctr_enc_tv_template[] = {
4907 +static const struct cipher_testvec bf_ctr_enc_tv_template[] = {
4908 { /* Generated with Crypto++ */
4909 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4910 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4911 @@ -7765,7 +7954,7 @@ static struct cipher_testvec bf_ctr_enc_
4912 },
4913 };
4914
4915 -static struct cipher_testvec bf_ctr_dec_tv_template[] = {
4916 +static const struct cipher_testvec bf_ctr_dec_tv_template[] = {
4917 { /* Generated with Crypto++ */
4918 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4919 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4920 @@ -8180,18 +8369,7 @@ static struct cipher_testvec bf_ctr_dec_
4921 /*
4922 * Twofish test vectors.
4923 */
4924 -#define TF_ENC_TEST_VECTORS 4
4925 -#define TF_DEC_TEST_VECTORS 4
4926 -#define TF_CBC_ENC_TEST_VECTORS 5
4927 -#define TF_CBC_DEC_TEST_VECTORS 5
4928 -#define TF_CTR_ENC_TEST_VECTORS 2
4929 -#define TF_CTR_DEC_TEST_VECTORS 2
4930 -#define TF_LRW_ENC_TEST_VECTORS 8
4931 -#define TF_LRW_DEC_TEST_VECTORS 8
4932 -#define TF_XTS_ENC_TEST_VECTORS 5
4933 -#define TF_XTS_DEC_TEST_VECTORS 5
4934 -
4935 -static struct cipher_testvec tf_enc_tv_template[] = {
4936 +static const struct cipher_testvec tf_enc_tv_template[] = {
4937 {
4938 .key = zeroed_string,
4939 .klen = 16,
4940 @@ -8359,7 +8537,7 @@ static struct cipher_testvec tf_enc_tv_t
4941 },
4942 };
4943
4944 -static struct cipher_testvec tf_dec_tv_template[] = {
4945 +static const struct cipher_testvec tf_dec_tv_template[] = {
4946 {
4947 .key = zeroed_string,
4948 .klen = 16,
4949 @@ -8527,7 +8705,7 @@ static struct cipher_testvec tf_dec_tv_t
4950 },
4951 };
4952
4953 -static struct cipher_testvec tf_cbc_enc_tv_template[] = {
4954 +static const struct cipher_testvec tf_cbc_enc_tv_template[] = {
4955 { /* Generated with Nettle */
4956 .key = zeroed_string,
4957 .klen = 16,
4958 @@ -8710,7 +8888,7 @@ static struct cipher_testvec tf_cbc_enc_
4959 },
4960 };
4961
4962 -static struct cipher_testvec tf_cbc_dec_tv_template[] = {
4963 +static const struct cipher_testvec tf_cbc_dec_tv_template[] = {
4964 { /* Reverse of the first four above */
4965 .key = zeroed_string,
4966 .klen = 16,
4967 @@ -8893,7 +9071,7 @@ static struct cipher_testvec tf_cbc_dec_
4968 },
4969 };
4970
4971 -static struct cipher_testvec tf_ctr_enc_tv_template[] = {
4972 +static const struct cipher_testvec tf_ctr_enc_tv_template[] = {
4973 { /* Generated with Crypto++ */
4974 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4975 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4976 @@ -9304,7 +9482,7 @@ static struct cipher_testvec tf_ctr_enc_
4977 },
4978 };
4979
4980 -static struct cipher_testvec tf_ctr_dec_tv_template[] = {
4981 +static const struct cipher_testvec tf_ctr_dec_tv_template[] = {
4982 { /* Generated with Crypto++ */
4983 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
4984 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
4985 @@ -9715,7 +9893,7 @@ static struct cipher_testvec tf_ctr_dec_
4986 },
4987 };
4988
4989 -static struct cipher_testvec tf_lrw_enc_tv_template[] = {
4990 +static const struct cipher_testvec tf_lrw_enc_tv_template[] = {
4991 /* Generated from AES-LRW test vectors */
4992 {
4993 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
4994 @@ -9967,7 +10145,7 @@ static struct cipher_testvec tf_lrw_enc_
4995 },
4996 };
4997
4998 -static struct cipher_testvec tf_lrw_dec_tv_template[] = {
4999 +static const struct cipher_testvec tf_lrw_dec_tv_template[] = {
5000 /* Generated from AES-LRW test vectors */
5001 /* same as enc vectors with input and result reversed */
5002 {
5003 @@ -10220,7 +10398,7 @@ static struct cipher_testvec tf_lrw_dec_
5004 },
5005 };
5006
5007 -static struct cipher_testvec tf_xts_enc_tv_template[] = {
5008 +static const struct cipher_testvec tf_xts_enc_tv_template[] = {
5009 /* Generated from AES-XTS test vectors */
5010 {
5011 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5012 @@ -10562,7 +10740,7 @@ static struct cipher_testvec tf_xts_enc_
5013 },
5014 };
5015
5016 -static struct cipher_testvec tf_xts_dec_tv_template[] = {
5017 +static const struct cipher_testvec tf_xts_dec_tv_template[] = {
5018 /* Generated from AES-XTS test vectors */
5019 /* same as enc vectors with input and result reversed */
5020 {
5021 @@ -10909,25 +11087,7 @@ static struct cipher_testvec tf_xts_dec_
5022 * Serpent test vectors. These are backwards because Serpent writes
5023 * octet sequences in right-to-left mode.
5024 */
5025 -#define SERPENT_ENC_TEST_VECTORS 5
5026 -#define SERPENT_DEC_TEST_VECTORS 5
5027 -
5028 -#define TNEPRES_ENC_TEST_VECTORS 4
5029 -#define TNEPRES_DEC_TEST_VECTORS 4
5030 -
5031 -#define SERPENT_CBC_ENC_TEST_VECTORS 1
5032 -#define SERPENT_CBC_DEC_TEST_VECTORS 1
5033 -
5034 -#define SERPENT_CTR_ENC_TEST_VECTORS 2
5035 -#define SERPENT_CTR_DEC_TEST_VECTORS 2
5036 -
5037 -#define SERPENT_LRW_ENC_TEST_VECTORS 8
5038 -#define SERPENT_LRW_DEC_TEST_VECTORS 8
5039 -
5040 -#define SERPENT_XTS_ENC_TEST_VECTORS 5
5041 -#define SERPENT_XTS_DEC_TEST_VECTORS 5
5042 -
5043 -static struct cipher_testvec serpent_enc_tv_template[] = {
5044 +static const struct cipher_testvec serpent_enc_tv_template[] = {
5045 {
5046 .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
5047 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5048 @@ -11103,7 +11263,7 @@ static struct cipher_testvec serpent_enc
5049 },
5050 };
5051
5052 -static struct cipher_testvec tnepres_enc_tv_template[] = {
5053 +static const struct cipher_testvec tnepres_enc_tv_template[] = {
5054 { /* KeySize=128, PT=0, I=1 */
5055 .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
5056 "\x00\x00\x00\x00\x00\x00\x00\x00",
5057 @@ -11153,7 +11313,7 @@ static struct cipher_testvec tnepres_enc
5058 };
5059
5060
5061 -static struct cipher_testvec serpent_dec_tv_template[] = {
5062 +static const struct cipher_testvec serpent_dec_tv_template[] = {
5063 {
5064 .input = "\x12\x07\xfc\xce\x9b\xd0\xd6\x47"
5065 "\x6a\xe9\x8f\xbe\xd1\x43\xa0\xe2",
5066 @@ -11329,7 +11489,7 @@ static struct cipher_testvec serpent_dec
5067 },
5068 };
5069
5070 -static struct cipher_testvec tnepres_dec_tv_template[] = {
5071 +static const struct cipher_testvec tnepres_dec_tv_template[] = {
5072 {
5073 .input = "\x41\xcc\x6b\x31\x59\x31\x45\x97"
5074 "\x6d\x6f\xbb\x38\x4b\x37\x21\x28",
5075 @@ -11370,7 +11530,7 @@ static struct cipher_testvec tnepres_dec
5076 },
5077 };
5078
5079 -static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5080 +static const struct cipher_testvec serpent_cbc_enc_tv_template[] = {
5081 { /* Generated with Crypto++ */
5082 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5083 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5084 @@ -11511,7 +11671,7 @@ static struct cipher_testvec serpent_cbc
5085 },
5086 };
5087
5088 -static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5089 +static const struct cipher_testvec serpent_cbc_dec_tv_template[] = {
5090 { /* Generated with Crypto++ */
5091 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5092 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5093 @@ -11652,7 +11812,7 @@ static struct cipher_testvec serpent_cbc
5094 },
5095 };
5096
5097 -static struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5098 +static const struct cipher_testvec serpent_ctr_enc_tv_template[] = {
5099 { /* Generated with Crypto++ */
5100 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5101 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5102 @@ -12063,7 +12223,7 @@ static struct cipher_testvec serpent_ctr
5103 },
5104 };
5105
5106 -static struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5107 +static const struct cipher_testvec serpent_ctr_dec_tv_template[] = {
5108 { /* Generated with Crypto++ */
5109 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5110 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5111 @@ -12474,7 +12634,7 @@ static struct cipher_testvec serpent_ctr
5112 },
5113 };
5114
5115 -static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5116 +static const struct cipher_testvec serpent_lrw_enc_tv_template[] = {
5117 /* Generated from AES-LRW test vectors */
5118 {
5119 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5120 @@ -12726,7 +12886,7 @@ static struct cipher_testvec serpent_lrw
5121 },
5122 };
5123
5124 -static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5125 +static const struct cipher_testvec serpent_lrw_dec_tv_template[] = {
5126 /* Generated from AES-LRW test vectors */
5127 /* same as enc vectors with input and result reversed */
5128 {
5129 @@ -12979,7 +13139,7 @@ static struct cipher_testvec serpent_lrw
5130 },
5131 };
5132
5133 -static struct cipher_testvec serpent_xts_enc_tv_template[] = {
5134 +static const struct cipher_testvec serpent_xts_enc_tv_template[] = {
5135 /* Generated from AES-XTS test vectors */
5136 {
5137 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5138 @@ -13321,7 +13481,7 @@ static struct cipher_testvec serpent_xts
5139 },
5140 };
5141
5142 -static struct cipher_testvec serpent_xts_dec_tv_template[] = {
5143 +static const struct cipher_testvec serpent_xts_dec_tv_template[] = {
5144 /* Generated from AES-XTS test vectors */
5145 /* same as enc vectors with input and result reversed */
5146 {
5147 @@ -13665,18 +13825,7 @@ static struct cipher_testvec serpent_xts
5148 };
5149
5150 /* Cast6 test vectors from RFC 2612 */
5151 -#define CAST6_ENC_TEST_VECTORS 4
5152 -#define CAST6_DEC_TEST_VECTORS 4
5153 -#define CAST6_CBC_ENC_TEST_VECTORS 1
5154 -#define CAST6_CBC_DEC_TEST_VECTORS 1
5155 -#define CAST6_CTR_ENC_TEST_VECTORS 2
5156 -#define CAST6_CTR_DEC_TEST_VECTORS 2
5157 -#define CAST6_LRW_ENC_TEST_VECTORS 1
5158 -#define CAST6_LRW_DEC_TEST_VECTORS 1
5159 -#define CAST6_XTS_ENC_TEST_VECTORS 1
5160 -#define CAST6_XTS_DEC_TEST_VECTORS 1
5161 -
5162 -static struct cipher_testvec cast6_enc_tv_template[] = {
5163 +static const struct cipher_testvec cast6_enc_tv_template[] = {
5164 {
5165 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5166 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5167 @@ -13847,7 +13996,7 @@ static struct cipher_testvec cast6_enc_t
5168 },
5169 };
5170
5171 -static struct cipher_testvec cast6_dec_tv_template[] = {
5172 +static const struct cipher_testvec cast6_dec_tv_template[] = {
5173 {
5174 .key = "\x23\x42\xbb\x9e\xfa\x38\x54\x2c"
5175 "\x0a\xf7\x56\x47\xf2\x9f\x61\x5d",
5176 @@ -14018,7 +14167,7 @@ static struct cipher_testvec cast6_dec_t
5177 },
5178 };
5179
5180 -static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5181 +static const struct cipher_testvec cast6_cbc_enc_tv_template[] = {
5182 { /* Generated from TF test vectors */
5183 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5184 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5185 @@ -14159,7 +14308,7 @@ static struct cipher_testvec cast6_cbc_e
5186 },
5187 };
5188
5189 -static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5190 +static const struct cipher_testvec cast6_cbc_dec_tv_template[] = {
5191 { /* Generated from TF test vectors */
5192 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5193 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5194 @@ -14300,7 +14449,7 @@ static struct cipher_testvec cast6_cbc_d
5195 },
5196 };
5197
5198 -static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5199 +static const struct cipher_testvec cast6_ctr_enc_tv_template[] = {
5200 { /* Generated from TF test vectors */
5201 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5202 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5203 @@ -14457,7 +14606,7 @@ static struct cipher_testvec cast6_ctr_e
5204 },
5205 };
5206
5207 -static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5208 +static const struct cipher_testvec cast6_ctr_dec_tv_template[] = {
5209 { /* Generated from TF test vectors */
5210 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5211 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
5212 @@ -14614,7 +14763,7 @@ static struct cipher_testvec cast6_ctr_d
5213 },
5214 };
5215
5216 -static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5217 +static const struct cipher_testvec cast6_lrw_enc_tv_template[] = {
5218 { /* Generated from TF test vectors */
5219 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5220 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5221 @@ -14761,7 +14910,7 @@ static struct cipher_testvec cast6_lrw_e
5222 },
5223 };
5224
5225 -static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5226 +static const struct cipher_testvec cast6_lrw_dec_tv_template[] = {
5227 { /* Generated from TF test vectors */
5228 .key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
5229 "\x23\x84\xcb\x1c\x77\xd6\x19\x5d"
5230 @@ -14908,7 +15057,7 @@ static struct cipher_testvec cast6_lrw_d
5231 },
5232 };
5233
5234 -static struct cipher_testvec cast6_xts_enc_tv_template[] = {
5235 +static const struct cipher_testvec cast6_xts_enc_tv_template[] = {
5236 { /* Generated from TF test vectors */
5237 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
5238 "\x23\x53\x60\x28\x74\x71\x35\x26"
5239 @@ -15057,7 +15206,7 @@ static struct cipher_testvec cast6_xts_e
5240 },
5241 };
5242
5243 -static struct cipher_testvec cast6_xts_dec_tv_template[] = {
5244 +static const struct cipher_testvec cast6_xts_dec_tv_template[] = {
5245 { /* Generated from TF test vectors */
5246 .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
5247 "\x23\x53\x60\x28\x74\x71\x35\x26"
5248 @@ -15210,39 +15359,7 @@ static struct cipher_testvec cast6_xts_d
5249 /*
5250 * AES test vectors.
5251 */
5252 -#define AES_ENC_TEST_VECTORS 4
5253 -#define AES_DEC_TEST_VECTORS 4
5254 -#define AES_CBC_ENC_TEST_VECTORS 5
5255 -#define AES_CBC_DEC_TEST_VECTORS 5
5256 -#define HMAC_MD5_ECB_CIPHER_NULL_ENC_TEST_VECTORS 2
5257 -#define HMAC_MD5_ECB_CIPHER_NULL_DEC_TEST_VECTORS 2
5258 -#define HMAC_SHA1_ECB_CIPHER_NULL_ENC_TEST_VEC 2
5259 -#define HMAC_SHA1_ECB_CIPHER_NULL_DEC_TEST_VEC 2
5260 -#define HMAC_SHA1_AES_CBC_ENC_TEST_VEC 7
5261 -#define HMAC_SHA256_AES_CBC_ENC_TEST_VEC 7
5262 -#define HMAC_SHA512_AES_CBC_ENC_TEST_VEC 7
5263 -#define AES_LRW_ENC_TEST_VECTORS 8
5264 -#define AES_LRW_DEC_TEST_VECTORS 8
5265 -#define AES_XTS_ENC_TEST_VECTORS 5
5266 -#define AES_XTS_DEC_TEST_VECTORS 5
5267 -#define AES_CTR_ENC_TEST_VECTORS 5
5268 -#define AES_CTR_DEC_TEST_VECTORS 5
5269 -#define AES_OFB_ENC_TEST_VECTORS 1
5270 -#define AES_OFB_DEC_TEST_VECTORS 1
5271 -#define AES_CTR_3686_ENC_TEST_VECTORS 7
5272 -#define AES_CTR_3686_DEC_TEST_VECTORS 6
5273 -#define AES_GCM_ENC_TEST_VECTORS 9
5274 -#define AES_GCM_DEC_TEST_VECTORS 8
5275 -#define AES_GCM_4106_ENC_TEST_VECTORS 23
5276 -#define AES_GCM_4106_DEC_TEST_VECTORS 23
5277 -#define AES_GCM_4543_ENC_TEST_VECTORS 1
5278 -#define AES_GCM_4543_DEC_TEST_VECTORS 2
5279 -#define AES_CCM_ENC_TEST_VECTORS 8
5280 -#define AES_CCM_DEC_TEST_VECTORS 7
5281 -#define AES_CCM_4309_ENC_TEST_VECTORS 7
5282 -#define AES_CCM_4309_DEC_TEST_VECTORS 10
5283 -
5284 -static struct cipher_testvec aes_enc_tv_template[] = {
5285 +static const struct cipher_testvec aes_enc_tv_template[] = {
5286 { /* From FIPS-197 */
5287 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
5288 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5289 @@ -15414,7 +15531,7 @@ static struct cipher_testvec aes_enc_tv_
5290 },
5291 };
5292
5293 -static struct cipher_testvec aes_dec_tv_template[] = {
5294 +static const struct cipher_testvec aes_dec_tv_template[] = {
5295 { /* From FIPS-197 */
5296 .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
5297 "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
5298 @@ -15586,7 +15703,7 @@ static struct cipher_testvec aes_dec_tv_
5299 },
5300 };
5301
5302 -static struct cipher_testvec aes_cbc_enc_tv_template[] = {
5303 +static const struct cipher_testvec aes_cbc_enc_tv_template[] = {
5304 { /* From RFC 3602 */
5305 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5306 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5307 @@ -15808,7 +15925,7 @@ static struct cipher_testvec aes_cbc_enc
5308 },
5309 };
5310
5311 -static struct cipher_testvec aes_cbc_dec_tv_template[] = {
5312 +static const struct cipher_testvec aes_cbc_dec_tv_template[] = {
5313 { /* From RFC 3602 */
5314 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
5315 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
5316 @@ -16030,7 +16147,7 @@ static struct cipher_testvec aes_cbc_dec
5317 },
5318 };
5319
5320 -static struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5321 +static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
5322 { /* Input data from RFC 2410 Case 1 */
5323 #ifdef __LITTLE_ENDIAN
5324 .key = "\x08\x00" /* rta length */
5325 @@ -16072,7 +16189,7 @@ static struct aead_testvec hmac_md5_ecb_
5326 },
5327 };
5328
5329 -static struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5330 +static const struct aead_testvec hmac_md5_ecb_cipher_null_dec_tv_template[] = {
5331 {
5332 #ifdef __LITTLE_ENDIAN
5333 .key = "\x08\x00" /* rta length */
5334 @@ -16114,7 +16231,7 @@ static struct aead_testvec hmac_md5_ecb_
5335 },
5336 };
5337
5338 -static struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5339 +static const struct aead_testvec hmac_sha1_aes_cbc_enc_tv_temp[] = {
5340 { /* RFC 3602 Case 1 */
5341 #ifdef __LITTLE_ENDIAN
5342 .key = "\x08\x00" /* rta length */
5343 @@ -16383,7 +16500,7 @@ static struct aead_testvec hmac_sha1_aes
5344 },
5345 };
5346
5347 -static struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5348 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_enc_tv_temp[] = {
5349 { /* Input data from RFC 2410 Case 1 */
5350 #ifdef __LITTLE_ENDIAN
5351 .key = "\x08\x00" /* rta length */
5352 @@ -16429,7 +16546,7 @@ static struct aead_testvec hmac_sha1_ecb
5353 },
5354 };
5355
5356 -static struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5357 +static const struct aead_testvec hmac_sha1_ecb_cipher_null_dec_tv_temp[] = {
5358 {
5359 #ifdef __LITTLE_ENDIAN
5360 .key = "\x08\x00" /* rta length */
5361 @@ -16475,7 +16592,7 @@ static struct aead_testvec hmac_sha1_ecb
5362 },
5363 };
5364
5365 -static struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5366 +static const struct aead_testvec hmac_sha256_aes_cbc_enc_tv_temp[] = {
5367 { /* RFC 3602 Case 1 */
5368 #ifdef __LITTLE_ENDIAN
5369 .key = "\x08\x00" /* rta length */
5370 @@ -16758,7 +16875,7 @@ static struct aead_testvec hmac_sha256_a
5371 },
5372 };
5373
5374 -static struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5375 +static const struct aead_testvec hmac_sha512_aes_cbc_enc_tv_temp[] = {
5376 { /* RFC 3602 Case 1 */
5377 #ifdef __LITTLE_ENDIAN
5378 .key = "\x08\x00" /* rta length */
5379 @@ -17097,9 +17214,7 @@ static struct aead_testvec hmac_sha512_a
5380 },
5381 };
5382
5383 -#define HMAC_SHA1_DES_CBC_ENC_TEST_VEC 1
5384 -
5385 -static struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5386 +static const struct aead_testvec hmac_sha1_des_cbc_enc_tv_temp[] = {
5387 { /*Generated with cryptopp*/
5388 #ifdef __LITTLE_ENDIAN
5389 .key = "\x08\x00" /* rta length */
5390 @@ -17158,9 +17273,7 @@ static struct aead_testvec hmac_sha1_des
5391 },
5392 };
5393
5394 -#define HMAC_SHA224_DES_CBC_ENC_TEST_VEC 1
5395 -
5396 -static struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5397 +static const struct aead_testvec hmac_sha224_des_cbc_enc_tv_temp[] = {
5398 { /*Generated with cryptopp*/
5399 #ifdef __LITTLE_ENDIAN
5400 .key = "\x08\x00" /* rta length */
5401 @@ -17219,9 +17332,7 @@ static struct aead_testvec hmac_sha224_d
5402 },
5403 };
5404
5405 -#define HMAC_SHA256_DES_CBC_ENC_TEST_VEC 1
5406 -
5407 -static struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5408 +static const struct aead_testvec hmac_sha256_des_cbc_enc_tv_temp[] = {
5409 { /*Generated with cryptopp*/
5410 #ifdef __LITTLE_ENDIAN
5411 .key = "\x08\x00" /* rta length */
5412 @@ -17282,9 +17393,7 @@ static struct aead_testvec hmac_sha256_d
5413 },
5414 };
5415
5416 -#define HMAC_SHA384_DES_CBC_ENC_TEST_VEC 1
5417 -
5418 -static struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5419 +static const struct aead_testvec hmac_sha384_des_cbc_enc_tv_temp[] = {
5420 { /*Generated with cryptopp*/
5421 #ifdef __LITTLE_ENDIAN
5422 .key = "\x08\x00" /* rta length */
5423 @@ -17349,9 +17458,7 @@ static struct aead_testvec hmac_sha384_d
5424 },
5425 };
5426
5427 -#define HMAC_SHA512_DES_CBC_ENC_TEST_VEC 1
5428 -
5429 -static struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5430 +static const struct aead_testvec hmac_sha512_des_cbc_enc_tv_temp[] = {
5431 { /*Generated with cryptopp*/
5432 #ifdef __LITTLE_ENDIAN
5433 .key = "\x08\x00" /* rta length */
5434 @@ -17420,9 +17527,7 @@ static struct aead_testvec hmac_sha512_d
5435 },
5436 };
5437
5438 -#define HMAC_SHA1_DES3_EDE_CBC_ENC_TEST_VEC 1
5439 -
5440 -static struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5441 +static const struct aead_testvec hmac_sha1_des3_ede_cbc_enc_tv_temp[] = {
5442 { /*Generated with cryptopp*/
5443 #ifdef __LITTLE_ENDIAN
5444 .key = "\x08\x00" /* rta length */
5445 @@ -17483,9 +17588,7 @@ static struct aead_testvec hmac_sha1_des
5446 },
5447 };
5448
5449 -#define HMAC_SHA224_DES3_EDE_CBC_ENC_TEST_VEC 1
5450 -
5451 -static struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5452 +static const struct aead_testvec hmac_sha224_des3_ede_cbc_enc_tv_temp[] = {
5453 { /*Generated with cryptopp*/
5454 #ifdef __LITTLE_ENDIAN
5455 .key = "\x08\x00" /* rta length */
5456 @@ -17546,9 +17649,7 @@ static struct aead_testvec hmac_sha224_d
5457 },
5458 };
5459
5460 -#define HMAC_SHA256_DES3_EDE_CBC_ENC_TEST_VEC 1
5461 -
5462 -static struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5463 +static const struct aead_testvec hmac_sha256_des3_ede_cbc_enc_tv_temp[] = {
5464 { /*Generated with cryptopp*/
5465 #ifdef __LITTLE_ENDIAN
5466 .key = "\x08\x00" /* rta length */
5467 @@ -17611,9 +17712,7 @@ static struct aead_testvec hmac_sha256_d
5468 },
5469 };
5470
5471 -#define HMAC_SHA384_DES3_EDE_CBC_ENC_TEST_VEC 1
5472 -
5473 -static struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5474 +static const struct aead_testvec hmac_sha384_des3_ede_cbc_enc_tv_temp[] = {
5475 { /*Generated with cryptopp*/
5476 #ifdef __LITTLE_ENDIAN
5477 .key = "\x08\x00" /* rta length */
5478 @@ -17680,9 +17779,7 @@ static struct aead_testvec hmac_sha384_d
5479 },
5480 };
5481
5482 -#define HMAC_SHA512_DES3_EDE_CBC_ENC_TEST_VEC 1
5483 -
5484 -static struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5485 +static const struct aead_testvec hmac_sha512_des3_ede_cbc_enc_tv_temp[] = {
5486 { /*Generated with cryptopp*/
5487 #ifdef __LITTLE_ENDIAN
5488 .key = "\x08\x00" /* rta length */
5489 @@ -17753,7 +17850,7 @@ static struct aead_testvec hmac_sha512_d
5490 },
5491 };
5492
5493 -static struct cipher_testvec aes_lrw_enc_tv_template[] = {
5494 +static const struct cipher_testvec aes_lrw_enc_tv_template[] = {
5495 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5496 { /* LRW-32-AES 1 */
5497 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
5498 @@ -18006,7 +18103,7 @@ static struct cipher_testvec aes_lrw_enc
5499 }
5500 };
5501
5502 -static struct cipher_testvec aes_lrw_dec_tv_template[] = {
5503 +static const struct cipher_testvec aes_lrw_dec_tv_template[] = {
5504 /* from http://grouper.ieee.org/groups/1619/email/pdf00017.pdf */
5505 /* same as enc vectors with input and result reversed */
5506 { /* LRW-32-AES 1 */
5507 @@ -18260,7 +18357,7 @@ static struct cipher_testvec aes_lrw_dec
5508 }
5509 };
5510
5511 -static struct cipher_testvec aes_xts_enc_tv_template[] = {
5512 +static const struct cipher_testvec aes_xts_enc_tv_template[] = {
5513 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5514 { /* XTS-AES 1 */
5515 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5516 @@ -18603,7 +18700,7 @@ static struct cipher_testvec aes_xts_enc
5517 }
5518 };
5519
5520 -static struct cipher_testvec aes_xts_dec_tv_template[] = {
5521 +static const struct cipher_testvec aes_xts_dec_tv_template[] = {
5522 /* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf */
5523 { /* XTS-AES 1 */
5524 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
5525 @@ -18947,7 +19044,7 @@ static struct cipher_testvec aes_xts_dec
5526 };
5527
5528
5529 -static struct cipher_testvec aes_ctr_enc_tv_template[] = {
5530 +static const struct cipher_testvec aes_ctr_enc_tv_template[] = {
5531 { /* From NIST Special Publication 800-38A, Appendix F.5 */
5532 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5533 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5534 @@ -19302,7 +19399,7 @@ static struct cipher_testvec aes_ctr_enc
5535 },
5536 };
5537
5538 -static struct cipher_testvec aes_ctr_dec_tv_template[] = {
5539 +static const struct cipher_testvec aes_ctr_dec_tv_template[] = {
5540 { /* From NIST Special Publication 800-38A, Appendix F.5 */
5541 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5542 "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
5543 @@ -19657,7 +19754,7 @@ static struct cipher_testvec aes_ctr_dec
5544 },
5545 };
5546
5547 -static struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5548 +static const struct cipher_testvec aes_ctr_rfc3686_enc_tv_template[] = {
5549 { /* From RFC 3686 */
5550 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5551 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5552 @@ -20789,7 +20886,7 @@ static struct cipher_testvec aes_ctr_rfc
5553 },
5554 };
5555
5556 -static struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5557 +static const struct cipher_testvec aes_ctr_rfc3686_dec_tv_template[] = {
5558 { /* From RFC 3686 */
5559 .key = "\xae\x68\x52\xf8\x12\x10\x67\xcc"
5560 "\x4b\xf7\xa5\x76\x55\x77\xf3\x9e"
5561 @@ -20880,7 +20977,7 @@ static struct cipher_testvec aes_ctr_rfc
5562 },
5563 };
5564
5565 -static struct cipher_testvec aes_ofb_enc_tv_template[] = {
5566 +static const struct cipher_testvec aes_ofb_enc_tv_template[] = {
5567 /* From NIST Special Publication 800-38A, Appendix F.5 */
5568 {
5569 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5570 @@ -20909,7 +21006,7 @@ static struct cipher_testvec aes_ofb_enc
5571 }
5572 };
5573
5574 -static struct cipher_testvec aes_ofb_dec_tv_template[] = {
5575 +static const struct cipher_testvec aes_ofb_dec_tv_template[] = {
5576 /* From NIST Special Publication 800-38A, Appendix F.5 */
5577 {
5578 .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
5579 @@ -20938,7 +21035,7 @@ static struct cipher_testvec aes_ofb_dec
5580 }
5581 };
5582
5583 -static struct aead_testvec aes_gcm_enc_tv_template[] = {
5584 +static const struct aead_testvec aes_gcm_enc_tv_template[] = {
5585 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5586 .key = zeroed_string,
5587 .klen = 16,
5588 @@ -21098,7 +21195,7 @@ static struct aead_testvec aes_gcm_enc_t
5589 }
5590 };
5591
5592 -static struct aead_testvec aes_gcm_dec_tv_template[] = {
5593 +static const struct aead_testvec aes_gcm_dec_tv_template[] = {
5594 { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
5595 .key = zeroed_string,
5596 .klen = 32,
5597 @@ -21300,7 +21397,7 @@ static struct aead_testvec aes_gcm_dec_t
5598 }
5599 };
5600
5601 -static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5602 +static const struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
5603 { /* Generated using Crypto++ */
5604 .key = zeroed_string,
5605 .klen = 20,
5606 @@ -21913,7 +22010,7 @@ static struct aead_testvec aes_gcm_rfc41
5607 }
5608 };
5609
5610 -static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5611 +static const struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
5612 { /* Generated using Crypto++ */
5613 .key = zeroed_string,
5614 .klen = 20,
5615 @@ -22527,7 +22624,7 @@ static struct aead_testvec aes_gcm_rfc41
5616 }
5617 };
5618
5619 -static struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5620 +static const struct aead_testvec aes_gcm_rfc4543_enc_tv_template[] = {
5621 { /* From draft-mcgrew-gcm-test-01 */
5622 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5623 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5624 @@ -22558,7 +22655,7 @@ static struct aead_testvec aes_gcm_rfc45
5625 }
5626 };
5627
5628 -static struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5629 +static const struct aead_testvec aes_gcm_rfc4543_dec_tv_template[] = {
5630 { /* From draft-mcgrew-gcm-test-01 */
5631 .key = "\x4c\x80\xcd\xef\xbb\x5d\x10\xda"
5632 "\x90\x6a\xc7\x3c\x36\x13\xa6\x34"
5633 @@ -22617,7 +22714,7 @@ static struct aead_testvec aes_gcm_rfc45
5634 },
5635 };
5636
5637 -static struct aead_testvec aes_ccm_enc_tv_template[] = {
5638 +static const struct aead_testvec aes_ccm_enc_tv_template[] = {
5639 { /* From RFC 3610 */
5640 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5641 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5642 @@ -22901,7 +22998,7 @@ static struct aead_testvec aes_ccm_enc_t
5643 }
5644 };
5645
5646 -static struct aead_testvec aes_ccm_dec_tv_template[] = {
5647 +static const struct aead_testvec aes_ccm_dec_tv_template[] = {
5648 { /* From RFC 3610 */
5649 .key = "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
5650 "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf",
5651 @@ -23233,7 +23330,7 @@ static struct aead_testvec aes_ccm_dec_t
5652 * These vectors are copied/generated from the ones for rfc4106 with
5653 * the key truncated by one byte..
5654 */
5655 -static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5656 +static const struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
5657 { /* Generated using Crypto++ */
5658 .key = zeroed_string,
5659 .klen = 19,
5660 @@ -23846,7 +23943,7 @@ static struct aead_testvec aes_ccm_rfc43
5661 }
5662 };
5663
5664 -static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
5665 +static const struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
5666 { /* Generated using Crypto++ */
5667 .key = zeroed_string,
5668 .klen = 19,
5669 @@ -24462,9 +24559,7 @@ static struct aead_testvec aes_ccm_rfc43
5670 /*
5671 * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5.
5672 */
5673 -#define RFC7539_ENC_TEST_VECTORS 2
5674 -#define RFC7539_DEC_TEST_VECTORS 2
5675 -static struct aead_testvec rfc7539_enc_tv_template[] = {
5676 +static const struct aead_testvec rfc7539_enc_tv_template[] = {
5677 {
5678 .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
5679 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5680 @@ -24596,7 +24691,7 @@ static struct aead_testvec rfc7539_enc_t
5681 },
5682 };
5683
5684 -static struct aead_testvec rfc7539_dec_tv_template[] = {
5685 +static const struct aead_testvec rfc7539_dec_tv_template[] = {
5686 {
5687 .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
5688 "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
5689 @@ -24731,9 +24826,7 @@ static struct aead_testvec rfc7539_dec_t
5690 /*
5691 * draft-irtf-cfrg-chacha20-poly1305
5692 */
5693 -#define RFC7539ESP_DEC_TEST_VECTORS 1
5694 -#define RFC7539ESP_ENC_TEST_VECTORS 1
5695 -static struct aead_testvec rfc7539esp_enc_tv_template[] = {
5696 +static const struct aead_testvec rfc7539esp_enc_tv_template[] = {
5697 {
5698 .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5699 "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5700 @@ -24821,7 +24914,7 @@ static struct aead_testvec rfc7539esp_en
5701 },
5702 };
5703
5704 -static struct aead_testvec rfc7539esp_dec_tv_template[] = {
5705 +static const struct aead_testvec rfc7539esp_dec_tv_template[] = {
5706 {
5707 .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
5708 "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
5709 @@ -24917,7 +25010,7 @@ static struct aead_testvec rfc7539esp_de
5710 * semiblock of the ciphertext from the test vector. For decryption, iv is
5711 * the first semiblock of the ciphertext.
5712 */
5713 -static struct cipher_testvec aes_kw_enc_tv_template[] = {
5714 +static const struct cipher_testvec aes_kw_enc_tv_template[] = {
5715 {
5716 .key = "\x75\x75\xda\x3a\x93\x60\x7c\xc2"
5717 "\xbf\xd8\xce\xc7\xaa\xdf\xd9\xa6",
5718 @@ -24932,7 +25025,7 @@ static struct cipher_testvec aes_kw_enc_
5719 },
5720 };
5721
5722 -static struct cipher_testvec aes_kw_dec_tv_template[] = {
5723 +static const struct cipher_testvec aes_kw_dec_tv_template[] = {
5724 {
5725 .key = "\x80\xaa\x99\x73\x27\xa4\x80\x6b"
5726 "\x6a\x7a\x41\xa5\x2b\x86\xc3\x71"
5727 @@ -24955,9 +25048,7 @@ static struct cipher_testvec aes_kw_dec_
5728 * http://csrc.nist.gov/groups/STM/cavp/documents/rng/RNGVS.pdf
5729 * Only AES-128 is supported at this time.
5730 */
5731 -#define ANSI_CPRNG_AES_TEST_VECTORS 6
5732 -
5733 -static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5734 +static const struct cprng_testvec ansi_cprng_aes_tv_template[] = {
5735 {
5736 .key = "\xf3\xb1\x66\x6d\x13\x60\x72\x42"
5737 "\xed\x06\x1c\xab\xb8\xd4\x62\x02",
5738 @@ -25053,7 +25144,7 @@ static struct cprng_testvec ansi_cprng_a
5739 * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5740 * w/o personalization string, w/ and w/o additional input string).
5741 */
5742 -static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5743 +static const struct drbg_testvec drbg_pr_sha256_tv_template[] = {
5744 {
5745 .entropy = (unsigned char *)
5746 "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
5747 @@ -25211,7 +25302,7 @@ static struct drbg_testvec drbg_pr_sha25
5748 },
5749 };
5750
5751 -static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5752 +static const struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
5753 {
5754 .entropy = (unsigned char *)
5755 "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
5756 @@ -25369,7 +25460,7 @@ static struct drbg_testvec drbg_pr_hmac_
5757 },
5758 };
5759
5760 -static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5761 +static const struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
5762 {
5763 .entropy = (unsigned char *)
5764 "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
5765 @@ -25493,7 +25584,7 @@ static struct drbg_testvec drbg_pr_ctr_a
5766 * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
5767 * w/o personalization string, w/ and w/o additional input string).
5768 */
5769 -static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5770 +static const struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
5771 {
5772 .entropy = (unsigned char *)
5773 "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
5774 @@ -25615,7 +25706,7 @@ static struct drbg_testvec drbg_nopr_sha
5775 },
5776 };
5777
5778 -static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5779 +static const struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
5780 {
5781 .entropy = (unsigned char *)
5782 "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
5783 @@ -25737,7 +25828,7 @@ static struct drbg_testvec drbg_nopr_hma
5784 },
5785 };
5786
5787 -static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5788 +static const struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
5789 {
5790 .entropy = (unsigned char *)
5791 "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
5792 @@ -25761,7 +25852,7 @@ static struct drbg_testvec drbg_nopr_ctr
5793 },
5794 };
5795
5796 -static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5797 +static const struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
5798 {
5799 .entropy = (unsigned char *)
5800 "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
5801 @@ -25785,7 +25876,7 @@ static struct drbg_testvec drbg_nopr_ctr
5802 },
5803 };
5804
5805 -static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5806 +static const struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
5807 {
5808 .entropy = (unsigned char *)
5809 "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
5810 @@ -25874,14 +25965,7 @@ static struct drbg_testvec drbg_nopr_ctr
5811 };
5812
5813 /* Cast5 test vectors from RFC 2144 */
5814 -#define CAST5_ENC_TEST_VECTORS 4
5815 -#define CAST5_DEC_TEST_VECTORS 4
5816 -#define CAST5_CBC_ENC_TEST_VECTORS 1
5817 -#define CAST5_CBC_DEC_TEST_VECTORS 1
5818 -#define CAST5_CTR_ENC_TEST_VECTORS 2
5819 -#define CAST5_CTR_DEC_TEST_VECTORS 2
5820 -
5821 -static struct cipher_testvec cast5_enc_tv_template[] = {
5822 +static const struct cipher_testvec cast5_enc_tv_template[] = {
5823 {
5824 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5825 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5826 @@ -26042,7 +26126,7 @@ static struct cipher_testvec cast5_enc_t
5827 },
5828 };
5829
5830 -static struct cipher_testvec cast5_dec_tv_template[] = {
5831 +static const struct cipher_testvec cast5_dec_tv_template[] = {
5832 {
5833 .key = "\x01\x23\x45\x67\x12\x34\x56\x78"
5834 "\x23\x45\x67\x89\x34\x56\x78\x9a",
5835 @@ -26203,7 +26287,7 @@ static struct cipher_testvec cast5_dec_t
5836 },
5837 };
5838
5839 -static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5840 +static const struct cipher_testvec cast5_cbc_enc_tv_template[] = {
5841 { /* Generated from TF test vectors */
5842 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5843 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5844 @@ -26341,7 +26425,7 @@ static struct cipher_testvec cast5_cbc_e
5845 },
5846 };
5847
5848 -static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5849 +static const struct cipher_testvec cast5_cbc_dec_tv_template[] = {
5850 { /* Generated from TF test vectors */
5851 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5852 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5853 @@ -26479,7 +26563,7 @@ static struct cipher_testvec cast5_cbc_d
5854 },
5855 };
5856
5857 -static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5858 +static const struct cipher_testvec cast5_ctr_enc_tv_template[] = {
5859 { /* Generated from TF test vectors */
5860 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5861 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5862 @@ -26630,7 +26714,7 @@ static struct cipher_testvec cast5_ctr_e
5863 },
5864 };
5865
5866 -static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5867 +static const struct cipher_testvec cast5_ctr_dec_tv_template[] = {
5868 { /* Generated from TF test vectors */
5869 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
5870 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A",
5871 @@ -26784,10 +26868,7 @@ static struct cipher_testvec cast5_ctr_d
5872 /*
5873 * ARC4 test vectors from OpenSSL
5874 */
5875 -#define ARC4_ENC_TEST_VECTORS 7
5876 -#define ARC4_DEC_TEST_VECTORS 7
5877 -
5878 -static struct cipher_testvec arc4_enc_tv_template[] = {
5879 +static const struct cipher_testvec arc4_enc_tv_template[] = {
5880 {
5881 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5882 .klen = 8,
5883 @@ -26853,7 +26934,7 @@ static struct cipher_testvec arc4_enc_tv
5884 },
5885 };
5886
5887 -static struct cipher_testvec arc4_dec_tv_template[] = {
5888 +static const struct cipher_testvec arc4_dec_tv_template[] = {
5889 {
5890 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef",
5891 .klen = 8,
5892 @@ -26922,10 +27003,7 @@ static struct cipher_testvec arc4_dec_tv
5893 /*
5894 * TEA test vectors
5895 */
5896 -#define TEA_ENC_TEST_VECTORS 4
5897 -#define TEA_DEC_TEST_VECTORS 4
5898 -
5899 -static struct cipher_testvec tea_enc_tv_template[] = {
5900 +static const struct cipher_testvec tea_enc_tv_template[] = {
5901 {
5902 .key = zeroed_string,
5903 .klen = 16,
5904 @@ -26968,7 +27046,7 @@ static struct cipher_testvec tea_enc_tv_
5905 }
5906 };
5907
5908 -static struct cipher_testvec tea_dec_tv_template[] = {
5909 +static const struct cipher_testvec tea_dec_tv_template[] = {
5910 {
5911 .key = zeroed_string,
5912 .klen = 16,
5913 @@ -27014,10 +27092,7 @@ static struct cipher_testvec tea_dec_tv_
5914 /*
5915 * XTEA test vectors
5916 */
5917 -#define XTEA_ENC_TEST_VECTORS 4
5918 -#define XTEA_DEC_TEST_VECTORS 4
5919 -
5920 -static struct cipher_testvec xtea_enc_tv_template[] = {
5921 +static const struct cipher_testvec xtea_enc_tv_template[] = {
5922 {
5923 .key = zeroed_string,
5924 .klen = 16,
5925 @@ -27060,7 +27135,7 @@ static struct cipher_testvec xtea_enc_tv
5926 }
5927 };
5928
5929 -static struct cipher_testvec xtea_dec_tv_template[] = {
5930 +static const struct cipher_testvec xtea_dec_tv_template[] = {
5931 {
5932 .key = zeroed_string,
5933 .klen = 16,
5934 @@ -27106,10 +27181,7 @@ static struct cipher_testvec xtea_dec_tv
5935 /*
5936 * KHAZAD test vectors.
5937 */
5938 -#define KHAZAD_ENC_TEST_VECTORS 5
5939 -#define KHAZAD_DEC_TEST_VECTORS 5
5940 -
5941 -static struct cipher_testvec khazad_enc_tv_template[] = {
5942 +static const struct cipher_testvec khazad_enc_tv_template[] = {
5943 {
5944 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
5945 "\x00\x00\x00\x00\x00\x00\x00\x00",
5946 @@ -27155,7 +27227,7 @@ static struct cipher_testvec khazad_enc_
5947 },
5948 };
5949
5950 -static struct cipher_testvec khazad_dec_tv_template[] = {
5951 +static const struct cipher_testvec khazad_dec_tv_template[] = {
5952 {
5953 .key = "\x80\x00\x00\x00\x00\x00\x00\x00"
5954 "\x00\x00\x00\x00\x00\x00\x00\x00",
5955 @@ -27205,12 +27277,7 @@ static struct cipher_testvec khazad_dec_
5956 * Anubis test vectors.
5957 */
5958
5959 -#define ANUBIS_ENC_TEST_VECTORS 5
5960 -#define ANUBIS_DEC_TEST_VECTORS 5
5961 -#define ANUBIS_CBC_ENC_TEST_VECTORS 2
5962 -#define ANUBIS_CBC_DEC_TEST_VECTORS 2
5963 -
5964 -static struct cipher_testvec anubis_enc_tv_template[] = {
5965 +static const struct cipher_testvec anubis_enc_tv_template[] = {
5966 {
5967 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5968 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5969 @@ -27273,7 +27340,7 @@ static struct cipher_testvec anubis_enc_
5970 },
5971 };
5972
5973 -static struct cipher_testvec anubis_dec_tv_template[] = {
5974 +static const struct cipher_testvec anubis_dec_tv_template[] = {
5975 {
5976 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5977 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5978 @@ -27336,7 +27403,7 @@ static struct cipher_testvec anubis_dec_
5979 },
5980 };
5981
5982 -static struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5983 +static const struct cipher_testvec anubis_cbc_enc_tv_template[] = {
5984 {
5985 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5986 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5987 @@ -27371,7 +27438,7 @@ static struct cipher_testvec anubis_cbc_
5988 },
5989 };
5990
5991 -static struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5992 +static const struct cipher_testvec anubis_cbc_dec_tv_template[] = {
5993 {
5994 .key = "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe"
5995 "\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe",
5996 @@ -27409,10 +27476,7 @@ static struct cipher_testvec anubis_cbc_
5997 /*
5998 * XETA test vectors
5999 */
6000 -#define XETA_ENC_TEST_VECTORS 4
6001 -#define XETA_DEC_TEST_VECTORS 4
6002 -
6003 -static struct cipher_testvec xeta_enc_tv_template[] = {
6004 +static const struct cipher_testvec xeta_enc_tv_template[] = {
6005 {
6006 .key = zeroed_string,
6007 .klen = 16,
6008 @@ -27455,7 +27519,7 @@ static struct cipher_testvec xeta_enc_tv
6009 }
6010 };
6011
6012 -static struct cipher_testvec xeta_dec_tv_template[] = {
6013 +static const struct cipher_testvec xeta_dec_tv_template[] = {
6014 {
6015 .key = zeroed_string,
6016 .klen = 16,
6017 @@ -27501,10 +27565,7 @@ static struct cipher_testvec xeta_dec_tv
6018 /*
6019 * FCrypt test vectors
6020 */
6021 -#define FCRYPT_ENC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
6022 -#define FCRYPT_DEC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
6023 -
6024 -static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6025 +static const struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
6026 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6027 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6028 .klen = 8,
6029 @@ -27565,7 +27626,7 @@ static struct cipher_testvec fcrypt_pcbc
6030 }
6031 };
6032
6033 -static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6034 +static const struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
6035 { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
6036 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6037 .klen = 8,
6038 @@ -27629,18 +27690,7 @@ static struct cipher_testvec fcrypt_pcbc
6039 /*
6040 * CAMELLIA test vectors.
6041 */
6042 -#define CAMELLIA_ENC_TEST_VECTORS 4
6043 -#define CAMELLIA_DEC_TEST_VECTORS 4
6044 -#define CAMELLIA_CBC_ENC_TEST_VECTORS 3
6045 -#define CAMELLIA_CBC_DEC_TEST_VECTORS 3
6046 -#define CAMELLIA_CTR_ENC_TEST_VECTORS 2
6047 -#define CAMELLIA_CTR_DEC_TEST_VECTORS 2
6048 -#define CAMELLIA_LRW_ENC_TEST_VECTORS 8
6049 -#define CAMELLIA_LRW_DEC_TEST_VECTORS 8
6050 -#define CAMELLIA_XTS_ENC_TEST_VECTORS 5
6051 -#define CAMELLIA_XTS_DEC_TEST_VECTORS 5
6052 -
6053 -static struct cipher_testvec camellia_enc_tv_template[] = {
6054 +static const struct cipher_testvec camellia_enc_tv_template[] = {
6055 {
6056 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6057 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6058 @@ -27940,7 +27990,7 @@ static struct cipher_testvec camellia_en
6059 },
6060 };
6061
6062 -static struct cipher_testvec camellia_dec_tv_template[] = {
6063 +static const struct cipher_testvec camellia_dec_tv_template[] = {
6064 {
6065 .key = "\x01\x23\x45\x67\x89\xab\xcd\xef"
6066 "\xfe\xdc\xba\x98\x76\x54\x32\x10",
6067 @@ -28240,7 +28290,7 @@ static struct cipher_testvec camellia_de
6068 },
6069 };
6070
6071 -static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6072 +static const struct cipher_testvec camellia_cbc_enc_tv_template[] = {
6073 {
6074 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6075 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6076 @@ -28536,7 +28586,7 @@ static struct cipher_testvec camellia_cb
6077 },
6078 };
6079
6080 -static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6081 +static const struct cipher_testvec camellia_cbc_dec_tv_template[] = {
6082 {
6083 .key = "\x06\xa9\x21\x40\x36\xb8\xa1\x5b"
6084 "\x51\x2e\x03\xd5\x34\x12\x00\x06",
6085 @@ -28832,7 +28882,7 @@ static struct cipher_testvec camellia_cb
6086 },
6087 };
6088
6089 -static struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6090 +static const struct cipher_testvec camellia_ctr_enc_tv_template[] = {
6091 { /* Generated with Crypto++ */
6092 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6093 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6094 @@ -29499,7 +29549,7 @@ static struct cipher_testvec camellia_ct
6095 },
6096 };
6097
6098 -static struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6099 +static const struct cipher_testvec camellia_ctr_dec_tv_template[] = {
6100 { /* Generated with Crypto++ */
6101 .key = "\x85\x62\x3F\x1C\xF9\xD6\x1C\xF9"
6102 "\xD6\xB3\x90\x6D\x4A\x90\x6D\x4A"
6103 @@ -30166,7 +30216,7 @@ static struct cipher_testvec camellia_ct
6104 },
6105 };
6106
6107 -static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6108 +static const struct cipher_testvec camellia_lrw_enc_tv_template[] = {
6109 /* Generated from AES-LRW test vectors */
6110 {
6111 .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
6112 @@ -30418,7 +30468,7 @@ static struct cipher_testvec camellia_lr
6113 },
6114 };
6115
6116 -static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6117 +static const struct cipher_testvec camellia_lrw_dec_tv_template[] = {
6118 /* Generated from AES-LRW test vectors */
6119 /* same as enc vectors with input and result reversed */
6120 {
6121 @@ -30671,7 +30721,7 @@ static struct cipher_testvec camellia_lr
6122 },
6123 };
6124
6125 -static struct cipher_testvec camellia_xts_enc_tv_template[] = {
6126 +static const struct cipher_testvec camellia_xts_enc_tv_template[] = {
6127 /* Generated from AES-XTS test vectors */
6128 {
6129 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6130 @@ -31013,7 +31063,7 @@ static struct cipher_testvec camellia_xt
6131 },
6132 };
6133
6134 -static struct cipher_testvec camellia_xts_dec_tv_template[] = {
6135 +static const struct cipher_testvec camellia_xts_dec_tv_template[] = {
6136 /* Generated from AES-XTS test vectors */
6137 /* same as enc vectors with input and result reversed */
6138 {
6139 @@ -31359,10 +31409,7 @@ static struct cipher_testvec camellia_xt
6140 /*
6141 * SEED test vectors
6142 */
6143 -#define SEED_ENC_TEST_VECTORS 4
6144 -#define SEED_DEC_TEST_VECTORS 4
6145 -
6146 -static struct cipher_testvec seed_enc_tv_template[] = {
6147 +static const struct cipher_testvec seed_enc_tv_template[] = {
6148 {
6149 .key = zeroed_string,
6150 .klen = 16,
6151 @@ -31404,7 +31451,7 @@ static struct cipher_testvec seed_enc_tv
6152 }
6153 };
6154
6155 -static struct cipher_testvec seed_dec_tv_template[] = {
6156 +static const struct cipher_testvec seed_dec_tv_template[] = {
6157 {
6158 .key = zeroed_string,
6159 .klen = 16,
6160 @@ -31446,8 +31493,7 @@ static struct cipher_testvec seed_dec_tv
6161 }
6162 };
6163
6164 -#define SALSA20_STREAM_ENC_TEST_VECTORS 5
6165 -static struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6166 +static const struct cipher_testvec salsa20_stream_enc_tv_template[] = {
6167 /*
6168 * Testvectors from verified.test-vectors submitted to ECRYPT.
6169 * They are truncated to size 39, 64, 111, 129 to test a variety
6170 @@ -32616,8 +32662,7 @@ static struct cipher_testvec salsa20_str
6171 },
6172 };
6173
6174 -#define CHACHA20_ENC_TEST_VECTORS 4
6175 -static struct cipher_testvec chacha20_enc_tv_template[] = {
6176 +static const struct cipher_testvec chacha20_enc_tv_template[] = {
6177 { /* RFC7539 A.2. Test Vector #1 */
6178 .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
6179 "\x00\x00\x00\x00\x00\x00\x00\x00"
6180 @@ -33128,9 +33173,7 @@ static struct cipher_testvec chacha20_en
6181 /*
6182 * CTS (Cipher Text Stealing) mode tests
6183 */
6184 -#define CTS_MODE_ENC_TEST_VECTORS 6
6185 -#define CTS_MODE_DEC_TEST_VECTORS 6
6186 -static struct cipher_testvec cts_mode_enc_tv_template[] = {
6187 +static const struct cipher_testvec cts_mode_enc_tv_template[] = {
6188 { /* from rfc3962 */
6189 .klen = 16,
6190 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6191 @@ -33232,7 +33275,7 @@ static struct cipher_testvec cts_mode_en
6192 }
6193 };
6194
6195 -static struct cipher_testvec cts_mode_dec_tv_template[] = {
6196 +static const struct cipher_testvec cts_mode_dec_tv_template[] = {
6197 { /* from rfc3962 */
6198 .klen = 16,
6199 .key = "\x63\x68\x69\x63\x6b\x65\x6e\x20"
6200 @@ -33350,10 +33393,7 @@ struct comp_testvec {
6201 * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
6202 */
6203
6204 -#define DEFLATE_COMP_TEST_VECTORS 2
6205 -#define DEFLATE_DECOMP_TEST_VECTORS 2
6206 -
6207 -static struct comp_testvec deflate_comp_tv_template[] = {
6208 +static const struct comp_testvec deflate_comp_tv_template[] = {
6209 {
6210 .inlen = 70,
6211 .outlen = 38,
6212 @@ -33389,7 +33429,7 @@ static struct comp_testvec deflate_comp_
6213 },
6214 };
6215
6216 -static struct comp_testvec deflate_decomp_tv_template[] = {
6217 +static const struct comp_testvec deflate_decomp_tv_template[] = {
6218 {
6219 .inlen = 122,
6220 .outlen = 191,
6221 @@ -33428,10 +33468,7 @@ static struct comp_testvec deflate_decom
6222 /*
6223 * LZO test vectors (null-terminated strings).
6224 */
6225 -#define LZO_COMP_TEST_VECTORS 2
6226 -#define LZO_DECOMP_TEST_VECTORS 2
6227 -
6228 -static struct comp_testvec lzo_comp_tv_template[] = {
6229 +static const struct comp_testvec lzo_comp_tv_template[] = {
6230 {
6231 .inlen = 70,
6232 .outlen = 57,
6233 @@ -33471,7 +33508,7 @@ static struct comp_testvec lzo_comp_tv_t
6234 },
6235 };
6236
6237 -static struct comp_testvec lzo_decomp_tv_template[] = {
6238 +static const struct comp_testvec lzo_decomp_tv_template[] = {
6239 {
6240 .inlen = 133,
6241 .outlen = 159,
6242 @@ -33514,7 +33551,7 @@ static struct comp_testvec lzo_decomp_tv
6243 */
6244 #define MICHAEL_MIC_TEST_VECTORS 6
6245
6246 -static struct hash_testvec michael_mic_tv_template[] = {
6247 +static const struct hash_testvec michael_mic_tv_template[] = {
6248 {
6249 .key = "\x00\x00\x00\x00\x00\x00\x00\x00",
6250 .ksize = 8,
6251 @@ -33562,9 +33599,7 @@ static struct hash_testvec michael_mic_t
6252 /*
6253 * CRC32 test vectors
6254 */
6255 -#define CRC32_TEST_VECTORS 14
6256 -
6257 -static struct hash_testvec crc32_tv_template[] = {
6258 +static const struct hash_testvec crc32_tv_template[] = {
6259 {
6260 .key = "\x87\xa9\xcb\xed",
6261 .ksize = 4,
6262 @@ -33996,9 +34031,7 @@ static struct hash_testvec crc32_tv_temp
6263 /*
6264 * CRC32C test vectors
6265 */
6266 -#define CRC32C_TEST_VECTORS 15
6267 -
6268 -static struct hash_testvec crc32c_tv_template[] = {
6269 +static const struct hash_testvec crc32c_tv_template[] = {
6270 {
6271 .psize = 0,
6272 .digest = "\x00\x00\x00\x00",
6273 @@ -34434,9 +34467,7 @@ static struct hash_testvec crc32c_tv_tem
6274 /*
6275  * Blackfin CRC test vectors
6276 */
6277 -#define BFIN_CRC_TEST_VECTORS 6
6278 -
6279 -static struct hash_testvec bfin_crc_tv_template[] = {
6280 +static const struct hash_testvec bfin_crc_tv_template[] = {
6281 {
6282 .psize = 0,
6283 .digest = "\x00\x00\x00\x00",
6284 @@ -34521,9 +34552,6 @@ static struct hash_testvec bfin_crc_tv_t
6285
6286 };
6287
6288 -#define LZ4_COMP_TEST_VECTORS 1
6289 -#define LZ4_DECOMP_TEST_VECTORS 1
6290 -
6291 static struct comp_testvec lz4_comp_tv_template[] = {
6292 {
6293 .inlen = 70,
6294 @@ -34554,9 +34582,6 @@ static struct comp_testvec lz4_decomp_tv
6295 },
6296 };
6297
6298 -#define LZ4HC_COMP_TEST_VECTORS 1
6299 -#define LZ4HC_DECOMP_TEST_VECTORS 1
6300 -
6301 static struct comp_testvec lz4hc_comp_tv_template[] = {
6302 {
6303 .inlen = 70,
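/*
 * Context for the testmgr.h churn above: the per-algorithm *_TEST_VECTORS
 * defines can go because the (now const) vector arrays have their length
 * computed with ARRAY_SIZE() where testmgr.c references them, roughly via
 * a helper of this shape (a sketch of the upstream pattern, not a line
 * taken from this patch):
 */
#define __VECS(tv)	{ .vecs = tv, .count = ARRAY_SIZE(tv) }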
6304 --- /dev/null
6305 +++ b/crypto/tls.c
6306 @@ -0,0 +1,607 @@
6307 +/*
6308 + * Copyright 2013 Freescale Semiconductor, Inc.
6309 + * Copyright 2017 NXP Semiconductor, Inc.
6310 + *
6311 + * This program is free software; you can redistribute it and/or modify it
6312 + * under the terms of the GNU General Public License as published by the Free
6313 + * Software Foundation; either version 2 of the License, or (at your option)
6314 + * any later version.
6315 + *
6316 + */
6317 +
6318 +#include <crypto/internal/aead.h>
6319 +#include <crypto/internal/hash.h>
6320 +#include <crypto/internal/skcipher.h>
6321 +#include <crypto/authenc.h>
6322 +#include <crypto/null.h>
6323 +#include <crypto/scatterwalk.h>
6324 +#include <linux/err.h>
6325 +#include <linux/init.h>
6326 +#include <linux/module.h>
6327 +#include <linux/rtnetlink.h>
6328 +
6329 +struct tls_instance_ctx {
6330 + struct crypto_ahash_spawn auth;
6331 + struct crypto_skcipher_spawn enc;
6332 +};
6333 +
6334 +struct crypto_tls_ctx {
6335 + unsigned int reqoff;
6336 + struct crypto_ahash *auth;
6337 + struct crypto_skcipher *enc;
6338 + struct crypto_skcipher *null;
6339 +};
6340 +
6341 +struct tls_request_ctx {
6342 + /*
6343 + * cryptlen holds the payload length in the case of encryption or
6344 + * payload_len + icv_len + padding_len in case of decryption
6345 + */
6346 + unsigned int cryptlen;
6347 + /* working space for partial results */
6348 + struct scatterlist tmp[2];
6349 + struct scatterlist cipher[2];
6350 + struct scatterlist dst[2];
6351 + char tail[];
6352 +};
6353 +
6354 +struct async_op {
6355 + struct completion completion;
6356 + int err;
6357 +};
6358 +
6359 +static void tls_async_op_done(struct crypto_async_request *req, int err)
6360 +{
6361 + struct async_op *areq = req->data;
6362 +
6363 + if (err == -EINPROGRESS)
6364 + return;
6365 +
6366 + areq->err = err;
6367 + complete(&areq->completion);
6368 +}
6369 +
6370 +static int crypto_tls_setkey(struct crypto_aead *tls, const u8 *key,
6371 + unsigned int keylen)
6372 +{
6373 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6374 + struct crypto_ahash *auth = ctx->auth;
6375 + struct crypto_skcipher *enc = ctx->enc;
6376 + struct crypto_authenc_keys keys;
6377 + int err = -EINVAL;
6378 +
6379 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
6380 + goto badkey;
6381 +
6382 + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
6383 + crypto_ahash_set_flags(auth, crypto_aead_get_flags(tls) &
6384 + CRYPTO_TFM_REQ_MASK);
6385 + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
6386 + crypto_aead_set_flags(tls, crypto_ahash_get_flags(auth) &
6387 + CRYPTO_TFM_RES_MASK);
6388 +
6389 + if (err)
6390 + goto out;
6391 +
6392 + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
6393 + crypto_skcipher_set_flags(enc, crypto_aead_get_flags(tls) &
6394 + CRYPTO_TFM_REQ_MASK);
6395 + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen);
6396 + crypto_aead_set_flags(tls, crypto_skcipher_get_flags(enc) &
6397 + CRYPTO_TFM_RES_MASK);
6398 +
6399 +out:
6400 + return err;
6401 +
6402 +badkey:
6403 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
6404 + goto out;
6405 +}
6406 +
6407 +/**
6408 + * crypto_tls_genicv - Calculate hmac digest for a TLS record
6409 + * @hash: (output) buffer to save the digest into
6410 + * @src: (input) scatterlist with the assoc and payload data
6411 + * @srclen: (input) size of the source buffer (assoclen + cryptlen)
6412 + * @req: (input) aead request
6413 + **/
6414 +static int crypto_tls_genicv(u8 *hash, struct scatterlist *src,
6415 + unsigned int srclen, struct aead_request *req)
6416 +{
6417 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6418 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6419 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6420 + struct async_op ahash_op;
6421 + struct ahash_request *ahreq = (void *)(treq_ctx->tail + ctx->reqoff);
6422 + unsigned int flags = CRYPTO_TFM_REQ_MAY_SLEEP;
6423 + int err = -EBADMSG;
6424 +
6425 + /* Bail out if the request assoc len is 0 */
6426 + if (!req->assoclen)
6427 + return err;
6428 +
6429 + init_completion(&ahash_op.completion);
6430 +
6431 + /* the hash transform to be executed comes from the original request */
6432 + ahash_request_set_tfm(ahreq, ctx->auth);
6433 + /* prepare the hash request with input data and result pointer */
6434 + ahash_request_set_crypt(ahreq, src, hash, srclen);
6435 + /* set the notifier for when the async hash function returns */
6436 + ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
6437 + tls_async_op_done, &ahash_op);
6438 +
6439 + /* Calculate the digest on the given data. The result is put in hash */
6440 + err = crypto_ahash_digest(ahreq);
6441 + if (err == -EINPROGRESS) {
6442 + err = wait_for_completion_interruptible(&ahash_op.completion);
6443 + if (!err)
6444 + err = ahash_op.err;
6445 + }
6446 +
6447 + return err;
6448 +}
6449 +
6450 +/**
6451 + * crypto_tls_gen_padicv - Calculate and pad hmac digest for a TLS record
6452 + * @hash: (output) buffer to save the digest and padding into
6453 + * @phashlen: (output) the size of digest + padding
6454 + * @req: (input) aead request
6455 + **/
6456 +static int crypto_tls_gen_padicv(u8 *hash, unsigned int *phashlen,
6457 + struct aead_request *req)
6458 +{
6459 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6460 + unsigned int hash_size = crypto_aead_authsize(tls);
6461 + unsigned int block_size = crypto_aead_blocksize(tls);
6462 + unsigned int srclen = req->cryptlen + hash_size;
6463 + unsigned int icvlen = req->cryptlen + req->assoclen;
6464 + unsigned int padlen;
6465 + int err;
6466 +
6467 + err = crypto_tls_genicv(hash, req->src, icvlen, req);
6468 + if (err)
6469 + goto out;
6470 +
6471 + /* add padding after digest */
6472 + padlen = block_size - (srclen % block_size);
6473 + memset(hash + hash_size, padlen - 1, padlen);
6474 +
6475 + *phashlen = hash_size + padlen;
6476 +out:
6477 + return err;
6478 +}
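/*
 * A worked example of the padding rule above, assuming hmac(sha1) with
 * cbc(aes): hash_size = 20 and block_size = 16.  For req->cryptlen = 32
 * the payload plus digest is srclen = 32 + 20 = 52, so padlen =
 * 16 - (52 % 16) = 12 and twelve bytes of value 11 (padlen - 1) are
 * appended after the digest, giving 32 + 20 + 12 = 64 bytes for the
 * cipher - a whole number of AES blocks, as TLS 1.0 CBC padding requires.
 * When srclen is already block-aligned a full block of padding is added,
 * never zero.
 */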
6479 +
6480 +static int crypto_tls_copy_data(struct aead_request *req,
6481 + struct scatterlist *src,
6482 + struct scatterlist *dst,
6483 + unsigned int len)
6484 +{
6485 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6486 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6487 + SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
6488 +
6489 + skcipher_request_set_tfm(skreq, ctx->null);
6490 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6491 + NULL, NULL);
6492 + skcipher_request_set_crypt(skreq, src, dst, len, NULL);
6493 +
6494 + return crypto_skcipher_encrypt(skreq);
6495 +}
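/*
 * Note on the helper above: ctx->null is the default "ecb(cipher_null)"
 * transform taken in crypto_tls_init_tfm(), so "encrypting" with it is
 * simply a scatterlist-to-scatterlist copy - the same trick the generic
 * authenc template uses to mirror associated data from src to dst.
 */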
6496 +
6497 +static int crypto_tls_encrypt(struct aead_request *req)
6498 +{
6499 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6500 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6501 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6502 + struct skcipher_request *skreq;
6503 + struct scatterlist *cipher = treq_ctx->cipher;
6504 + struct scatterlist *tmp = treq_ctx->tmp;
6505 + struct scatterlist *sg, *src, *dst;
6506 + unsigned int cryptlen, phashlen;
6507 + u8 *hash = treq_ctx->tail;
6508 + int err;
6509 +
6510 + /*
6511 + * The hash result is saved at the beginning of the tls request ctx
6512 + * and is aligned as required by the hash transform. Enough space was
6513 + * allocated in crypto_tls_init_tfm to accommodate the difference. The
6514 + * requests themselves start later at treq_ctx->tail + ctx->reqoff so
6515 + * the result is not overwritten by the second (cipher) request.
6516 + */
6517 + hash = (u8 *)ALIGN((unsigned long)hash +
6518 + crypto_ahash_alignmask(ctx->auth),
6519 + crypto_ahash_alignmask(ctx->auth) + 1);
6520 +
6521 + /*
6522 + * STEP 1: create ICV together with necessary padding
6523 + */
6524 + err = crypto_tls_gen_padicv(hash, &phashlen, req);
6525 + if (err)
6526 + return err;
6527 +
6528 + /*
6529 + * STEP 2: Hash and padding are combined with the payload
6530 +	 * depending on how it arrives. Scatter tables must have at least
6531 + * one page of data before chaining with another table and can't have
6532 + * an empty data page. The following code addresses these requirements.
6533 + *
6534 + * If the payload is empty, only the hash is encrypted, otherwise the
6535 + * payload scatterlist is merged with the hash. A special merging case
6536 + * is when the payload has only one page of data. In that case the
6537 + * payload page is moved to another scatterlist and prepared there for
6538 + * encryption.
6539 + */
6540 + if (req->cryptlen) {
6541 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6542 +
6543 + sg_init_table(cipher, 2);
6544 + sg_set_buf(cipher + 1, hash, phashlen);
6545 +
6546 + if (sg_is_last(src)) {
6547 + sg_set_page(cipher, sg_page(src), req->cryptlen,
6548 + src->offset);
6549 + src = cipher;
6550 + } else {
6551 + unsigned int rem_len = req->cryptlen;
6552 +
6553 + for (sg = src; rem_len > sg->length; sg = sg_next(sg))
6554 + rem_len -= min(rem_len, sg->length);
6555 +
6556 + sg_set_page(cipher, sg_page(sg), rem_len, sg->offset);
6557 + sg_chain(sg, 1, cipher);
6558 + }
6559 + } else {
6560 + sg_init_one(cipher, hash, phashlen);
6561 + src = cipher;
6562 + }
6563 +
6564 +	/*
6565 +	 * If src != dst, copy the associated data from source to destination.
6566 +	 * In both cases fast-forward past the associated data in the dest.
6567 + */
6568 + if (req->src != req->dst) {
6569 + err = crypto_tls_copy_data(req, req->src, req->dst,
6570 + req->assoclen);
6571 + if (err)
6572 + return err;
6573 + }
6574 + dst = scatterwalk_ffwd(treq_ctx->dst, req->dst, req->assoclen);
6575 +
6576 + /*
6577 + * STEP 3: encrypt the frame and return the result
6578 + */
6579 + cryptlen = req->cryptlen + phashlen;
6580 +
6581 + /*
6582 + * The hash and the cipher are applied at different times and their
6583 + * requests can use the same memory space without interference
6584 + */
6585 + skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6586 + skcipher_request_set_tfm(skreq, ctx->enc);
6587 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6588 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6589 + req->base.complete, req->base.data);
6590 + /*
6591 + * Apply the cipher transform. The result will be in req->dst when the
6592 +	 * asynchronous call terminates
6593 + */
6594 + err = crypto_skcipher_encrypt(skreq);
6595 +
6596 + return err;
6597 +}
6598 +
6599 +static int crypto_tls_decrypt(struct aead_request *req)
6600 +{
6601 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
6602 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tls);
6603 + struct tls_request_ctx *treq_ctx = aead_request_ctx(req);
6604 + unsigned int cryptlen = req->cryptlen;
6605 + unsigned int hash_size = crypto_aead_authsize(tls);
6606 + unsigned int block_size = crypto_aead_blocksize(tls);
6607 + struct skcipher_request *skreq = (void *)(treq_ctx->tail + ctx->reqoff);
6608 + struct scatterlist *tmp = treq_ctx->tmp;
6609 + struct scatterlist *src, *dst;
6610 +
6611 + u8 padding[255]; /* padding can be 0-255 bytes */
6612 + u8 pad_size;
6613 + u16 *len_field;
6614 + u8 *ihash, *hash = treq_ctx->tail;
6615 +
6616 + int paderr = 0;
6617 + int err = -EINVAL;
6618 + int i;
6619 + struct async_op ciph_op;
6620 +
6621 + /*
6622 + * Rule out bad packets. The input packet length must be at least one
6623 + * byte more than the hash_size
6624 + */
6625 + if (cryptlen <= hash_size || cryptlen % block_size)
6626 + goto out;
6627 +
6628 + /*
6629 + * Step 1 - Decrypt the source. Fast-forward past the associated data
6630 + * to the encrypted data. The result will be overwritten in place so
6631 + * that the decrypted data will be adjacent to the associated data. The
6632 +	 * last step (computing the hash) will have its input data already
6633 + * prepared and ready to be accessed at req->src.
6634 + */
6635 + src = scatterwalk_ffwd(tmp, req->src, req->assoclen);
6636 + dst = src;
6637 +
6638 + init_completion(&ciph_op.completion);
6639 + skcipher_request_set_tfm(skreq, ctx->enc);
6640 + skcipher_request_set_callback(skreq, aead_request_flags(req),
6641 + tls_async_op_done, &ciph_op);
6642 + skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
6643 + err = crypto_skcipher_decrypt(skreq);
6644 + if (err == -EINPROGRESS) {
6645 + err = wait_for_completion_interruptible(&ciph_op.completion);
6646 + if (!err)
6647 + err = ciph_op.err;
6648 + }
6649 + if (err)
6650 + goto out;
6651 +
6652 + /*
6653 + * Step 2 - Verify padding
6654 + * Retrieve the last byte of the payload; this is the padding size.
6655 + */
6656 + cryptlen -= 1;
6657 + scatterwalk_map_and_copy(&pad_size, dst, cryptlen, 1, 0);
6658 +
6659 + /* RFC recommendation for invalid padding size. */
6660 + if (cryptlen < pad_size + hash_size) {
6661 + pad_size = 0;
6662 + paderr = -EBADMSG;
6663 + }
6664 + cryptlen -= pad_size;
6665 + scatterwalk_map_and_copy(padding, dst, cryptlen, pad_size, 0);
6666 +
6667 +	/* Padding content must be equal to pad_size. We verify it all */
6668 + for (i = 0; i < pad_size; i++)
6669 + if (padding[i] != pad_size)
6670 + paderr = -EBADMSG;
6671 +
6672 + /*
6673 + * Step 3 - Verify hash
6674 + * Align the digest result as required by the hash transform. Enough
6675 + * space was allocated in crypto_tls_init_tfm
6676 + */
6677 + hash = (u8 *)ALIGN((unsigned long)hash +
6678 + crypto_ahash_alignmask(ctx->auth),
6679 + crypto_ahash_alignmask(ctx->auth) + 1);
6680 + /*
6681 + * Two bytes at the end of the associated data make the length field.
6682 + * It must be updated with the length of the cleartext message before
6683 + * the hash is calculated.
6684 + */
6685 + len_field = sg_virt(req->src) + req->assoclen - 2;
6686 + cryptlen -= hash_size;
6687 + *len_field = htons(cryptlen);
6688 +
6689 + /* This is the hash from the decrypted packet. Save it for later */
6690 + ihash = hash + hash_size;
6691 + scatterwalk_map_and_copy(ihash, dst, cryptlen, hash_size, 0);
6692 +
6693 + /* Now compute and compare our ICV with the one from the packet */
6694 + err = crypto_tls_genicv(hash, req->src, cryptlen + req->assoclen, req);
6695 + if (!err)
6696 + err = memcmp(hash, ihash, hash_size) ? -EBADMSG : 0;
6697 +
6698 + if (req->src != req->dst) {
6699 + err = crypto_tls_copy_data(req, req->src, req->dst, cryptlen +
6700 + req->assoclen);
6701 + if (err)
6702 + goto out;
6703 + }
6704 +
6705 + /* return the first found error */
6706 + if (paderr)
6707 + err = paderr;
6708 +
6709 +out:
6710 + aead_request_complete(req, err);
6711 + return err;
6712 +}
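/*
 * The length-field rewrite above relies on the TLS 1.0 MAC layout: the
 * associated data is expected to be the 13-byte pseudo-header that feeds
 * the HMAC - 8-byte sequence number, 1-byte content type, 2-byte version
 * and, last, the 2-byte length - so its final two bytes must be patched
 * to the plaintext size once the padding and the ICV have been stripped.
 */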
6713 +
6714 +static int crypto_tls_init_tfm(struct crypto_aead *tfm)
6715 +{
6716 + struct aead_instance *inst = aead_alg_instance(tfm);
6717 + struct tls_instance_ctx *ictx = aead_instance_ctx(inst);
6718 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6719 + struct crypto_ahash *auth;
6720 + struct crypto_skcipher *enc;
6721 + struct crypto_skcipher *null;
6722 + int err;
6723 +
6724 + auth = crypto_spawn_ahash(&ictx->auth);
6725 + if (IS_ERR(auth))
6726 + return PTR_ERR(auth);
6727 +
6728 + enc = crypto_spawn_skcipher(&ictx->enc);
6729 + err = PTR_ERR(enc);
6730 + if (IS_ERR(enc))
6731 + goto err_free_ahash;
6732 +
6733 + null = crypto_get_default_null_skcipher2();
6734 + err = PTR_ERR(null);
6735 + if (IS_ERR(null))
6736 + goto err_free_skcipher;
6737 +
6738 + ctx->auth = auth;
6739 + ctx->enc = enc;
6740 + ctx->null = null;
6741 +
6742 + /*
6743 + * Allow enough space for two digests. The two digests will be compared
6744 + * during the decryption phase. One will come from the decrypted packet
6745 + * and the other will be calculated. For encryption, one digest is
6746 + * padded (up to a cipher blocksize) and chained with the payload
6747 + */
6748 + ctx->reqoff = ALIGN(crypto_ahash_digestsize(auth) +
6749 + crypto_ahash_alignmask(auth),
6750 + crypto_ahash_alignmask(auth) + 1) +
6751 + max(crypto_ahash_digestsize(auth),
6752 + crypto_skcipher_blocksize(enc));
6753 +
6754 + crypto_aead_set_reqsize(tfm,
6755 + sizeof(struct tls_request_ctx) +
6756 + ctx->reqoff +
6757 + max_t(unsigned int,
6758 + crypto_ahash_reqsize(auth) +
6759 + sizeof(struct ahash_request),
6760 + crypto_skcipher_reqsize(enc) +
6761 + sizeof(struct skcipher_request)));
6762 +
6763 + return 0;
6764 +
6765 +err_free_skcipher:
6766 + crypto_free_skcipher(enc);
6767 +err_free_ahash:
6768 + crypto_free_ahash(auth);
6769 + return err;
6770 +}
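/*
 * For a concrete feel of the sizing above, assume hmac(sha1) with cbc(aes)
 * and a zero alignmask: the digest is 20 bytes and the cipher block 16, so
 * reqoff = ALIGN(20, 1) + max(20, 16) = 40.  treq_ctx->tail then holds the
 * computed digest at offset 0, the digest copied out of the packet lands
 * right behind it during decryption (encryption instead appends up to one
 * cipher block of padding to the first digest), and the ahash/skcipher
 * sub-request starts at offset 40.
 */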
6771 +
6772 +static void crypto_tls_exit_tfm(struct crypto_aead *tfm)
6773 +{
6774 + struct crypto_tls_ctx *ctx = crypto_aead_ctx(tfm);
6775 +
6776 + crypto_free_ahash(ctx->auth);
6777 + crypto_free_skcipher(ctx->enc);
6778 + crypto_put_default_null_skcipher2();
6779 +}
6780 +
6781 +static void crypto_tls_free(struct aead_instance *inst)
6782 +{
6783 + struct tls_instance_ctx *ctx = aead_instance_ctx(inst);
6784 +
6785 + crypto_drop_skcipher(&ctx->enc);
6786 + crypto_drop_ahash(&ctx->auth);
6787 + kfree(inst);
6788 +}
6789 +
6790 +static int crypto_tls_create(struct crypto_template *tmpl, struct rtattr **tb)
6791 +{
6792 + struct crypto_attr_type *algt;
6793 + struct aead_instance *inst;
6794 + struct hash_alg_common *auth;
6795 + struct crypto_alg *auth_base;
6796 + struct skcipher_alg *enc;
6797 + struct tls_instance_ctx *ctx;
6798 + const char *enc_name;
6799 + int err;
6800 +
6801 + algt = crypto_get_attr_type(tb);
6802 + if (IS_ERR(algt))
6803 + return PTR_ERR(algt);
6804 +
6805 + if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
6806 + return -EINVAL;
6807 +
6808 + auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
6809 + CRYPTO_ALG_TYPE_AHASH_MASK |
6810 + crypto_requires_sync(algt->type, algt->mask));
6811 + if (IS_ERR(auth))
6812 + return PTR_ERR(auth);
6813 +
6814 + auth_base = &auth->base;
6815 +
6816 + enc_name = crypto_attr_alg_name(tb[2]);
6817 + err = PTR_ERR(enc_name);
6818 + if (IS_ERR(enc_name))
6819 + goto out_put_auth;
6820 +
6821 + inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
6822 + err = -ENOMEM;
6823 + if (!inst)
6824 + goto out_put_auth;
6825 +
6826 + ctx = aead_instance_ctx(inst);
6827 +
6828 + err = crypto_init_ahash_spawn(&ctx->auth, auth,
6829 + aead_crypto_instance(inst));
6830 + if (err)
6831 + goto err_free_inst;
6832 +
6833 + crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
6834 + err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
6835 + crypto_requires_sync(algt->type,
6836 + algt->mask));
6837 + if (err)
6838 + goto err_drop_auth;
6839 +
6840 + enc = crypto_spawn_skcipher_alg(&ctx->enc);
6841 +
6842 + err = -ENAMETOOLONG;
6843 + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
6844 + "tls10(%s,%s)", auth_base->cra_name,
6845 + enc->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
6846 + goto err_drop_enc;
6847 +
6848 + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
6849 + "tls10(%s,%s)", auth_base->cra_driver_name,
6850 + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
6851 + goto err_drop_enc;
6852 +
6853 + inst->alg.base.cra_flags = (auth_base->cra_flags |
6854 + enc->base.cra_flags) & CRYPTO_ALG_ASYNC;
6855 + inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
6856 + auth_base->cra_priority;
6857 + inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
6858 + inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
6859 + enc->base.cra_alignmask;
6860 + inst->alg.base.cra_ctxsize = sizeof(struct crypto_tls_ctx);
6861 +
6862 + inst->alg.ivsize = crypto_skcipher_alg_ivsize(enc);
6863 + inst->alg.chunksize = crypto_skcipher_alg_chunksize(enc);
6864 + inst->alg.maxauthsize = auth->digestsize;
6865 +
6866 + inst->alg.init = crypto_tls_init_tfm;
6867 + inst->alg.exit = crypto_tls_exit_tfm;
6868 +
6869 + inst->alg.setkey = crypto_tls_setkey;
6870 + inst->alg.encrypt = crypto_tls_encrypt;
6871 + inst->alg.decrypt = crypto_tls_decrypt;
6872 +
6873 + inst->free = crypto_tls_free;
6874 +
6875 + err = aead_register_instance(tmpl, inst);
6876 + if (err)
6877 + goto err_drop_enc;
6878 +
6879 +out:
6880 + crypto_mod_put(auth_base);
6881 + return err;
6882 +
6883 +err_drop_enc:
6884 + crypto_drop_skcipher(&ctx->enc);
6885 +err_drop_auth:
6886 + crypto_drop_ahash(&ctx->auth);
6887 +err_free_inst:
6888 + kfree(inst);
6889 +out_put_auth:
6890 + goto out;
6891 +}
6892 +
6893 +static struct crypto_template crypto_tls_tmpl = {
6894 + .name = "tls10",
6895 + .create = crypto_tls_create,
6896 + .module = THIS_MODULE,
6897 +};
6898 +
6899 +static int __init crypto_tls_module_init(void)
6900 +{
6901 + return crypto_register_template(&crypto_tls_tmpl);
6902 +}
6903 +
6904 +static void __exit crypto_tls_module_exit(void)
6905 +{
6906 + crypto_unregister_template(&crypto_tls_tmpl);
6907 +}
6908 +
6909 +module_init(crypto_tls_module_init);
6910 +module_exit(crypto_tls_module_exit);
6911 +
6912 +MODULE_LICENSE("GPL");
6913 +MODULE_DESCRIPTION("TLS 1.0 record encryption");
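For a sense of how the tls10 template registered above is consumed, here is a
minimal sketch that allocates an instance and feeds it a key in the
authenc-style blob parsed by crypto_tls_setkey() through
crypto_authenc_extractkeys(). It assumes hmac(sha1) and cbc(aes) are
available; tls10_setkey_example and the key buffers are hypothetical
placeholders, not part of the patch.

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>

static int tls10_setkey_example(const u8 *auth_key, unsigned int auth_keylen,
				const u8 *enc_key, unsigned int enc_keylen)
{
	struct crypto_authenc_key_param *param;
	struct crypto_aead *tfm;
	struct rtattr *rta;
	unsigned int blob_len;
	u8 *blob;
	int err;

	tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* rtattr header carrying enckeylen, then authkey, then enckey */
	blob_len = RTA_SPACE(sizeof(*param)) + auth_keylen + enc_keylen;
	blob = kzalloc(blob_len, GFP_KERNEL);
	if (!blob) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	rta = (struct rtattr *)blob;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enc_keylen);

	memcpy(blob + RTA_SPACE(sizeof(*param)), auth_key, auth_keylen);
	memcpy(blob + RTA_SPACE(sizeof(*param)) + auth_keylen,
	       enc_key, enc_keylen);

	err = crypto_aead_setkey(tfm, blob, blob_len);

	kzfree(blob);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}

The blob layout matches the hmac_sha*_*_cbc_enc_tv_temp vectors earlier in
the patch: a native-endian rtattr header, the encryption key length as a
__be32, then the authentication key followed by the encryption key.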
6914 --- a/drivers/crypto/caam/Kconfig
6915 +++ b/drivers/crypto/caam/Kconfig
6916 @@ -1,6 +1,11 @@
6917 +config CRYPTO_DEV_FSL_CAAM_COMMON
6918 + tristate
6919 +
6920 config CRYPTO_DEV_FSL_CAAM
6921 - tristate "Freescale CAAM-Multicore driver backend"
6922 + tristate "Freescale CAAM-Multicore platform driver backend"
6923 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
6924 + select CRYPTO_DEV_FSL_CAAM_COMMON
6925 + select SOC_BUS
6926 help
6927 Enables the driver module for Freescale's Cryptographic Accelerator
6928 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
6929 @@ -11,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
6930 To compile this driver as a module, choose M here: the module
6931 will be called caam.
6932
6933 +if CRYPTO_DEV_FSL_CAAM
6934 +
6935 +config CRYPTO_DEV_FSL_CAAM_DEBUG
6936 + bool "Enable debug output in CAAM driver"
6937 + help
6938 + Selecting this will enable printing of various debug
6939 + information in the CAAM driver.
6940 +
6941 config CRYPTO_DEV_FSL_CAAM_JR
6942 tristate "Freescale CAAM Job Ring driver backend"
6943 - depends on CRYPTO_DEV_FSL_CAAM
6944 default y
6945 help
6946 Enables the driver module for Job Rings which are part of
6947 @@ -24,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
6948 To compile this driver as a module, choose M here: the module
6949 will be called caam_jr.
6950
6951 +if CRYPTO_DEV_FSL_CAAM_JR
6952 +
6953 config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6954 int "Job Ring size"
6955 - depends on CRYPTO_DEV_FSL_CAAM_JR
6956 range 2 9
6957 default "9"
6958 help
6959 @@ -44,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
6960
6961 config CRYPTO_DEV_FSL_CAAM_INTC
6962 bool "Job Ring interrupt coalescing"
6963 - depends on CRYPTO_DEV_FSL_CAAM_JR
6964 help
6965 Enable the Job Ring's interrupt coalescing feature.
6966
6967 @@ -74,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
6968
6969 config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6970 tristate "Register algorithm implementations with the Crypto API"
6971 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6972 default y
6973 select CRYPTO_AEAD
6974 select CRYPTO_AUTHENC
6975 @@ -87,9 +98,25 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
6976 To compile this as a module, choose M here: the module
6977 will be called caamalg.
6978
6979 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
6980 + tristate "Queue Interface as Crypto API backend"
6981 + depends on FSL_SDK_DPA && NET
6982 + default y
6983 + select CRYPTO_AUTHENC
6984 + select CRYPTO_BLKCIPHER
6985 + help
6986 +	  Selecting this will use the CAAM Queue Interface (QI) for sending
6987 +	  and receiving crypto jobs to/from CAAM. This gives better performance
6988 +	  than the job ring interface when the number of cores is greater than
6989 +	  the number of job rings assigned to the kernel. The number of portals
6990 +	  assigned to the kernel should also be greater than the number of
6991 +	  job rings.
6992 +
6993 + To compile this as a module, choose M here: the module
6994 + will be called caamalg_qi.
6995 +
6996 config CRYPTO_DEV_FSL_CAAM_AHASH_API
6997 tristate "Register hash algorithm implementations with Crypto API"
6998 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
6999 default y
7000 select CRYPTO_HASH
7001 help
7002 @@ -101,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
7003
7004 config CRYPTO_DEV_FSL_CAAM_PKC_API
7005 tristate "Register public key cryptography implementations with Crypto API"
7006 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7007 default y
7008 select CRYPTO_RSA
7009 help
7010 @@ -113,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
7011
7012 config CRYPTO_DEV_FSL_CAAM_RNG_API
7013 tristate "Register caam device for hwrng API"
7014 - depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
7015 default y
7016 select CRYPTO_RNG
7017 select HW_RANDOM
7018 @@ -124,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
7019 To compile this as a module, choose M here: the module
7020 will be called caamrng.
7021
7022 -config CRYPTO_DEV_FSL_CAAM_IMX
7023 - def_bool SOC_IMX6 || SOC_IMX7D
7024 - depends on CRYPTO_DEV_FSL_CAAM
7025 +endif # CRYPTO_DEV_FSL_CAAM_JR
7026
7027 -config CRYPTO_DEV_FSL_CAAM_DEBUG
7028 - bool "Enable debug output in CAAM driver"
7029 - depends on CRYPTO_DEV_FSL_CAAM
7030 - help
7031 - Selecting this will enable printing of various debug
7032 - information in the CAAM driver.
7033 +endif # CRYPTO_DEV_FSL_CAAM
7034 +
7035 +config CRYPTO_DEV_FSL_DPAA2_CAAM
7036 + tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
7037 + depends on FSL_MC_DPIO
7038 + select CRYPTO_DEV_FSL_CAAM_COMMON
7039 + select CRYPTO_BLKCIPHER
7040 + select CRYPTO_AUTHENC
7041 + select CRYPTO_AEAD
7042 + select CRYPTO_HASH
7043 + ---help---
7044 + CAAM driver for QorIQ Data Path Acceleration Architecture 2.
7045 + It handles DPSECI DPAA2 objects that sit on the Management Complex
7046 + (MC) fsl-mc bus.
7047 +
7048 + To compile this as a module, choose M here: the module
7049 + will be called dpaa2_caam.
7050 +
7051 +config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
7052 + def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
7053 + CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
7054 + CRYPTO_DEV_FSL_DPAA2_CAAM)
7055 +
7056 +config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
7057 + def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
7058 + CRYPTO_DEV_FSL_DPAA2_CAAM)
7059 --- a/drivers/crypto/caam/Makefile
7060 +++ b/drivers/crypto/caam/Makefile
7061 @@ -5,13 +5,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
7062 ccflags-y := -DDEBUG
7063 endif
7064
7065 +ccflags-y += -DVERSION=\"\"
7066 +
7067 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
7068 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
7069 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
7070 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
7071 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
7072 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
7073 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
7074 +obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
7075 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
7076 obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
7077
7078 caam-objs := ctrl.o
7079 -caam_jr-objs := jr.o key_gen.o error.o
7080 +caam_jr-objs := jr.o key_gen.o
7081 caam_pkc-y := caampkc.o pkc_desc.o
7082 +ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
7083 + ccflags-y += -DCONFIG_CAAM_QI
7084 + caam-objs += qi.o
7085 +endif
7086 +
7087 +obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
7088 +
7089 +dpaa2_caam-y := caamalg_qi2.o dpseci.o
7090 --- a/drivers/crypto/caam/caamalg.c
7091 +++ b/drivers/crypto/caam/caamalg.c
7092 @@ -2,6 +2,7 @@
7093 * caam - Freescale FSL CAAM support for crypto API
7094 *
7095 * Copyright 2008-2011 Freescale Semiconductor, Inc.
7096 + * Copyright 2016 NXP
7097 *
7098 * Based on talitos crypto API driver.
7099 *
7100 @@ -53,6 +54,7 @@
7101 #include "error.h"
7102 #include "sg_sw_sec4.h"
7103 #include "key_gen.h"
7104 +#include "caamalg_desc.h"
7105
7106 /*
7107 * crypto alg
7108 @@ -62,8 +64,6 @@
7109 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
7110 CTR_RFC3686_NONCE_SIZE + \
7111 SHA512_DIGEST_SIZE * 2)
7112 -/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
7113 -#define CAAM_MAX_IV_LENGTH 16
7114
7115 #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
7116 #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
7117 @@ -71,37 +71,6 @@
7118 #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
7119 CAAM_CMD_SZ * 5)
7120
7121 -/* length of descriptors text */
7122 -#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
7123 -#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
7124 -#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
7125 -#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 10 * CAAM_CMD_SZ)
7126 -
7127 -/* Note: Nonce is counted in enckeylen */
7128 -#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
7129 -
7130 -#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
7131 -#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
7132 -#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
7133 -
7134 -#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
7135 -#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
7136 -#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
7137 -
7138 -#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
7139 -#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7140 -#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
7141 -
7142 -#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
7143 -#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
7144 -#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
7145 -
7146 -#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
7147 -#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
7148 - 20 * CAAM_CMD_SZ)
7149 -#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
7150 - 15 * CAAM_CMD_SZ)
7151 -
7152 #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
7153 #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
7154
7155 @@ -112,47 +81,11 @@
7156 #define debug(format, arg...)
7157 #endif
7158
7159 -#ifdef DEBUG
7160 -#include <linux/highmem.h>
7161 -
7162 -static void dbg_dump_sg(const char *level, const char *prefix_str,
7163 - int prefix_type, int rowsize, int groupsize,
7164 - struct scatterlist *sg, size_t tlen, bool ascii,
7165 - bool may_sleep)
7166 -{
7167 - struct scatterlist *it;
7168 - void *it_page;
7169 - size_t len;
7170 - void *buf;
7171 -
7172 - for (it = sg; it != NULL && tlen > 0 ; it = sg_next(sg)) {
7173 - /*
7174 - * make sure the scatterlist's page
7175 - * has a valid virtual memory mapping
7176 - */
7177 - it_page = kmap_atomic(sg_page(it));
7178 - if (unlikely(!it_page)) {
7179 - printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
7180 - return;
7181 - }
7182 -
7183 - buf = it_page + it->offset;
7184 - len = min_t(size_t, tlen, it->length);
7185 - print_hex_dump(level, prefix_str, prefix_type, rowsize,
7186 - groupsize, buf, len, ascii);
7187 - tlen -= len;
7188 -
7189 - kunmap_atomic(it_page);
7190 - }
7191 -}
7192 -#endif
7193 -
7194 static struct list_head alg_list;
7195
7196 struct caam_alg_entry {
7197 int class1_alg_type;
7198 int class2_alg_type;
7199 - int alg_op;
7200 bool rfc3686;
7201 bool geniv;
7202 };
7203 @@ -163,302 +96,70 @@ struct caam_aead_alg {
7204 bool registered;
7205 };
7206
7207 -/* Set DK bit in class 1 operation if shared */
7208 -static inline void append_dec_op1(u32 *desc, u32 type)
7209 -{
7210 - u32 *jump_cmd, *uncond_jump_cmd;
7211 -
7212 - /* DK bit is valid only for AES */
7213 - if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
7214 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7215 - OP_ALG_DECRYPT);
7216 - return;
7217 - }
7218 -
7219 - jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
7220 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7221 - OP_ALG_DECRYPT);
7222 - uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7223 - set_jump_tgt_here(desc, jump_cmd);
7224 - append_operation(desc, type | OP_ALG_AS_INITFINAL |
7225 - OP_ALG_DECRYPT | OP_ALG_AAI_DK);
7226 - set_jump_tgt_here(desc, uncond_jump_cmd);
7227 -}
7228 -
7229 -/*
7230 - * For aead functions, read payload and write payload,
7231 - * both of which are specified in req->src and req->dst
7232 - */
7233 -static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
7234 -{
7235 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7236 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
7237 - KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
7238 -}
7239 -
7240 -/*
7241 - * For ablkcipher encrypt and decrypt, read from req->src and
7242 - * write to req->dst
7243 - */
7244 -static inline void ablkcipher_append_src_dst(u32 *desc)
7245 -{
7246 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7247 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7248 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
7249 - KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7250 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7251 -}
7252 -
7253 /*
7254 * per-session context
7255 */
7256 struct caam_ctx {
7257 - struct device *jrdev;
7258 u32 sh_desc_enc[DESC_MAX_USED_LEN];
7259 u32 sh_desc_dec[DESC_MAX_USED_LEN];
7260 u32 sh_desc_givenc[DESC_MAX_USED_LEN];
7261 + u8 key[CAAM_MAX_KEY_SIZE];
7262 dma_addr_t sh_desc_enc_dma;
7263 dma_addr_t sh_desc_dec_dma;
7264 dma_addr_t sh_desc_givenc_dma;
7265 - u32 class1_alg_type;
7266 - u32 class2_alg_type;
7267 - u32 alg_op;
7268 - u8 key[CAAM_MAX_KEY_SIZE];
7269 dma_addr_t key_dma;
7270 - unsigned int enckeylen;
7271 - unsigned int split_key_len;
7272 - unsigned int split_key_pad_len;
7273 + struct device *jrdev;
7274 + struct alginfo adata;
7275 + struct alginfo cdata;
7276 unsigned int authsize;
7277 };
7278
7279 -static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
7280 - int keys_fit_inline, bool is_rfc3686)
7281 -{
7282 - u32 *nonce;
7283 - unsigned int enckeylen = ctx->enckeylen;
7284 -
7285 - /*
7286 - * RFC3686 specific:
7287 - * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
7288 - * | enckeylen = encryption key size + nonce size
7289 - */
7290 - if (is_rfc3686)
7291 - enckeylen -= CTR_RFC3686_NONCE_SIZE;
7292 -
7293 - if (keys_fit_inline) {
7294 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7295 - ctx->split_key_len, CLASS_2 |
7296 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7297 - append_key_as_imm(desc, (void *)ctx->key +
7298 - ctx->split_key_pad_len, enckeylen,
7299 - enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7300 - } else {
7301 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7302 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7303 - append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
7304 - enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7305 - }
7306 -
7307 - /* Load Counter into CONTEXT1 reg */
7308 - if (is_rfc3686) {
7309 - nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
7310 - enckeylen);
7311 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
7312 - LDST_CLASS_IND_CCB |
7313 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
7314 - append_move(desc,
7315 - MOVE_SRC_OUTFIFO |
7316 - MOVE_DEST_CLASS1CTX |
7317 - (16 << MOVE_OFFSET_SHIFT) |
7318 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
7319 - }
7320 -}
7321 -
7322 -static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
7323 - int keys_fit_inline, bool is_rfc3686)
7324 -{
7325 - u32 *key_jump_cmd;
7326 -
7327 - /* Note: Context registers are saved. */
7328 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
7329 -
7330 - /* Skip if already shared */
7331 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7332 - JUMP_COND_SHRD);
7333 -
7334 - append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7335 -
7336 - set_jump_tgt_here(desc, key_jump_cmd);
7337 -}
7338 -
7339 static int aead_null_set_sh_desc(struct crypto_aead *aead)
7340 {
7341 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7342 struct device *jrdev = ctx->jrdev;
7343 - bool keys_fit_inline = false;
7344 - u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
7345 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
7346 u32 *desc;
7347 + int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
7348 + ctx->adata.keylen_pad;
7349
7350 /*
7351 * Job Descriptor and Shared Descriptors
7352 * must all fit into the 64-word Descriptor h/w Buffer
7353 */
7354 - if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
7355 - ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7356 - keys_fit_inline = true;
7357 + if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) {
7358 + ctx->adata.key_inline = true;
7359 + ctx->adata.key_virt = ctx->key;
7360 + } else {
7361 + ctx->adata.key_inline = false;
7362 + ctx->adata.key_dma = ctx->key_dma;
7363 + }
7364
7365 /* aead_encrypt shared descriptor */
7366 desc = ctx->sh_desc_enc;
7367 -
7368 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7369 -
7370 - /* Skip if already shared */
7371 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7372 - JUMP_COND_SHRD);
7373 - if (keys_fit_inline)
7374 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7375 - ctx->split_key_len, CLASS_2 |
7376 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7377 - else
7378 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7379 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7380 - set_jump_tgt_here(desc, key_jump_cmd);
7381 -
7382 - /* assoclen + cryptlen = seqinlen */
7383 - append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
7384 -
7385 - /* Prepare to read and write cryptlen + assoclen bytes */
7386 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7387 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7388 -
7389 - /*
7390 - * MOVE_LEN opcode is not available in all SEC HW revisions,
7391 - * thus need to do some magic, i.e. self-patch the descriptor
7392 - * buffer.
7393 - */
7394 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7395 - MOVE_DEST_MATH3 |
7396 - (0x6 << MOVE_LEN_SHIFT));
7397 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
7398 - MOVE_DEST_DESCBUF |
7399 - MOVE_WAITCOMP |
7400 - (0x8 << MOVE_LEN_SHIFT));
7401 -
7402 - /* Class 2 operation */
7403 - append_operation(desc, ctx->class2_alg_type |
7404 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7405 -
7406 - /* Read and write cryptlen bytes */
7407 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7408 -
7409 - set_move_tgt_here(desc, read_move_cmd);
7410 - set_move_tgt_here(desc, write_move_cmd);
7411 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7412 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7413 - MOVE_AUX_LS);
7414 -
7415 - /* Write ICV */
7416 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7417 - LDST_SRCDST_BYTE_CONTEXT);
7418 -
7419 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7420 - desc_bytes(desc),
7421 - DMA_TO_DEVICE);
7422 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7423 - dev_err(jrdev, "unable to map shared descriptor\n");
7424 - return -ENOMEM;
7425 - }
7426 -#ifdef DEBUG
7427 - print_hex_dump(KERN_ERR,
7428 - "aead null enc shdesc@"__stringify(__LINE__)": ",
7429 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7430 - desc_bytes(desc), 1);
7431 -#endif
7432 + cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
7433 + ctrlpriv->era);
7434 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7435 + desc_bytes(desc), DMA_TO_DEVICE);
7436
7437 /*
7438 * Job Descriptor and Shared Descriptors
7439 * must all fit into the 64-word Descriptor h/w Buffer
7440 */
7441 - keys_fit_inline = false;
7442 - if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
7443 - ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
7444 - keys_fit_inline = true;
7445 -
7446 - desc = ctx->sh_desc_dec;
7447 + if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) {
7448 + ctx->adata.key_inline = true;
7449 + ctx->adata.key_virt = ctx->key;
7450 + } else {
7451 + ctx->adata.key_inline = false;
7452 + ctx->adata.key_dma = ctx->key_dma;
7453 + }
7454
7455 /* aead_decrypt shared descriptor */
7456 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7457 -
7458 - /* Skip if already shared */
7459 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7460 - JUMP_COND_SHRD);
7461 - if (keys_fit_inline)
7462 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
7463 - ctx->split_key_len, CLASS_2 |
7464 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7465 - else
7466 - append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
7467 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
7468 - set_jump_tgt_here(desc, key_jump_cmd);
7469 -
7470 - /* Class 2 operation */
7471 - append_operation(desc, ctx->class2_alg_type |
7472 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7473 -
7474 - /* assoclen + cryptlen = seqoutlen */
7475 - append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7476 -
7477 - /* Prepare to read and write cryptlen + assoclen bytes */
7478 - append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
7479 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
7480 -
7481 - /*
7482 - * MOVE_LEN opcode is not available in all SEC HW revisions,
7483 - * thus need to do some magic, i.e. self-patch the descriptor
7484 - * buffer.
7485 - */
7486 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
7487 - MOVE_DEST_MATH2 |
7488 - (0x6 << MOVE_LEN_SHIFT));
7489 - write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
7490 - MOVE_DEST_DESCBUF |
7491 - MOVE_WAITCOMP |
7492 - (0x8 << MOVE_LEN_SHIFT));
7493 -
7494 - /* Read and write cryptlen bytes */
7495 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
7496 -
7497 - /*
7498 - * Insert a NOP here, since we need at least 4 instructions between
7499 - * code patching the descriptor buffer and the location being patched.
7500 - */
7501 - jump_cmd = append_jump(desc, JUMP_TEST_ALL);
7502 - set_jump_tgt_here(desc, jump_cmd);
7503 -
7504 - set_move_tgt_here(desc, read_move_cmd);
7505 - set_move_tgt_here(desc, write_move_cmd);
7506 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7507 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
7508 - MOVE_AUX_LS);
7509 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7510 -
7511 - /* Load ICV */
7512 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7513 - FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7514 -
7515 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7516 - desc_bytes(desc),
7517 - DMA_TO_DEVICE);
7518 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7519 - dev_err(jrdev, "unable to map shared descriptor\n");
7520 - return -ENOMEM;
7521 - }
7522 -#ifdef DEBUG
7523 - print_hex_dump(KERN_ERR,
7524 - "aead null dec shdesc@"__stringify(__LINE__)": ",
7525 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7526 - desc_bytes(desc), 1);
7527 -#endif
7528 + desc = ctx->sh_desc_dec;
7529 + cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
7530 + ctrlpriv->era);
7531 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7532 + desc_bytes(desc), DMA_TO_DEVICE);
7533
7534 return 0;
7535 }
7536 @@ -470,12 +171,12 @@ static int aead_set_sh_desc(struct crypt
7537 unsigned int ivsize = crypto_aead_ivsize(aead);
7538 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7539 struct device *jrdev = ctx->jrdev;
7540 - bool keys_fit_inline;
7541 - u32 geniv, moveiv;
7542 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
7543 u32 ctx1_iv_off = 0;
7544 - u32 *desc;
7545 - u32 *wait_cmd;
7546 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
7547 + u32 *desc, *nonce = NULL;
7548 + u32 inl_mask;
7549 + unsigned int data_len[2];
7550 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
7551 OP_ALG_AAI_CTR_MOD128);
7552 const bool is_rfc3686 = alg->caam.rfc3686;
7553
7554 @@ -483,7 +184,7 @@ static int aead_set_sh_desc(struct crypt
7555 return 0;
7556
7557 /* NULL encryption / decryption */
7558 - if (!ctx->enckeylen)
7559 + if (!ctx->cdata.keylen)
7560 return aead_null_set_sh_desc(aead);
7561
7562 /*
7563 @@ -498,8 +199,14 @@ static int aead_set_sh_desc(struct crypt
7564 * RFC3686 specific:
7565 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
7566 */
7567 - if (is_rfc3686)
7568 + if (is_rfc3686) {
7569 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
7570 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
7571 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
7572 + }
7573 +
7574 + data_len[0] = ctx->adata.keylen_pad;
7575 + data_len[1] = ctx->cdata.keylen;
7576
7577 if (alg->caam.geniv)
7578 goto skip_enc;
7579 @@ -508,146 +215,64 @@ static int aead_set_sh_desc(struct crypt
7580 * Job Descriptor and Shared Descriptors
7581 * must all fit into the 64-word Descriptor h/w Buffer
7582 */
7583 - keys_fit_inline = false;
7584 - if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7585 - ctx->split_key_pad_len + ctx->enckeylen +
7586 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7587 - CAAM_DESC_BYTES_MAX)
7588 - keys_fit_inline = true;
7589 -
7590 - /* aead_encrypt shared descriptor */
7591 - desc = ctx->sh_desc_enc;
7592 -
7593 - /* Note: Context registers are saved. */
7594 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7595 -
7596 - /* Class 2 operation */
7597 - append_operation(desc, ctx->class2_alg_type |
7598 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7599 -
7600 - /* Read and write assoclen bytes */
7601 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7602 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7603 + if (desc_inline_query(DESC_AEAD_ENC_LEN +
7604 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7605 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7606 + ARRAY_SIZE(data_len)) < 0)
7607 + return -EINVAL;
7608
7609 - /* Skip assoc data */
7610 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7611 + if (inl_mask & 1)
7612 + ctx->adata.key_virt = ctx->key;
7613 + else
7614 + ctx->adata.key_dma = ctx->key_dma;
7615
7616 - /* read assoc before reading payload */
7617 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7618 - FIFOLDST_VLF);
7619 + if (inl_mask & 2)
7620 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7621 + else
7622 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7623
7624 - /* Load Counter into CONTEXT1 reg */
7625 - if (is_rfc3686)
7626 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7627 - LDST_SRCDST_BYTE_CONTEXT |
7628 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7629 - LDST_OFFSET_SHIFT));
7630 -
7631 - /* Class 1 operation */
7632 - append_operation(desc, ctx->class1_alg_type |
7633 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7634 -
7635 - /* Read and write cryptlen bytes */
7636 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7637 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7638 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
7639 -
7640 - /* Write ICV */
7641 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7642 - LDST_SRCDST_BYTE_CONTEXT);
7643 + ctx->adata.key_inline = !!(inl_mask & 1);
7644 + ctx->cdata.key_inline = !!(inl_mask & 2);
7645
7646 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7647 - desc_bytes(desc),
7648 - DMA_TO_DEVICE);
7649 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7650 - dev_err(jrdev, "unable to map shared descriptor\n");
7651 - return -ENOMEM;
7652 - }
7653 -#ifdef DEBUG
7654 - print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
7655 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7656 - desc_bytes(desc), 1);
7657 -#endif
7658 + /* aead_encrypt shared descriptor */
7659 + desc = ctx->sh_desc_enc;
7660 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
7661 + ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
7662 + false, ctrlpriv->era);
7663 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7664 + desc_bytes(desc), DMA_TO_DEVICE);
7665
7666 skip_enc:
7667 /*
7668 * Job Descriptor and Shared Descriptors
7669 * must all fit into the 64-word Descriptor h/w Buffer
7670 */
7671 - keys_fit_inline = false;
7672 - if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7673 - ctx->split_key_pad_len + ctx->enckeylen +
7674 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7675 - CAAM_DESC_BYTES_MAX)
7676 - keys_fit_inline = true;
7677 -
7678 - /* aead_decrypt shared descriptor */
7679 - desc = ctx->sh_desc_dec;
7680 -
7681 - /* Note: Context registers are saved. */
7682 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7683 -
7684 - /* Class 2 operation */
7685 - append_operation(desc, ctx->class2_alg_type |
7686 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
7687 + if (desc_inline_query(DESC_AEAD_DEC_LEN +
7688 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7689 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7690 + ARRAY_SIZE(data_len)) < 0)
7691 + return -EINVAL;
7692
7693 - /* Read and write assoclen bytes */
7694 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7695 - if (alg->caam.geniv)
7696 - append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
7697 + if (inl_mask & 1)
7698 + ctx->adata.key_virt = ctx->key;
7699 else
7700 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7701 + ctx->adata.key_dma = ctx->key_dma;
7702
7703 - /* Skip assoc data */
7704 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7705 -
7706 - /* read assoc before reading payload */
7707 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7708 - KEY_VLF);
7709 -
7710 - if (alg->caam.geniv) {
7711 - append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
7712 - LDST_SRCDST_BYTE_CONTEXT |
7713 - (ctx1_iv_off << LDST_OFFSET_SHIFT));
7714 - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
7715 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
7716 - }
7717 -
7718 - /* Load Counter into CONTEXT1 reg */
7719 - if (is_rfc3686)
7720 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7721 - LDST_SRCDST_BYTE_CONTEXT |
7722 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7723 - LDST_OFFSET_SHIFT));
7724 -
7725 - /* Choose operation */
7726 - if (ctr_mode)
7727 - append_operation(desc, ctx->class1_alg_type |
7728 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
7729 + if (inl_mask & 2)
7730 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7731 else
7732 - append_dec_op1(desc, ctx->class1_alg_type);
7733 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7734
7735 - /* Read and write cryptlen bytes */
7736 - append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7737 - append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
7738 - aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
7739 -
7740 - /* Load ICV */
7741 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
7742 - FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
7743 + ctx->adata.key_inline = !!(inl_mask & 1);
7744 + ctx->cdata.key_inline = !!(inl_mask & 2);
7745
7746 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
7747 - desc_bytes(desc),
7748 - DMA_TO_DEVICE);
7749 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
7750 - dev_err(jrdev, "unable to map shared descriptor\n");
7751 - return -ENOMEM;
7752 - }
7753 -#ifdef DEBUG
7754 - print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
7755 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7756 - desc_bytes(desc), 1);
7757 -#endif
7758 + /* aead_decrypt shared descriptor */
7759 + desc = ctx->sh_desc_dec;
7760 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
7761 + ctx->authsize, alg->caam.geniv, is_rfc3686,
7762 + nonce, ctx1_iv_off, false, ctrlpriv->era);
7763 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
7764 + desc_bytes(desc), DMA_TO_DEVICE);
7765
7766 if (!alg->caam.geniv)
7767 goto skip_givenc;
7768 @@ -656,115 +281,32 @@ skip_enc:
7769 * Job Descriptor and Shared Descriptors
7770 * must all fit into the 64-word Descriptor h/w Buffer
7771 */
7772 - keys_fit_inline = false;
7773 - if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
7774 - ctx->split_key_pad_len + ctx->enckeylen +
7775 - (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
7776 - CAAM_DESC_BYTES_MAX)
7777 - keys_fit_inline = true;
7778 -
7779 - /* aead_givencrypt shared descriptor */
7780 - desc = ctx->sh_desc_enc;
7781 -
7782 - /* Note: Context registers are saved. */
7783 - init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
7784 -
7785 - if (is_rfc3686)
7786 - goto copy_iv;
7787 -
7788 - /* Generate IV */
7789 - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
7790 - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
7791 - NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7792 - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
7793 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7794 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
7795 - append_move(desc, MOVE_WAITCOMP |
7796 - MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
7797 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7798 - (ivsize << MOVE_LEN_SHIFT));
7799 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
7800 -
7801 -copy_iv:
7802 - /* Copy IV to class 1 context */
7803 - append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
7804 - (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
7805 - (ivsize << MOVE_LEN_SHIFT));
7806 -
7807 - /* Return to encryption */
7808 - append_operation(desc, ctx->class2_alg_type |
7809 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7810 -
7811 - /* Read and write assoclen bytes */
7812 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7813 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7814 -
7815 - /* ivsize + cryptlen = seqoutlen - authsize */
7816 - append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
7817 -
7818 - /* Skip assoc data */
7819 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7820 -
7821 - /* read assoc before reading payload */
7822 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
7823 - KEY_VLF);
7824 -
7825 - /* Copy iv from outfifo to class 2 fifo */
7826 - moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
7827 - NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
7828 - append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
7829 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
7830 - append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
7831 - LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
7832 -
7833 - /* Load Counter into CONTEXT1 reg */
7834 - if (is_rfc3686)
7835 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
7836 - LDST_SRCDST_BYTE_CONTEXT |
7837 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
7838 - LDST_OFFSET_SHIFT));
7839 -
7840 - /* Class 1 operation */
7841 - append_operation(desc, ctx->class1_alg_type |
7842 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7843 -
7844 - /* Will write ivsize + cryptlen */
7845 - append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7846 -
7847 - /* Not need to reload iv */
7848 - append_seq_fifo_load(desc, ivsize,
7849 - FIFOLD_CLASS_SKIP);
7850 + if (desc_inline_query(DESC_AEAD_GIVENC_LEN +
7851 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
7852 + AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
7853 + ARRAY_SIZE(data_len)) < 0)
7854 + return -EINVAL;
7855
7856 - /* Will read cryptlen */
7857 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7858 + if (inl_mask & 1)
7859 + ctx->adata.key_virt = ctx->key;
7860 + else
7861 + ctx->adata.key_dma = ctx->key_dma;
7862
7863 - /*
7864 - * Wait for IV transfer (ofifo -> class2) to finish before starting
7865 - * ciphertext transfer (ofifo -> external memory).
7866 - */
7867 - wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
7868 - set_jump_tgt_here(desc, wait_cmd);
7869 + if (inl_mask & 2)
7870 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
7871 + else
7872 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
7873
7874 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
7875 - FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
7876 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
7877 -
7878 - /* Write ICV */
7879 - append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
7880 - LDST_SRCDST_BYTE_CONTEXT);
7881 + ctx->adata.key_inline = !!(inl_mask & 1);
7882 + ctx->cdata.key_inline = !!(inl_mask & 2);
7883
7884 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
7885 - desc_bytes(desc),
7886 - DMA_TO_DEVICE);
7887 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
7888 - dev_err(jrdev, "unable to map shared descriptor\n");
7889 - return -ENOMEM;
7890 - }
7891 -#ifdef DEBUG
7892 - print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
7893 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
7894 - desc_bytes(desc), 1);
7895 -#endif
7896 + /* aead_givencrypt shared descriptor */
7897 + desc = ctx->sh_desc_enc;
7898 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
7899 + ctx->authsize, is_rfc3686, nonce,
7900 + ctx1_iv_off, false, ctrlpriv->era);
7901 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
7902 + desc_bytes(desc), DMA_TO_DEVICE);
7903
7904 skip_givenc:
7905 return 0;
7906 @@ -785,12 +327,12 @@ static int gcm_set_sh_desc(struct crypto
7907 {
7908 struct caam_ctx *ctx = crypto_aead_ctx(aead);
7909 struct device *jrdev = ctx->jrdev;
7910 - bool keys_fit_inline = false;
7911 - u32 *key_jump_cmd, *zero_payload_jump_cmd,
7912 - *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
7913 + unsigned int ivsize = crypto_aead_ivsize(aead);
7914 u32 *desc;
7915 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
7916 + ctx->cdata.keylen;
7917
7918 - if (!ctx->enckeylen || !ctx->authsize)
7919 + if (!ctx->cdata.keylen || !ctx->authsize)
7920 return 0;
7921
7922 /*
7923 @@ -798,175 +340,35 @@ static int gcm_set_sh_desc(struct crypto
7924 * Job Descriptor and Shared Descriptor
7925 * must fit into the 64-word Descriptor h/w Buffer
7926 */
7927 - if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
7928 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
7929 - keys_fit_inline = true;
7930 + if (rem_bytes >= DESC_GCM_ENC_LEN) {
7931 + ctx->cdata.key_inline = true;
7932 + ctx->cdata.key_virt = ctx->key;
7933 + } else {
7934 + ctx->cdata.key_inline = false;
7935 + ctx->cdata.key_dma = ctx->key_dma;
7936 + }
7937
7938 desc = ctx->sh_desc_enc;
7939 -
7940 - init_sh_desc(desc, HDR_SHARE_SERIAL);
7941 -
7942 - /* skip key loading if they are loaded due to sharing */
7943 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
7944 - JUMP_COND_SHRD | JUMP_COND_SELF);
7945 - if (keys_fit_inline)
7946 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
7947 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
7948 - else
7949 - append_key(desc, ctx->key_dma, ctx->enckeylen,
7950 - CLASS_1 | KEY_DEST_CLASS_REG);
7951 - set_jump_tgt_here(desc, key_jump_cmd);
7952 -
7953 - /* class 1 operation */
7954 - append_operation(desc, ctx->class1_alg_type |
7955 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
7956 -
7957 - /* if assoclen + cryptlen is ZERO, skip to ICV write */
7958 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7959 - zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
7960 - JUMP_COND_MATH_Z);
7961 -
7962 - /* if assoclen is ZERO, skip reading the assoc data */
7963 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
7964 - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
7965 - JUMP_COND_MATH_Z);
7966 -
7967 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
7968 -
7969 - /* skip assoc data */
7970 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
7971 -
7972 - /* cryptlen = seqinlen - assoclen */
7973 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
7974 -
7975 - /* if cryptlen is ZERO jump to zero-payload commands */
7976 - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
7977 - JUMP_COND_MATH_Z);
7978 -
7979 - /* read assoc data */
7980 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7981 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
7982 - set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
7983 -
7984 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
7985 -
7986 - /* write encrypted data */
7987 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
7988 -
7989 - /* read payload data */
7990 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
7991 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
7992 -
7993 - /* jump the zero-payload commands */
7994 - append_jump(desc, JUMP_TEST_ALL | 2);
7995 -
7996 - /* zero-payload commands */
7997 - set_jump_tgt_here(desc, zero_payload_jump_cmd);
7998 -
7999 - /* read assoc data */
8000 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8001 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
8002 -
8003 - /* There is no input data */
8004 - set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
8005 -
8006 - /* write ICV */
8007 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8008 - LDST_SRCDST_BYTE_CONTEXT);
8009 -
8010 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8011 - desc_bytes(desc),
8012 - DMA_TO_DEVICE);
8013 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8014 - dev_err(jrdev, "unable to map shared descriptor\n");
8015 - return -ENOMEM;
8016 - }
8017 -#ifdef DEBUG
8018 - print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
8019 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8020 - desc_bytes(desc), 1);
8021 -#endif
8022 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8023 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8024 + desc_bytes(desc), DMA_TO_DEVICE);
8025
8026 /*
8027 * Job Descriptor and Shared Descriptors
8028 * must all fit into the 64-word Descriptor h/w Buffer
8029 */
8030 - keys_fit_inline = false;
8031 - if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8032 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8033 - keys_fit_inline = true;
8034 + if (rem_bytes >= DESC_GCM_DEC_LEN) {
8035 + ctx->cdata.key_inline = true;
8036 + ctx->cdata.key_virt = ctx->key;
8037 + } else {
8038 + ctx->cdata.key_inline = false;
8039 + ctx->cdata.key_dma = ctx->key_dma;
8040 + }
8041
8042 desc = ctx->sh_desc_dec;
8043 -
8044 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8045 -
8046 - /* skip key loading if they are loaded due to sharing */
8047 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8048 - JUMP_TEST_ALL | JUMP_COND_SHRD |
8049 - JUMP_COND_SELF);
8050 - if (keys_fit_inline)
8051 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8052 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8053 - else
8054 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8055 - CLASS_1 | KEY_DEST_CLASS_REG);
8056 - set_jump_tgt_here(desc, key_jump_cmd);
8057 -
8058 - /* class 1 operation */
8059 - append_operation(desc, ctx->class1_alg_type |
8060 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8061 -
8062 - /* if assoclen is ZERO, skip reading the assoc data */
8063 - append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
8064 - zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
8065 - JUMP_COND_MATH_Z);
8066 -
8067 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8068 -
8069 - /* skip assoc data */
8070 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8071 -
8072 - /* read assoc data */
8073 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8074 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8075 -
8076 - set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
8077 -
8078 - /* cryptlen = seqoutlen - assoclen */
8079 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8080 -
8081 - /* jump to zero-payload command if cryptlen is zero */
8082 - zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
8083 - JUMP_COND_MATH_Z);
8084 -
8085 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8086 -
8087 - /* store encrypted data */
8088 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8089 -
8090 - /* read payload data */
8091 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8092 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8093 -
8094 - /* zero-payload command */
8095 - set_jump_tgt_here(desc, zero_payload_jump_cmd);
8096 -
8097 - /* read ICV */
8098 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8099 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8100 -
8101 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8102 - desc_bytes(desc),
8103 - DMA_TO_DEVICE);
8104 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8105 - dev_err(jrdev, "unable to map shared descriptor\n");
8106 - return -ENOMEM;
8107 - }
8108 -#ifdef DEBUG
8109 - print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
8110 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8111 - desc_bytes(desc), 1);
8112 -#endif
8113 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false);
8114 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8115 + desc_bytes(desc), DMA_TO_DEVICE);
8116
8117 return 0;
8118 }
8119 @@ -985,11 +387,12 @@ static int rfc4106_set_sh_desc(struct cr
8120 {
8121 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8122 struct device *jrdev = ctx->jrdev;
8123 - bool keys_fit_inline = false;
8124 - u32 *key_jump_cmd;
8125 + unsigned int ivsize = crypto_aead_ivsize(aead);
8126 u32 *desc;
8127 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8128 + ctx->cdata.keylen;
8129
8130 - if (!ctx->enckeylen || !ctx->authsize)
8131 + if (!ctx->cdata.keylen || !ctx->authsize)
8132 return 0;
8133
8134 /*
8135 @@ -997,148 +400,37 @@ static int rfc4106_set_sh_desc(struct cr
8136 * Job Descriptor and Shared Descriptor
8137 * must fit into the 64-word Descriptor h/w Buffer
8138 */
8139 - if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8140 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8141 - keys_fit_inline = true;
8142 + if (rem_bytes >= DESC_RFC4106_ENC_LEN) {
8143 + ctx->cdata.key_inline = true;
8144 + ctx->cdata.key_virt = ctx->key;
8145 + } else {
8146 + ctx->cdata.key_inline = false;
8147 + ctx->cdata.key_dma = ctx->key_dma;
8148 + }
8149
8150 desc = ctx->sh_desc_enc;
8151 -
8152 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8153 -
8154 - /* Skip key loading if it is loaded due to sharing */
8155 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8156 - JUMP_COND_SHRD);
8157 - if (keys_fit_inline)
8158 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8159 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8160 - else
8161 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8162 - CLASS_1 | KEY_DEST_CLASS_REG);
8163 - set_jump_tgt_here(desc, key_jump_cmd);
8164 -
8165 - /* Class 1 operation */
8166 - append_operation(desc, ctx->class1_alg_type |
8167 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8168 -
8169 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8170 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8171 -
8172 - /* Read assoc data */
8173 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8174 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8175 -
8176 - /* Skip IV */
8177 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8178 -
8179 - /* Will read cryptlen bytes */
8180 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8181 -
8182 - /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8183 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8184 -
8185 - /* Skip assoc data */
8186 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8187 -
8188 - /* cryptlen = seqoutlen - assoclen */
8189 - append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
8190 -
8191 - /* Write encrypted data */
8192 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8193 -
8194 - /* Read payload data */
8195 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8196 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
8197 -
8198 - /* Write ICV */
8199 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8200 - LDST_SRCDST_BYTE_CONTEXT);
8201 -
8202 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8203 - desc_bytes(desc),
8204 - DMA_TO_DEVICE);
8205 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8206 - dev_err(jrdev, "unable to map shared descriptor\n");
8207 - return -ENOMEM;
8208 - }
8209 -#ifdef DEBUG
8210 - print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
8211 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8212 - desc_bytes(desc), 1);
8213 -#endif
8214 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8215 + false);
8216 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8217 + desc_bytes(desc), DMA_TO_DEVICE);
8218
8219 /*
8220 * Job Descriptor and Shared Descriptors
8221 * must all fit into the 64-word Descriptor h/w Buffer
8222 */
8223 - keys_fit_inline = false;
8224 - if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
8225 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8226 - keys_fit_inline = true;
8227 + if (rem_bytes >= DESC_RFC4106_DEC_LEN) {
8228 + ctx->cdata.key_inline = true;
8229 + ctx->cdata.key_virt = ctx->key;
8230 + } else {
8231 + ctx->cdata.key_inline = false;
8232 + ctx->cdata.key_dma = ctx->key_dma;
8233 + }
8234
8235 desc = ctx->sh_desc_dec;
8236 -
8237 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8238 -
8239 - /* Skip key loading if it is loaded due to sharing */
8240 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8241 - JUMP_TEST_ALL | JUMP_COND_SHRD);
8242 - if (keys_fit_inline)
8243 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8244 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8245 - else
8246 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8247 - CLASS_1 | KEY_DEST_CLASS_REG);
8248 - set_jump_tgt_here(desc, key_jump_cmd);
8249 -
8250 - /* Class 1 operation */
8251 - append_operation(desc, ctx->class1_alg_type |
8252 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8253 -
8254 - append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
8255 - append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
8256 -
8257 - /* Read assoc data */
8258 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8259 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
8260 -
8261 - /* Skip IV */
8262 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
8263 -
8264 - /* Will read cryptlen bytes */
8265 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
8266 -
8267 - /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
8268 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
8269 -
8270 - /* Skip assoc data */
8271 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
8272 -
8273 - /* Will write cryptlen bytes */
8274 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8275 -
8276 - /* Store payload data */
8277 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8278 -
8279 - /* Read encrypted data */
8280 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
8281 - FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
8282 -
8283 - /* Read ICV */
8284 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8285 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8286 -
8287 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8288 - desc_bytes(desc),
8289 - DMA_TO_DEVICE);
8290 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8291 - dev_err(jrdev, "unable to map shared descriptor\n");
8292 - return -ENOMEM;
8293 - }
8294 -#ifdef DEBUG
8295 - print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
8296 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8297 - desc_bytes(desc), 1);
8298 -#endif
8299 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8300 + false);
8301 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8302 + desc_bytes(desc), DMA_TO_DEVICE);
8303
8304 return 0;
8305 }
8306 @@ -1158,12 +450,12 @@ static int rfc4543_set_sh_desc(struct cr
8307 {
8308 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8309 struct device *jrdev = ctx->jrdev;
8310 - bool keys_fit_inline = false;
8311 - u32 *key_jump_cmd;
8312 - u32 *read_move_cmd, *write_move_cmd;
8313 + unsigned int ivsize = crypto_aead_ivsize(aead);
8314 u32 *desc;
8315 + int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
8316 + ctx->cdata.keylen;
8317
8318 - if (!ctx->enckeylen || !ctx->authsize)
8319 + if (!ctx->cdata.keylen || !ctx->authsize)
8320 return 0;
8321
8322 /*
8323 @@ -1171,151 +463,37 @@ static int rfc4543_set_sh_desc(struct cr
8324 * Job Descriptor and Shared Descriptor
8325 * must fit into the 64-word Descriptor h/w Buffer
8326 */
8327 - if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
8328 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8329 - keys_fit_inline = true;
8330 + if (rem_bytes >= DESC_RFC4543_ENC_LEN) {
8331 + ctx->cdata.key_inline = true;
8332 + ctx->cdata.key_virt = ctx->key;
8333 + } else {
8334 + ctx->cdata.key_inline = false;
8335 + ctx->cdata.key_dma = ctx->key_dma;
8336 + }
8337
8338 desc = ctx->sh_desc_enc;
8339 -
8340 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8341 -
8342 - /* Skip key loading if it is loaded due to sharing */
8343 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8344 - JUMP_COND_SHRD);
8345 - if (keys_fit_inline)
8346 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8347 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8348 - else
8349 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8350 - CLASS_1 | KEY_DEST_CLASS_REG);
8351 - set_jump_tgt_here(desc, key_jump_cmd);
8352 -
8353 - /* Class 1 operation */
8354 - append_operation(desc, ctx->class1_alg_type |
8355 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8356 -
8357 - /* assoclen + cryptlen = seqinlen */
8358 - append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
8359 -
8360 - /*
8361 - * MOVE_LEN opcode is not available in all SEC HW revisions,
8362 - * thus need to do some magic, i.e. self-patch the descriptor
8363 - * buffer.
8364 - */
8365 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8366 - (0x6 << MOVE_LEN_SHIFT));
8367 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8368 - (0x8 << MOVE_LEN_SHIFT));
8369 -
8370 - /* Will read assoclen + cryptlen bytes */
8371 - append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8372 -
8373 - /* Will write assoclen + cryptlen bytes */
8374 - append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
8375 -
8376 - /* Read and write assoclen + cryptlen bytes */
8377 - aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
8378 -
8379 - set_move_tgt_here(desc, read_move_cmd);
8380 - set_move_tgt_here(desc, write_move_cmd);
8381 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8382 - /* Move payload data to OFIFO */
8383 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8384 -
8385 - /* Write ICV */
8386 - append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
8387 - LDST_SRCDST_BYTE_CONTEXT);
8388 -
8389 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8390 - desc_bytes(desc),
8391 - DMA_TO_DEVICE);
8392 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8393 - dev_err(jrdev, "unable to map shared descriptor\n");
8394 - return -ENOMEM;
8395 - }
8396 -#ifdef DEBUG
8397 - print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
8398 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8399 - desc_bytes(desc), 1);
8400 -#endif
8401 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
8402 + false);
8403 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8404 + desc_bytes(desc), DMA_TO_DEVICE);
8405
8406 /*
8407 * Job Descriptor and Shared Descriptors
8408 * must all fit into the 64-word Descriptor h/w Buffer
8409 */
8410 - keys_fit_inline = false;
8411 - if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
8412 - ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
8413 - keys_fit_inline = true;
8414 + if (rem_bytes >= DESC_RFC4543_DEC_LEN) {
8415 + ctx->cdata.key_inline = true;
8416 + ctx->cdata.key_virt = ctx->key;
8417 + } else {
8418 + ctx->cdata.key_inline = false;
8419 + ctx->cdata.key_dma = ctx->key_dma;
8420 + }
8421
8422 desc = ctx->sh_desc_dec;
8423 -
8424 - init_sh_desc(desc, HDR_SHARE_SERIAL);
8425 -
8426 - /* Skip key loading if it is loaded due to sharing */
8427 - key_jump_cmd = append_jump(desc, JUMP_JSL |
8428 - JUMP_TEST_ALL | JUMP_COND_SHRD);
8429 - if (keys_fit_inline)
8430 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8431 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8432 - else
8433 - append_key(desc, ctx->key_dma, ctx->enckeylen,
8434 - CLASS_1 | KEY_DEST_CLASS_REG);
8435 - set_jump_tgt_here(desc, key_jump_cmd);
8436 -
8437 - /* Class 1 operation */
8438 - append_operation(desc, ctx->class1_alg_type |
8439 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
8440 -
8441 - /* assoclen + cryptlen = seqoutlen */
8442 - append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8443 -
8444 - /*
8445 - * MOVE_LEN opcode is not available in all SEC HW revisions,
8446 - * thus need to do some magic, i.e. self-patch the descriptor
8447 - * buffer.
8448 - */
8449 - read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
8450 - (0x6 << MOVE_LEN_SHIFT));
8451 - write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
8452 - (0x8 << MOVE_LEN_SHIFT));
8453 -
8454 - /* Will read assoclen + cryptlen bytes */
8455 - append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8456 -
8457 - /* Will write assoclen + cryptlen bytes */
8458 - append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
8459 -
8460 - /* Store payload data */
8461 - append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
8462 -
8463 - /* In-snoop assoclen + cryptlen data */
8464 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
8465 - FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
8466 -
8467 - set_move_tgt_here(desc, read_move_cmd);
8468 - set_move_tgt_here(desc, write_move_cmd);
8469 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8470 - /* Move payload data to OFIFO */
8471 - append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
8472 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8473 -
8474 - /* Read ICV */
8475 - append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
8476 - FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
8477 -
8478 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8479 - desc_bytes(desc),
8480 - DMA_TO_DEVICE);
8481 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8482 - dev_err(jrdev, "unable to map shared descriptor\n");
8483 - return -ENOMEM;
8484 - }
8485 -#ifdef DEBUG
8486 - print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
8487 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8488 - desc_bytes(desc), 1);
8489 -#endif
8490 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
8491 + false);
8492 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8493 + desc_bytes(desc), DMA_TO_DEVICE);
8494
8495 return 0;
8496 }
8497 @@ -1331,74 +509,67 @@ static int rfc4543_setauthsize(struct cr
8498 return 0;
8499 }
8500
8501 -static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
8502 - u32 authkeylen)
8503 -{
8504 - return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
8505 - ctx->split_key_pad_len, key_in, authkeylen,
8506 - ctx->alg_op);
8507 -}
8508 -
8509 static int aead_setkey(struct crypto_aead *aead,
8510 const u8 *key, unsigned int keylen)
8511 {
8512 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
8513 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
8514 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8515 struct device *jrdev = ctx->jrdev;
8516 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
8517 struct crypto_authenc_keys keys;
8518 int ret = 0;
8519
8520 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
8521 goto badkey;
8522
8523 - /* Pick class 2 key length from algorithm submask */
8524 - ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
8525 - OP_ALG_ALGSEL_SHIFT] * 2;
8526 - ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
8527 -
8528 - if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
8529 - goto badkey;
8530 -
8531 #ifdef DEBUG
8532 printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
8533 keys.authkeylen + keys.enckeylen, keys.enckeylen,
8534 keys.authkeylen);
8535 - printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
8536 - ctx->split_key_len, ctx->split_key_pad_len);
8537 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8538 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8539 #endif
8540
8541 - ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
8542 + /*
8543 + * If DKP is supported, use it in the shared descriptor to generate
8544 + * the split key.
8545 + */
8546 + if (ctrlpriv->era >= 6) {
8547 + ctx->adata.keylen = keys.authkeylen;
8548 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
8549 + OP_ALG_ALGSEL_MASK);
8550 +
8551 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
8552 + goto badkey;
8553 +
8554 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
8555 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
8556 + keys.enckeylen);
8557 + dma_sync_single_for_device(jrdev, ctx->key_dma,
8558 + ctx->adata.keylen_pad +
8559 + keys.enckeylen, DMA_TO_DEVICE);
8560 + goto skip_split_key;
8561 + }
8562 +
8563 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
8564 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
8565 + keys.enckeylen);
8566 if (ret) {
8567 goto badkey;
8568 }
8569
8570 /* postpend encryption key to auth split key */
8571 - memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
8572 -
8573 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
8574 - keys.enckeylen, DMA_TO_DEVICE);
8575 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8576 - dev_err(jrdev, "unable to map key i/o memory\n");
8577 - return -ENOMEM;
8578 - }
8579 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
8580 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
8581 + keys.enckeylen, DMA_TO_DEVICE);
8582 #ifdef DEBUG
8583 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
8584 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
8585 - ctx->split_key_pad_len + keys.enckeylen, 1);
8586 + ctx->adata.keylen_pad + keys.enckeylen, 1);
8587 #endif
8588
8589 - ctx->enckeylen = keys.enckeylen;
8590 -
8591 - ret = aead_set_sh_desc(aead);
8592 - if (ret) {
8593 - dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
8594 - keys.enckeylen, DMA_TO_DEVICE);
8595 - }
8596 -
8597 - return ret;
8598 +skip_split_key:
8599 + ctx->cdata.keylen = keys.enckeylen;
8600 + return aead_set_sh_desc(aead);
8601 badkey:
8602 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
8603 return -EINVAL;
8604 @@ -1409,7 +580,6 @@ static int gcm_setkey(struct crypto_aead
8605 {
8606 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8607 struct device *jrdev = ctx->jrdev;
8608 - int ret = 0;
8609
8610 #ifdef DEBUG
8611 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8612 @@ -1417,21 +587,10 @@ static int gcm_setkey(struct crypto_aead
8613 #endif
8614
8615 memcpy(ctx->key, key, keylen);
8616 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8617 - DMA_TO_DEVICE);
8618 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8619 - dev_err(jrdev, "unable to map key i/o memory\n");
8620 - return -ENOMEM;
8621 - }
8622 - ctx->enckeylen = keylen;
8623 -
8624 - ret = gcm_set_sh_desc(aead);
8625 - if (ret) {
8626 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8627 - DMA_TO_DEVICE);
8628 - }
8629 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8630 + ctx->cdata.keylen = keylen;
8631
8632 - return ret;
8633 + return gcm_set_sh_desc(aead);
8634 }
8635
8636 static int rfc4106_setkey(struct crypto_aead *aead,
8637 @@ -1439,7 +598,6 @@ static int rfc4106_setkey(struct crypto_
8638 {
8639 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8640 struct device *jrdev = ctx->jrdev;
8641 - int ret = 0;
8642
8643 if (keylen < 4)
8644 return -EINVAL;
8645 @@ -1455,22 +613,10 @@ static int rfc4106_setkey(struct crypto_
8646 * The last four bytes of the key material are used as the salt value
8647 * in the nonce. Update the AES key length.
8648 */
8649 - ctx->enckeylen = keylen - 4;
8650 -
8651 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8652 - DMA_TO_DEVICE);
8653 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8654 - dev_err(jrdev, "unable to map key i/o memory\n");
8655 - return -ENOMEM;
8656 - }
8657 -
8658 - ret = rfc4106_set_sh_desc(aead);
8659 - if (ret) {
8660 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8661 - DMA_TO_DEVICE);
8662 - }
8663 -
8664 - return ret;
8665 + ctx->cdata.keylen = keylen - 4;
8666 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8667 + DMA_TO_DEVICE);
8668 + return rfc4106_set_sh_desc(aead);
8669 }
8670
8671 static int rfc4543_setkey(struct crypto_aead *aead,
8672 @@ -1478,7 +624,6 @@ static int rfc4543_setkey(struct crypto_
8673 {
8674 struct caam_ctx *ctx = crypto_aead_ctx(aead);
8675 struct device *jrdev = ctx->jrdev;
8676 - int ret = 0;
8677
8678 if (keylen < 4)
8679 return -EINVAL;
8680 @@ -1494,43 +639,28 @@ static int rfc4543_setkey(struct crypto_
8681 * The last four bytes of the key material are used as the salt value
8682 * in the nonce. Update the AES key length.
8683 */
8684 - ctx->enckeylen = keylen - 4;
8685 -
8686 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
8687 - DMA_TO_DEVICE);
8688 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8689 - dev_err(jrdev, "unable to map key i/o memory\n");
8690 - return -ENOMEM;
8691 - }
8692 -
8693 - ret = rfc4543_set_sh_desc(aead);
8694 - if (ret) {
8695 - dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
8696 - DMA_TO_DEVICE);
8697 - }
8698 -
8699 - return ret;
8700 + ctx->cdata.keylen = keylen - 4;
8701 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
8702 + DMA_TO_DEVICE);
8703 + return rfc4543_set_sh_desc(aead);
8704 }
8705
8706 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8707 const u8 *key, unsigned int keylen)
8708 {
8709 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8710 - struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
8711 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
8712 const char *alg_name = crypto_tfm_alg_name(tfm);
8713 struct device *jrdev = ctx->jrdev;
8714 - int ret = 0;
8715 - u32 *key_jump_cmd;
8716 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
8717 u32 *desc;
8718 - u8 *nonce;
8719 - u32 geniv;
8720 u32 ctx1_iv_off = 0;
8721 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
8722 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
8723 OP_ALG_AAI_CTR_MOD128);
8724 const bool is_rfc3686 = (ctr_mode &&
8725 (strstr(alg_name, "rfc3686") != NULL));
8726
8727 + memcpy(ctx->key, key, keylen);
8728 #ifdef DEBUG
8729 print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
8730 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
8731 @@ -1553,215 +683,33 @@ static int ablkcipher_setkey(struct cryp
8732 keylen -= CTR_RFC3686_NONCE_SIZE;
8733 }
8734
8735 - memcpy(ctx->key, key, keylen);
8736 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
8737 - DMA_TO_DEVICE);
8738 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8739 - dev_err(jrdev, "unable to map key i/o memory\n");
8740 - return -ENOMEM;
8741 - }
8742 - ctx->enckeylen = keylen;
8743 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8744 + ctx->cdata.keylen = keylen;
8745 + ctx->cdata.key_virt = ctx->key;
8746 + ctx->cdata.key_inline = true;
8747
8748 /* ablkcipher_encrypt shared descriptor */
8749 desc = ctx->sh_desc_enc;
8750 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8751 - /* Skip if already shared */
8752 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8753 - JUMP_COND_SHRD);
8754 -
8755 - /* Load class1 key only */
8756 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8757 - ctx->enckeylen, CLASS_1 |
8758 - KEY_DEST_CLASS_REG);
8759 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
8760 + ctx1_iv_off);
8761 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
8762 + desc_bytes(desc), DMA_TO_DEVICE);
8763
8764 - /* Load nonce into CONTEXT1 reg */
8765 - if (is_rfc3686) {
8766 - nonce = (u8 *)key + keylen;
8767 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8768 - LDST_CLASS_IND_CCB |
8769 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8770 - append_move(desc, MOVE_WAITCOMP |
8771 - MOVE_SRC_OUTFIFO |
8772 - MOVE_DEST_CLASS1CTX |
8773 - (16 << MOVE_OFFSET_SHIFT) |
8774 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8775 - }
8776 -
8777 - set_jump_tgt_here(desc, key_jump_cmd);
8778 -
8779 - /* Load iv */
8780 - append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8781 - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8782 -
8783 - /* Load counter into CONTEXT1 reg */
8784 - if (is_rfc3686)
8785 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8786 - LDST_SRCDST_BYTE_CONTEXT |
8787 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8788 - LDST_OFFSET_SHIFT));
8789 -
8790 - /* Load operation */
8791 - append_operation(desc, ctx->class1_alg_type |
8792 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8793 -
8794 - /* Perform operation */
8795 - ablkcipher_append_src_dst(desc);
8796 -
8797 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
8798 - desc_bytes(desc),
8799 - DMA_TO_DEVICE);
8800 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
8801 - dev_err(jrdev, "unable to map shared descriptor\n");
8802 - return -ENOMEM;
8803 - }
8804 -#ifdef DEBUG
8805 - print_hex_dump(KERN_ERR,
8806 - "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
8807 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8808 - desc_bytes(desc), 1);
8809 -#endif
8810 /* ablkcipher_decrypt shared descriptor */
8811 desc = ctx->sh_desc_dec;
8812 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
8813 + ctx1_iv_off);
8814 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
8815 + desc_bytes(desc), DMA_TO_DEVICE);
8816
8817 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8818 - /* Skip if already shared */
8819 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8820 - JUMP_COND_SHRD);
8821 -
8822 - /* Load class1 key only */
8823 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8824 - ctx->enckeylen, CLASS_1 |
8825 - KEY_DEST_CLASS_REG);
8826 -
8827 - /* Load nonce into CONTEXT1 reg */
8828 - if (is_rfc3686) {
8829 - nonce = (u8 *)key + keylen;
8830 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8831 - LDST_CLASS_IND_CCB |
8832 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8833 - append_move(desc, MOVE_WAITCOMP |
8834 - MOVE_SRC_OUTFIFO |
8835 - MOVE_DEST_CLASS1CTX |
8836 - (16 << MOVE_OFFSET_SHIFT) |
8837 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8838 - }
8839 -
8840 - set_jump_tgt_here(desc, key_jump_cmd);
8841 -
8842 - /* load IV */
8843 - append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
8844 - LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
8845 -
8846 - /* Load counter into CONTEXT1 reg */
8847 - if (is_rfc3686)
8848 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8849 - LDST_SRCDST_BYTE_CONTEXT |
8850 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8851 - LDST_OFFSET_SHIFT));
8852 -
8853 - /* Choose operation */
8854 - if (ctr_mode)
8855 - append_operation(desc, ctx->class1_alg_type |
8856 - OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
8857 - else
8858 - append_dec_op1(desc, ctx->class1_alg_type);
8859 -
8860 - /* Perform operation */
8861 - ablkcipher_append_src_dst(desc);
8862 -
8863 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
8864 - desc_bytes(desc),
8865 - DMA_TO_DEVICE);
8866 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
8867 - dev_err(jrdev, "unable to map shared descriptor\n");
8868 - return -ENOMEM;
8869 - }
8870 -
8871 -#ifdef DEBUG
8872 - print_hex_dump(KERN_ERR,
8873 - "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
8874 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8875 - desc_bytes(desc), 1);
8876 -#endif
8877 /* ablkcipher_givencrypt shared descriptor */
8878 desc = ctx->sh_desc_givenc;
8879 + cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
8880 + ctx1_iv_off);
8881 + dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
8882 + desc_bytes(desc), DMA_TO_DEVICE);
8883
8884 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8885 - /* Skip if already shared */
8886 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8887 - JUMP_COND_SHRD);
8888 -
8889 - /* Load class1 key only */
8890 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8891 - ctx->enckeylen, CLASS_1 |
8892 - KEY_DEST_CLASS_REG);
8893 -
8894 - /* Load Nonce into CONTEXT1 reg */
8895 - if (is_rfc3686) {
8896 - nonce = (u8 *)key + keylen;
8897 - append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
8898 - LDST_CLASS_IND_CCB |
8899 - LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
8900 - append_move(desc, MOVE_WAITCOMP |
8901 - MOVE_SRC_OUTFIFO |
8902 - MOVE_DEST_CLASS1CTX |
8903 - (16 << MOVE_OFFSET_SHIFT) |
8904 - (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
8905 - }
8906 - set_jump_tgt_here(desc, key_jump_cmd);
8907 -
8908 - /* Generate IV */
8909 - geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
8910 - NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
8911 - NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
8912 - append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
8913 - LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
8914 - append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
8915 - append_move(desc, MOVE_WAITCOMP |
8916 - MOVE_SRC_INFIFO |
8917 - MOVE_DEST_CLASS1CTX |
8918 - (crt->ivsize << MOVE_LEN_SHIFT) |
8919 - (ctx1_iv_off << MOVE_OFFSET_SHIFT));
8920 - append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
8921 -
8922 - /* Copy generated IV to memory */
8923 - append_seq_store(desc, crt->ivsize,
8924 - LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
8925 - (ctx1_iv_off << LDST_OFFSET_SHIFT));
8926 -
8927 - /* Load Counter into CONTEXT1 reg */
8928 - if (is_rfc3686)
8929 - append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
8930 - LDST_SRCDST_BYTE_CONTEXT |
8931 - ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
8932 - LDST_OFFSET_SHIFT));
8933 -
8934 - if (ctx1_iv_off)
8935 - append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
8936 - (1 << JUMP_OFFSET_SHIFT));
8937 -
8938 - /* Load operation */
8939 - append_operation(desc, ctx->class1_alg_type |
8940 - OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
8941 -
8942 - /* Perform operation */
8943 - ablkcipher_append_src_dst(desc);
8944 -
8945 - ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
8946 - desc_bytes(desc),
8947 - DMA_TO_DEVICE);
8948 - if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
8949 - dev_err(jrdev, "unable to map shared descriptor\n");
8950 - return -ENOMEM;
8951 - }
8952 -#ifdef DEBUG
8953 - print_hex_dump(KERN_ERR,
8954 - "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
8955 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
8956 - desc_bytes(desc), 1);
8957 -#endif
8958 -
8959 - return ret;
8960 + return 0;
8961 }
8962
8963 static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
8964 @@ -1769,8 +717,7 @@ static int xts_ablkcipher_setkey(struct
8965 {
8966 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
8967 struct device *jrdev = ctx->jrdev;
8968 - u32 *key_jump_cmd, *desc;
8969 - __be64 sector_size = cpu_to_be64(512);
8970 + u32 *desc;
8971
8972 if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
8973 crypto_ablkcipher_set_flags(ablkcipher,
8974 @@ -1780,126 +727,38 @@ static int xts_ablkcipher_setkey(struct
8975 }
8976
8977 memcpy(ctx->key, key, keylen);
8978 - ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
8979 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
8980 - dev_err(jrdev, "unable to map key i/o memory\n");
8981 - return -ENOMEM;
8982 - }
8983 - ctx->enckeylen = keylen;
8984 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
8985 + ctx->cdata.keylen = keylen;
8986 + ctx->cdata.key_virt = ctx->key;
8987 + ctx->cdata.key_inline = true;
8988
8989 /* xts_ablkcipher_encrypt shared descriptor */
8990 desc = ctx->sh_desc_enc;
8991 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
8992 - /* Skip if already shared */
8993 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
8994 - JUMP_COND_SHRD);
8995 -
8996 - /* Load class1 keys only */
8997 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
8998 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
8999 -
9000 - /* Load sector size with index 40 bytes (0x28) */
9001 - append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
9002 - LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
9003 - append_data(desc, (void *)&sector_size, 8);
9004 -
9005 - set_jump_tgt_here(desc, key_jump_cmd);
9006 -
9007 - /*
9008 - * create sequence for loading the sector index
9009 - * Upper 8B of IV - will be used as sector index
9010 - * Lower 8B of IV - will be discarded
9011 - */
9012 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
9013 - LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
9014 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
9015 -
9016 - /* Load operation */
9017 - append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
9018 - OP_ALG_ENCRYPT);
9019 -
9020 - /* Perform operation */
9021 - ablkcipher_append_src_dst(desc);
9022 -
9023 - ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
9024 - DMA_TO_DEVICE);
9025 - if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
9026 - dev_err(jrdev, "unable to map shared descriptor\n");
9027 - return -ENOMEM;
9028 - }
9029 -#ifdef DEBUG
9030 - print_hex_dump(KERN_ERR,
9031 - "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
9032 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
9033 -#endif
9034 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
9035 + dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
9036 + desc_bytes(desc), DMA_TO_DEVICE);
9037
9038 /* xts_ablkcipher_decrypt shared descriptor */
9039 desc = ctx->sh_desc_dec;
9040 -
9041 - init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
9042 - /* Skip if already shared */
9043 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
9044 - JUMP_COND_SHRD);
9045 -
9046 - /* Load class1 key only */
9047 - append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
9048 - ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
9049 -
9050 - /* Load sector size with index 40 bytes (0x28) */
9051 - append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
9052 - LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
9053 - append_data(desc, (void *)&sector_size, 8);
9054 -
9055 - set_jump_tgt_here(desc, key_jump_cmd);
9056 -
9057 - /*
9058 - * create sequence for loading the sector index
9059 - * Upper 8B of IV - will be used as sector index
9060 - * Lower 8B of IV - will be discarded
9061 - */
9062 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
9063 - LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
9064 - append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
9065 -
9066 - /* Load operation */
9067 - append_dec_op1(desc, ctx->class1_alg_type);
9068 -
9069 - /* Perform operation */
9070 - ablkcipher_append_src_dst(desc);
9071 -
9072 - ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
9073 - DMA_TO_DEVICE);
9074 - if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
9075 - dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
9076 - desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
9077 - dev_err(jrdev, "unable to map shared descriptor\n");
9078 - return -ENOMEM;
9079 - }
9080 -#ifdef DEBUG
9081 - print_hex_dump(KERN_ERR,
9082 - "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
9083 - DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
9084 -#endif
9085 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
9086 + dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
9087 + desc_bytes(desc), DMA_TO_DEVICE);
9088
9089 return 0;
9090 }
9091
9092 /*
9093 * aead_edesc - s/w-extended aead descriptor
9094 - * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
9095 - * @src_nents: number of segments in input scatterlist
9096 - * @dst_nents: number of segments in output scatterlist
9097 - * @iv_dma: dma address of iv for checking continuity and link table
9098 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9099 + * @src_nents: number of segments in input s/w scatterlist
9100 + * @dst_nents: number of segments in output s/w scatterlist
9101 * @sec4_sg_bytes: length of dma mapped sec4_sg space
9102 * @sec4_sg_dma: bus physical mapped address of h/w link table
9103 + * @sec4_sg: pointer to h/w link table
9104 * @hw_desc: the h/w job descriptor followed by any referenced link tables
9105 */
9106 struct aead_edesc {
9107 - int assoc_nents;
9108 int src_nents;
9109 int dst_nents;
9110 - dma_addr_t iv_dma;
9111 int sec4_sg_bytes;
9112 dma_addr_t sec4_sg_dma;
9113 struct sec4_sg_entry *sec4_sg;
9114 @@ -1908,12 +767,12 @@ struct aead_edesc {
9115
9116 /*
9117 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
9118 - * @src_nents: number of segments in input scatterlist
9119 - * @dst_nents: number of segments in output scatterlist
9120 + * @src_nents: number of segments in input s/w scatterlist
9121 + * @dst_nents: number of segments in output s/w scatterlist
9122 * @iv_dma: dma address of iv for checking continuity and link table
9123 - * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
9124 * @sec4_sg_bytes: length of dma mapped sec4_sg space
9125 * @sec4_sg_dma: bus physical mapped address of h/w link table
9126 + * @sec4_sg: pointer to h/w link table
9127 * @hw_desc: the h/w job descriptor followed by any referenced link tables
9128 */
9129 struct ablkcipher_edesc {
9130 @@ -1933,10 +792,11 @@ static void caam_unmap(struct device *de
9131 int sec4_sg_bytes)
9132 {
9133 if (dst != src) {
9134 - dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
9135 - dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
9136 + if (src_nents)
9137 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
9138 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
9139 } else {
9140 - dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
9141 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
9142 }
9143
9144 if (iv_dma)
9145 @@ -2031,8 +891,7 @@ static void ablkcipher_encrypt_done(stru
9146 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9147 #endif
9148
9149 - edesc = (struct ablkcipher_edesc *)((char *)desc -
9150 - offsetof(struct ablkcipher_edesc, hw_desc));
9151 + edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9152
9153 if (err)
9154 caam_jr_strstatus(jrdev, err);
9155 @@ -2041,10 +900,10 @@ static void ablkcipher_encrypt_done(stru
9156 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
9157 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9158 edesc->src_nents > 1 ? 100 : ivsize, 1);
9159 - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
9160 - DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9161 - edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9162 #endif
9163 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
9164 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9165 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9166
9167 ablkcipher_unmap(jrdev, edesc, req);
9168
9169 @@ -2074,8 +933,7 @@ static void ablkcipher_decrypt_done(stru
9170 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
9171 #endif
9172
9173 - edesc = (struct ablkcipher_edesc *)((char *)desc -
9174 - offsetof(struct ablkcipher_edesc, hw_desc));
9175 + edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
9176 if (err)
9177 caam_jr_strstatus(jrdev, err);
9178
9179 @@ -2083,10 +941,10 @@ static void ablkcipher_decrypt_done(stru
9180 print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
9181 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9182 ivsize, 1);
9183 - dbg_dump_sg(KERN_ERR, "dst @"__stringify(__LINE__)": ",
9184 - DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9185 - edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
9186 #endif
9187 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
9188 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
9189 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
9190
9191 ablkcipher_unmap(jrdev, edesc, req);
9192
9193 @@ -2127,7 +985,7 @@ static void init_aead_job(struct aead_re
9194 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9195
9196 if (all_contig) {
9197 - src_dma = sg_dma_address(req->src);
9198 + src_dma = edesc->src_nents ? sg_dma_address(req->src) : 0;
9199 in_options = 0;
9200 } else {
9201 src_dma = edesc->sec4_sg_dma;
9202 @@ -2142,7 +1000,7 @@ static void init_aead_job(struct aead_re
9203 out_options = in_options;
9204
9205 if (unlikely(req->src != req->dst)) {
9206 - if (!edesc->dst_nents) {
9207 + if (edesc->dst_nents == 1) {
9208 dst_dma = sg_dma_address(req->dst);
9209 out_options = 0;
9210 } else {
9211 @@ -2161,9 +1019,6 @@ static void init_aead_job(struct aead_re
9212 append_seq_out_ptr(desc, dst_dma,
9213 req->assoclen + req->cryptlen - authsize,
9214 out_options);
9215 -
9216 - /* REG3 = assoclen */
9217 - append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
9218 }
9219
9220 static void init_gcm_job(struct aead_request *req,
9221 @@ -2178,6 +1033,7 @@ static void init_gcm_job(struct aead_req
9222 unsigned int last;
9223
9224 init_aead_job(req, edesc, all_contig, encrypt);
9225 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
9226
9227 /* BUG This should not be specific to generic GCM. */
9228 last = 0;
9229 @@ -2189,7 +1045,7 @@ static void init_gcm_job(struct aead_req
9230 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
9231 /* Append Salt */
9232 if (!generic_gcm)
9233 - append_data(desc, ctx->key + ctx->enckeylen, 4);
9234 + append_data(desc, ctx->key + ctx->cdata.keylen, 4);
9235 /* Append IV */
9236 append_data(desc, req->iv, ivsize);
9237 /* End of blank commands */
9238 @@ -2204,7 +1060,8 @@ static void init_authenc_job(struct aead
9239 struct caam_aead_alg, aead);
9240 unsigned int ivsize = crypto_aead_ivsize(aead);
9241 struct caam_ctx *ctx = crypto_aead_ctx(aead);
9242 - const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
9243 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
9244 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
9245 OP_ALG_AAI_CTR_MOD128);
9246 const bool is_rfc3686 = alg->caam.rfc3686;
9247 u32 *desc = edesc->hw_desc;
9248 @@ -2227,6 +1084,15 @@ static void init_authenc_job(struct aead
9249
9250 init_aead_job(req, edesc, all_contig, encrypt);
9251
9252 + /*
9253 + * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
9254 + * having DPOVRD as destination.
9255 + */
9256 + if (ctrlpriv->era < 3)
9257 + append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
9258 + else
9259 + append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
9260 +
9261 if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
9262 append_load_as_imm(desc, req->iv, ivsize,
9263 LDST_CLASS_1_CCB |
9264 @@ -2250,16 +1116,15 @@ static void init_ablkcipher_job(u32 *sh_
9265 int len, sec4_sg_index = 0;
9266
9267 #ifdef DEBUG
9268 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9269 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9270 print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
9271 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9272 ivsize, 1);
9273 - printk(KERN_ERR "asked=%d, nbytes%d\n", (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
9274 - dbg_dump_sg(KERN_ERR, "src @"__stringify(__LINE__)": ",
9275 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9276 - edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9277 + pr_err("asked=%d, nbytes%d\n",
9278 + (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
9279 #endif
9280 + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
9281 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9282 + edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9283
9284 len = desc_len(sh_desc);
9285 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9286 @@ -2275,7 +1140,7 @@ static void init_ablkcipher_job(u32 *sh_
9287 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
9288
9289 if (likely(req->src == req->dst)) {
9290 - if (!edesc->src_nents && iv_contig) {
9291 + if (edesc->src_nents == 1 && iv_contig) {
9292 dst_dma = sg_dma_address(req->src);
9293 } else {
9294 dst_dma = edesc->sec4_sg_dma +
9295 @@ -2283,7 +1148,7 @@ static void init_ablkcipher_job(u32 *sh_
9296 out_options = LDST_SGF;
9297 }
9298 } else {
9299 - if (!edesc->dst_nents) {
9300 + if (edesc->dst_nents == 1) {
9301 dst_dma = sg_dma_address(req->dst);
9302 } else {
9303 dst_dma = edesc->sec4_sg_dma +
9304 @@ -2310,20 +1175,18 @@ static void init_ablkcipher_giv_job(u32
9305 int len, sec4_sg_index = 0;
9306
9307 #ifdef DEBUG
9308 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9309 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9310 print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
9311 DUMP_PREFIX_ADDRESS, 16, 4, req->info,
9312 ivsize, 1);
9313 - dbg_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
9314 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9315 - edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
9316 #endif
9317 + caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
9318 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9319 + edesc->src_nents > 1 ? 100 : req->nbytes, 1);
9320
9321 len = desc_len(sh_desc);
9322 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
9323
9324 - if (!edesc->src_nents) {
9325 + if (edesc->src_nents == 1) {
9326 src_dma = sg_dma_address(req->src);
9327 in_options = 0;
9328 } else {
9329 @@ -2354,87 +1217,100 @@ static struct aead_edesc *aead_edesc_all
9330 struct crypto_aead *aead = crypto_aead_reqtfm(req);
9331 struct caam_ctx *ctx = crypto_aead_ctx(aead);
9332 struct device *jrdev = ctx->jrdev;
9333 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9334 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
9335 - int src_nents, dst_nents = 0;
9336 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9337 + GFP_KERNEL : GFP_ATOMIC;
9338 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9339 struct aead_edesc *edesc;
9340 - int sgc;
9341 - bool all_contig = true;
9342 - int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
9343 + int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
9344 unsigned int authsize = ctx->authsize;
9345
9346 if (unlikely(req->dst != req->src)) {
9347 - src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
9348 - dst_nents = sg_count(req->dst,
9349 - req->assoclen + req->cryptlen +
9350 - (encrypt ? authsize : (-authsize)));
9351 - } else {
9352 - src_nents = sg_count(req->src,
9353 - req->assoclen + req->cryptlen +
9354 - (encrypt ? authsize : 0));
9355 - }
9356 -
9357 - /* Check if data are contiguous. */
9358 - all_contig = !src_nents;
9359 - if (!all_contig) {
9360 - src_nents = src_nents ? : 1;
9361 - sec4_sg_len = src_nents;
9362 - }
9363 -
9364 - sec4_sg_len += dst_nents;
9365 -
9366 - sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9367 + src_nents = sg_nents_for_len(req->src, req->assoclen +
9368 + req->cryptlen);
9369 + if (unlikely(src_nents < 0)) {
9370 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9371 + req->assoclen + req->cryptlen);
9372 + return ERR_PTR(src_nents);
9373 + }
9374
9375 - /* allocate space for base edesc and hw desc commands, link tables */
9376 - edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9377 - GFP_DMA | flags);
9378 - if (!edesc) {
9379 - dev_err(jrdev, "could not allocate extended descriptor\n");
9380 - return ERR_PTR(-ENOMEM);
9381 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
9382 + req->cryptlen +
9383 + (encrypt ? authsize :
9384 + (-authsize)));
9385 + if (unlikely(dst_nents < 0)) {
9386 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9387 + req->assoclen + req->cryptlen +
9388 + (encrypt ? authsize : (-authsize)));
9389 + return ERR_PTR(dst_nents);
9390 + }
9391 + } else {
9392 + src_nents = sg_nents_for_len(req->src, req->assoclen +
9393 + req->cryptlen +
9394 + (encrypt ? authsize : 0));
9395 + if (unlikely(src_nents < 0)) {
9396 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9397 + req->assoclen + req->cryptlen +
9398 + (encrypt ? authsize : 0));
9399 + return ERR_PTR(src_nents);
9400 + }
9401 }
9402
9403 if (likely(req->src == req->dst)) {
9404 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9405 - DMA_BIDIRECTIONAL);
9406 - if (unlikely(!sgc)) {
9407 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9408 + DMA_BIDIRECTIONAL);
9409 + if (unlikely(!mapped_src_nents)) {
9410 dev_err(jrdev, "unable to map source\n");
9411 - kfree(edesc);
9412 return ERR_PTR(-ENOMEM);
9413 }
9414 } else {
9415 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9416 - DMA_TO_DEVICE);
9417 - if (unlikely(!sgc)) {
9418 - dev_err(jrdev, "unable to map source\n");
9419 - kfree(edesc);
9420 - return ERR_PTR(-ENOMEM);
9421 + /* Cover also the case of null (zero length) input data */
9422 + if (src_nents) {
9423 + mapped_src_nents = dma_map_sg(jrdev, req->src,
9424 + src_nents, DMA_TO_DEVICE);
9425 + if (unlikely(!mapped_src_nents)) {
9426 + dev_err(jrdev, "unable to map source\n");
9427 + return ERR_PTR(-ENOMEM);
9428 + }
9429 + } else {
9430 + mapped_src_nents = 0;
9431 }
9432
9433 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9434 - DMA_FROM_DEVICE);
9435 - if (unlikely(!sgc)) {
9436 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9437 + DMA_FROM_DEVICE);
9438 + if (unlikely(!mapped_dst_nents)) {
9439 dev_err(jrdev, "unable to map destination\n");
9440 - dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
9441 - DMA_TO_DEVICE);
9442 - kfree(edesc);
9443 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9444 return ERR_PTR(-ENOMEM);
9445 }
9446 }
9447
9448 + sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
9449 + sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9450 + sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
9451 +
9452 + /* allocate space for base edesc and hw desc commands, link tables */
9453 + edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9454 + GFP_DMA | flags);
9455 + if (!edesc) {
9456 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9457 + 0, 0, 0);
9458 + return ERR_PTR(-ENOMEM);
9459 + }
9460 +
9461 edesc->src_nents = src_nents;
9462 edesc->dst_nents = dst_nents;
9463 edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
9464 desc_bytes;
9465 - *all_contig_ptr = all_contig;
9466 + *all_contig_ptr = !(mapped_src_nents > 1);
9467
9468 sec4_sg_index = 0;
9469 - if (!all_contig) {
9470 - sg_to_sec4_sg_last(req->src, src_nents,
9471 - edesc->sec4_sg + sec4_sg_index, 0);
9472 - sec4_sg_index += src_nents;
9473 + if (mapped_src_nents > 1) {
9474 + sg_to_sec4_sg_last(req->src, mapped_src_nents,
9475 + edesc->sec4_sg + sec4_sg_index, 0);
9476 + sec4_sg_index += mapped_src_nents;
9477 }
9478 - if (dst_nents) {
9479 - sg_to_sec4_sg_last(req->dst, dst_nents,
9480 + if (mapped_dst_nents > 1) {
9481 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9482 edesc->sec4_sg + sec4_sg_index, 0);
9483 }
9484
9485 @@ -2587,13 +1463,9 @@ static int aead_decrypt(struct aead_requ
9486 u32 *desc;
9487 int ret = 0;
9488
9489 -#ifdef DEBUG
9490 - bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9491 - CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
9492 - dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
9493 - DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9494 - req->assoclen + req->cryptlen, 1, may_sleep);
9495 -#endif
9496 + caam_dump_sg(KERN_ERR, "dec src@" __stringify(__LINE__)": ",
9497 + DUMP_PREFIX_ADDRESS, 16, 4, req->src,
9498 + req->assoclen + req->cryptlen, 1);
9499
9500 /* allocate extended descriptor */
9501 edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
9502 @@ -2633,51 +1505,80 @@ static struct ablkcipher_edesc *ablkciph
9503 struct device *jrdev = ctx->jrdev;
9504 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9505 GFP_KERNEL : GFP_ATOMIC;
9506 - int src_nents, dst_nents = 0, sec4_sg_bytes;
9507 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
9508 struct ablkcipher_edesc *edesc;
9509 dma_addr_t iv_dma = 0;
9510 - bool iv_contig = false;
9511 - int sgc;
9512 + bool in_contig;
9513 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9514 - int sec4_sg_index;
9515 + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9516
9517 - src_nents = sg_count(req->src, req->nbytes);
9518 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9519 + if (unlikely(src_nents < 0)) {
9520 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9521 + req->nbytes);
9522 + return ERR_PTR(src_nents);
9523 + }
9524
9525 - if (req->dst != req->src)
9526 - dst_nents = sg_count(req->dst, req->nbytes);
9527 + if (req->dst != req->src) {
9528 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9529 + if (unlikely(dst_nents < 0)) {
9530 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9531 + req->nbytes);
9532 + return ERR_PTR(dst_nents);
9533 + }
9534 + }
9535
9536 if (likely(req->src == req->dst)) {
9537 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9538 - DMA_BIDIRECTIONAL);
9539 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9540 + DMA_BIDIRECTIONAL);
9541 + if (unlikely(!mapped_src_nents)) {
9542 + dev_err(jrdev, "unable to map source\n");
9543 + return ERR_PTR(-ENOMEM);
9544 + }
9545 } else {
9546 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9547 - DMA_TO_DEVICE);
9548 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9549 - DMA_FROM_DEVICE);
9550 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9551 + DMA_TO_DEVICE);
9552 + if (unlikely(!mapped_src_nents)) {
9553 + dev_err(jrdev, "unable to map source\n");
9554 + return ERR_PTR(-ENOMEM);
9555 + }
9556 +
9557 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9558 + DMA_FROM_DEVICE);
9559 + if (unlikely(!mapped_dst_nents)) {
9560 + dev_err(jrdev, "unable to map destination\n");
9561 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9562 + return ERR_PTR(-ENOMEM);
9563 + }
9564 }
9565
9566 iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
9567 if (dma_mapping_error(jrdev, iv_dma)) {
9568 dev_err(jrdev, "unable to map IV\n");
9569 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9570 + 0, 0, 0);
9571 return ERR_PTR(-ENOMEM);
9572 }
9573
9574 - /*
9575 - * Check if iv can be contiguous with source and destination.
9576 - * If so, include it. If not, create scatterlist.
9577 - */
9578 - if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
9579 - iv_contig = true;
9580 - else
9581 - src_nents = src_nents ? : 1;
9582 - sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9583 - sizeof(struct sec4_sg_entry);
9584 + if (mapped_src_nents == 1 &&
9585 + iv_dma + ivsize == sg_dma_address(req->src)) {
9586 + in_contig = true;
9587 + sec4_sg_ents = 0;
9588 + } else {
9589 + in_contig = false;
9590 + sec4_sg_ents = 1 + mapped_src_nents;
9591 + }
9592 + dst_sg_idx = sec4_sg_ents;
9593 + sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
9594 + sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9595
9596 /* allocate space for base edesc and hw desc commands, link tables */
9597 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9598 GFP_DMA | flags);
9599 if (!edesc) {
9600 dev_err(jrdev, "could not allocate extended descriptor\n");
9601 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9602 + iv_dma, ivsize, 0, 0);
9603 return ERR_PTR(-ENOMEM);
9604 }
9605
9606 @@ -2687,23 +1588,24 @@ static struct ablkcipher_edesc *ablkciph
9607 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9608 desc_bytes;
9609
9610 - sec4_sg_index = 0;
9611 - if (!iv_contig) {
9612 + if (!in_contig) {
9613 dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
9614 - sg_to_sec4_sg_last(req->src, src_nents,
9615 + sg_to_sec4_sg_last(req->src, mapped_src_nents,
9616 edesc->sec4_sg + 1, 0);
9617 - sec4_sg_index += 1 + src_nents;
9618 }
9619
9620 - if (dst_nents) {
9621 - sg_to_sec4_sg_last(req->dst, dst_nents,
9622 - edesc->sec4_sg + sec4_sg_index, 0);
9623 + if (mapped_dst_nents > 1) {
9624 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9625 + edesc->sec4_sg + dst_sg_idx, 0);
9626 }
9627
9628 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9629 sec4_sg_bytes, DMA_TO_DEVICE);
9630 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9631 dev_err(jrdev, "unable to map S/G table\n");
9632 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9633 + iv_dma, ivsize, 0, 0);
9634 + kfree(edesc);
9635 return ERR_PTR(-ENOMEM);
9636 }
9637
9638 @@ -2715,7 +1617,7 @@ static struct ablkcipher_edesc *ablkciph
9639 sec4_sg_bytes, 1);
9640 #endif
9641
9642 - *iv_contig_out = iv_contig;
9643 + *iv_contig_out = in_contig;
9644 return edesc;
9645 }
9646
9647 @@ -2806,30 +1708,54 @@ static struct ablkcipher_edesc *ablkciph
9648 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9649 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9650 struct device *jrdev = ctx->jrdev;
9651 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
9652 - CRYPTO_TFM_REQ_MAY_SLEEP)) ?
9653 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
9654 GFP_KERNEL : GFP_ATOMIC;
9655 - int src_nents, dst_nents = 0, sec4_sg_bytes;
9656 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
9657 struct ablkcipher_edesc *edesc;
9658 dma_addr_t iv_dma = 0;
9659 - bool iv_contig = false;
9660 - int sgc;
9661 + bool out_contig;
9662 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
9663 - int sec4_sg_index;
9664 -
9665 - src_nents = sg_count(req->src, req->nbytes);
9666 + int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
9667
9668 - if (unlikely(req->dst != req->src))
9669 - dst_nents = sg_count(req->dst, req->nbytes);
9670 + src_nents = sg_nents_for_len(req->src, req->nbytes);
9671 + if (unlikely(src_nents < 0)) {
9672 + dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
9673 + req->nbytes);
9674 + return ERR_PTR(src_nents);
9675 + }
9676
9677 if (likely(req->src == req->dst)) {
9678 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9679 - DMA_BIDIRECTIONAL);
9680 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9681 + DMA_BIDIRECTIONAL);
9682 + if (unlikely(!mapped_src_nents)) {
9683 + dev_err(jrdev, "unable to map source\n");
9684 + return ERR_PTR(-ENOMEM);
9685 + }
9686 +
9687 + dst_nents = src_nents;
9688 + mapped_dst_nents = src_nents;
9689 } else {
9690 - sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
9691 - DMA_TO_DEVICE);
9692 - sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
9693 - DMA_FROM_DEVICE);
9694 + mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
9695 + DMA_TO_DEVICE);
9696 + if (unlikely(!mapped_src_nents)) {
9697 + dev_err(jrdev, "unable to map source\n");
9698 + return ERR_PTR(-ENOMEM);
9699 + }
9700 +
9701 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
9702 + if (unlikely(dst_nents < 0)) {
9703 + dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
9704 + req->nbytes);
9705 + return ERR_PTR(dst_nents);
9706 + }
9707 +
9708 + mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
9709 + DMA_FROM_DEVICE);
9710 + if (unlikely(!mapped_dst_nents)) {
9711 + dev_err(jrdev, "unable to map destination\n");
9712 + dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
9713 + return ERR_PTR(-ENOMEM);
9714 + }
9715 }
9716
9717 /*
9718 @@ -2839,21 +1765,29 @@ static struct ablkcipher_edesc *ablkciph
9719 iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
9720 if (dma_mapping_error(jrdev, iv_dma)) {
9721 dev_err(jrdev, "unable to map IV\n");
9722 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
9723 + 0, 0, 0);
9724 return ERR_PTR(-ENOMEM);
9725 }
9726
9727 - if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
9728 - iv_contig = true;
9729 - else
9730 - dst_nents = dst_nents ? : 1;
9731 - sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
9732 - sizeof(struct sec4_sg_entry);
9733 + sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
9734 + dst_sg_idx = sec4_sg_ents;
9735 + if (mapped_dst_nents == 1 &&
9736 + iv_dma + ivsize == sg_dma_address(req->dst)) {
9737 + out_contig = true;
9738 + } else {
9739 + out_contig = false;
9740 + sec4_sg_ents += 1 + mapped_dst_nents;
9741 + }
9742
9743 /* allocate space for base edesc and hw desc commands, link tables */
9744 + sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
9745 edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
9746 GFP_DMA | flags);
9747 if (!edesc) {
9748 dev_err(jrdev, "could not allocate extended descriptor\n");
9749 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9750 + iv_dma, ivsize, 0, 0);
9751 return ERR_PTR(-ENOMEM);
9752 }
9753
9754 @@ -2863,24 +1797,24 @@ static struct ablkcipher_edesc *ablkciph
9755 edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
9756 desc_bytes;
9757
9758 - sec4_sg_index = 0;
9759 - if (src_nents) {
9760 - sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
9761 - sec4_sg_index += src_nents;
9762 - }
9763 + if (mapped_src_nents > 1)
9764 + sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
9765 + 0);
9766
9767 - if (!iv_contig) {
9768 - dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
9769 + if (!out_contig) {
9770 + dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx,
9771 iv_dma, ivsize, 0);
9772 - sec4_sg_index += 1;
9773 - sg_to_sec4_sg_last(req->dst, dst_nents,
9774 - edesc->sec4_sg + sec4_sg_index, 0);
9775 + sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
9776 + edesc->sec4_sg + dst_sg_idx + 1, 0);
9777 }
9778
9779 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
9780 sec4_sg_bytes, DMA_TO_DEVICE);
9781 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
9782 dev_err(jrdev, "unable to map S/G table\n");
9783 + caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
9784 + iv_dma, ivsize, 0, 0);
9785 + kfree(edesc);
9786 return ERR_PTR(-ENOMEM);
9787 }
9788 edesc->iv_dma = iv_dma;
9789 @@ -2892,7 +1826,7 @@ static struct ablkcipher_edesc *ablkciph
9790 sec4_sg_bytes, 1);
9791 #endif
9792
9793 - *iv_contig_out = iv_contig;
9794 + *iv_contig_out = out_contig;
9795 return edesc;
9796 }
9797
9798 @@ -2903,7 +1837,7 @@ static int ablkcipher_givencrypt(struct
9799 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
9800 struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
9801 struct device *jrdev = ctx->jrdev;
9802 - bool iv_contig;
9803 + bool iv_contig = false;
9804 u32 *desc;
9805 int ret = 0;
9806
9807 @@ -2947,7 +1881,6 @@ struct caam_alg_template {
9808 } template_u;
9809 u32 class1_alg_type;
9810 u32 class2_alg_type;
9811 - u32 alg_op;
9812 };
9813
9814 static struct caam_alg_template driver_algs[] = {
9815 @@ -3132,7 +2065,6 @@ static struct caam_aead_alg driver_aeads
9816 .caam = {
9817 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9818 OP_ALG_AAI_HMAC_PRECOMP,
9819 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9820 },
9821 },
9822 {
9823 @@ -3154,7 +2086,6 @@ static struct caam_aead_alg driver_aeads
9824 .caam = {
9825 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9826 OP_ALG_AAI_HMAC_PRECOMP,
9827 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9828 },
9829 },
9830 {
9831 @@ -3176,7 +2107,6 @@ static struct caam_aead_alg driver_aeads
9832 .caam = {
9833 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9834 OP_ALG_AAI_HMAC_PRECOMP,
9835 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9836 },
9837 },
9838 {
9839 @@ -3198,7 +2128,6 @@ static struct caam_aead_alg driver_aeads
9840 .caam = {
9841 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9842 OP_ALG_AAI_HMAC_PRECOMP,
9843 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9844 },
9845 },
9846 {
9847 @@ -3220,7 +2149,6 @@ static struct caam_aead_alg driver_aeads
9848 .caam = {
9849 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9850 OP_ALG_AAI_HMAC_PRECOMP,
9851 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9852 },
9853 },
9854 {
9855 @@ -3242,7 +2170,6 @@ static struct caam_aead_alg driver_aeads
9856 .caam = {
9857 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9858 OP_ALG_AAI_HMAC_PRECOMP,
9859 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9860 },
9861 },
9862 {
9863 @@ -3264,7 +2191,6 @@ static struct caam_aead_alg driver_aeads
9864 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9865 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9866 OP_ALG_AAI_HMAC_PRECOMP,
9867 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9868 },
9869 },
9870 {
9871 @@ -3287,7 +2213,6 @@ static struct caam_aead_alg driver_aeads
9872 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9873 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9874 OP_ALG_AAI_HMAC_PRECOMP,
9875 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9876 .geniv = true,
9877 },
9878 },
9879 @@ -3310,7 +2235,6 @@ static struct caam_aead_alg driver_aeads
9880 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9881 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9882 OP_ALG_AAI_HMAC_PRECOMP,
9883 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9884 },
9885 },
9886 {
9887 @@ -3333,7 +2257,6 @@ static struct caam_aead_alg driver_aeads
9888 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9889 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9890 OP_ALG_AAI_HMAC_PRECOMP,
9891 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9892 .geniv = true,
9893 },
9894 },
9895 @@ -3356,7 +2279,6 @@ static struct caam_aead_alg driver_aeads
9896 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9897 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9898 OP_ALG_AAI_HMAC_PRECOMP,
9899 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9900 },
9901 },
9902 {
9903 @@ -3379,7 +2301,6 @@ static struct caam_aead_alg driver_aeads
9904 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9905 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9906 OP_ALG_AAI_HMAC_PRECOMP,
9907 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9908 .geniv = true,
9909 },
9910 },
9911 @@ -3402,7 +2323,6 @@ static struct caam_aead_alg driver_aeads
9912 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9913 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9914 OP_ALG_AAI_HMAC_PRECOMP,
9915 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9916 },
9917 },
9918 {
9919 @@ -3425,7 +2345,6 @@ static struct caam_aead_alg driver_aeads
9920 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9921 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
9922 OP_ALG_AAI_HMAC_PRECOMP,
9923 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
9924 .geniv = true,
9925 },
9926 },
9927 @@ -3448,7 +2367,6 @@ static struct caam_aead_alg driver_aeads
9928 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9929 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9930 OP_ALG_AAI_HMAC_PRECOMP,
9931 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9932 },
9933 },
9934 {
9935 @@ -3471,7 +2389,6 @@ static struct caam_aead_alg driver_aeads
9936 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9937 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
9938 OP_ALG_AAI_HMAC_PRECOMP,
9939 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
9940 .geniv = true,
9941 },
9942 },
9943 @@ -3494,7 +2411,6 @@ static struct caam_aead_alg driver_aeads
9944 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9945 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9946 OP_ALG_AAI_HMAC_PRECOMP,
9947 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9948 },
9949 },
9950 {
9951 @@ -3517,7 +2433,6 @@ static struct caam_aead_alg driver_aeads
9952 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
9953 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
9954 OP_ALG_AAI_HMAC_PRECOMP,
9955 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
9956 .geniv = true,
9957 },
9958 },
9959 @@ -3540,7 +2455,6 @@ static struct caam_aead_alg driver_aeads
9960 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9961 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9962 OP_ALG_AAI_HMAC_PRECOMP,
9963 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9964 }
9965 },
9966 {
9967 @@ -3563,7 +2477,6 @@ static struct caam_aead_alg driver_aeads
9968 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9969 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
9970 OP_ALG_AAI_HMAC_PRECOMP,
9971 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
9972 .geniv = true,
9973 }
9974 },
9975 @@ -3587,7 +2500,6 @@ static struct caam_aead_alg driver_aeads
9976 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9977 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9978 OP_ALG_AAI_HMAC_PRECOMP,
9979 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9980 },
9981 },
9982 {
9983 @@ -3611,7 +2523,6 @@ static struct caam_aead_alg driver_aeads
9984 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9985 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
9986 OP_ALG_AAI_HMAC_PRECOMP,
9987 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
9988 .geniv = true,
9989 },
9990 },
9991 @@ -3635,7 +2546,6 @@ static struct caam_aead_alg driver_aeads
9992 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
9993 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
9994 OP_ALG_AAI_HMAC_PRECOMP,
9995 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
9996 },
9997 },
9998 {
9999 @@ -3659,7 +2569,6 @@ static struct caam_aead_alg driver_aeads
10000 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10001 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10002 OP_ALG_AAI_HMAC_PRECOMP,
10003 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10004 .geniv = true,
10005 },
10006 },
10007 @@ -3683,7 +2592,6 @@ static struct caam_aead_alg driver_aeads
10008 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10009 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10010 OP_ALG_AAI_HMAC_PRECOMP,
10011 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10012 },
10013 },
10014 {
10015 @@ -3707,7 +2615,6 @@ static struct caam_aead_alg driver_aeads
10016 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10017 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10018 OP_ALG_AAI_HMAC_PRECOMP,
10019 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10020 .geniv = true,
10021 },
10022 },
10023 @@ -3731,7 +2638,6 @@ static struct caam_aead_alg driver_aeads
10024 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10025 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10026 OP_ALG_AAI_HMAC_PRECOMP,
10027 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10028 },
10029 },
10030 {
10031 @@ -3755,7 +2661,6 @@ static struct caam_aead_alg driver_aeads
10032 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10033 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10034 OP_ALG_AAI_HMAC_PRECOMP,
10035 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10036 .geniv = true,
10037 },
10038 },
10039 @@ -3779,7 +2684,6 @@ static struct caam_aead_alg driver_aeads
10040 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10041 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10042 OP_ALG_AAI_HMAC_PRECOMP,
10043 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10044 },
10045 },
10046 {
10047 @@ -3803,7 +2707,6 @@ static struct caam_aead_alg driver_aeads
10048 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
10049 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10050 OP_ALG_AAI_HMAC_PRECOMP,
10051 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10052 .geniv = true,
10053 },
10054 },
10055 @@ -3826,7 +2729,6 @@ static struct caam_aead_alg driver_aeads
10056 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10057 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10058 OP_ALG_AAI_HMAC_PRECOMP,
10059 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10060 },
10061 },
10062 {
10063 @@ -3849,7 +2751,6 @@ static struct caam_aead_alg driver_aeads
10064 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10065 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10066 OP_ALG_AAI_HMAC_PRECOMP,
10067 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10068 .geniv = true,
10069 },
10070 },
10071 @@ -3872,7 +2773,6 @@ static struct caam_aead_alg driver_aeads
10072 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10073 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10074 OP_ALG_AAI_HMAC_PRECOMP,
10075 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10076 },
10077 },
10078 {
10079 @@ -3895,7 +2795,6 @@ static struct caam_aead_alg driver_aeads
10080 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10081 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10082 OP_ALG_AAI_HMAC_PRECOMP,
10083 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10084 .geniv = true,
10085 },
10086 },
10087 @@ -3918,7 +2817,6 @@ static struct caam_aead_alg driver_aeads
10088 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10089 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10090 OP_ALG_AAI_HMAC_PRECOMP,
10091 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10092 },
10093 },
10094 {
10095 @@ -3941,7 +2839,6 @@ static struct caam_aead_alg driver_aeads
10096 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10097 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10098 OP_ALG_AAI_HMAC_PRECOMP,
10099 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10100 .geniv = true,
10101 },
10102 },
10103 @@ -3964,7 +2861,6 @@ static struct caam_aead_alg driver_aeads
10104 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10105 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10106 OP_ALG_AAI_HMAC_PRECOMP,
10107 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10108 },
10109 },
10110 {
10111 @@ -3987,7 +2883,6 @@ static struct caam_aead_alg driver_aeads
10112 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10113 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10114 OP_ALG_AAI_HMAC_PRECOMP,
10115 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10116 .geniv = true,
10117 },
10118 },
10119 @@ -4010,7 +2905,6 @@ static struct caam_aead_alg driver_aeads
10120 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10121 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10122 OP_ALG_AAI_HMAC_PRECOMP,
10123 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10124 },
10125 },
10126 {
10127 @@ -4033,7 +2927,6 @@ static struct caam_aead_alg driver_aeads
10128 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10129 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10130 OP_ALG_AAI_HMAC_PRECOMP,
10131 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10132 .geniv = true,
10133 },
10134 },
10135 @@ -4056,7 +2949,6 @@ static struct caam_aead_alg driver_aeads
10136 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10137 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10138 OP_ALG_AAI_HMAC_PRECOMP,
10139 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10140 },
10141 },
10142 {
10143 @@ -4079,7 +2971,6 @@ static struct caam_aead_alg driver_aeads
10144 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
10145 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10146 OP_ALG_AAI_HMAC_PRECOMP,
10147 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10148 .geniv = true,
10149 },
10150 },
10151 @@ -4104,7 +2995,6 @@ static struct caam_aead_alg driver_aeads
10152 OP_ALG_AAI_CTR_MOD128,
10153 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10154 OP_ALG_AAI_HMAC_PRECOMP,
10155 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10156 .rfc3686 = true,
10157 },
10158 },
10159 @@ -4129,7 +3019,6 @@ static struct caam_aead_alg driver_aeads
10160 OP_ALG_AAI_CTR_MOD128,
10161 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
10162 OP_ALG_AAI_HMAC_PRECOMP,
10163 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
10164 .rfc3686 = true,
10165 .geniv = true,
10166 },
10167 @@ -4155,7 +3044,6 @@ static struct caam_aead_alg driver_aeads
10168 OP_ALG_AAI_CTR_MOD128,
10169 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10170 OP_ALG_AAI_HMAC_PRECOMP,
10171 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10172 .rfc3686 = true,
10173 },
10174 },
10175 @@ -4180,7 +3068,6 @@ static struct caam_aead_alg driver_aeads
10176 OP_ALG_AAI_CTR_MOD128,
10177 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
10178 OP_ALG_AAI_HMAC_PRECOMP,
10179 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
10180 .rfc3686 = true,
10181 .geniv = true,
10182 },
10183 @@ -4206,7 +3093,6 @@ static struct caam_aead_alg driver_aeads
10184 OP_ALG_AAI_CTR_MOD128,
10185 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10186 OP_ALG_AAI_HMAC_PRECOMP,
10187 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10188 .rfc3686 = true,
10189 },
10190 },
10191 @@ -4231,7 +3117,6 @@ static struct caam_aead_alg driver_aeads
10192 OP_ALG_AAI_CTR_MOD128,
10193 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
10194 OP_ALG_AAI_HMAC_PRECOMP,
10195 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
10196 .rfc3686 = true,
10197 .geniv = true,
10198 },
10199 @@ -4257,7 +3142,6 @@ static struct caam_aead_alg driver_aeads
10200 OP_ALG_AAI_CTR_MOD128,
10201 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10202 OP_ALG_AAI_HMAC_PRECOMP,
10203 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10204 .rfc3686 = true,
10205 },
10206 },
10207 @@ -4282,7 +3166,6 @@ static struct caam_aead_alg driver_aeads
10208 OP_ALG_AAI_CTR_MOD128,
10209 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
10210 OP_ALG_AAI_HMAC_PRECOMP,
10211 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
10212 .rfc3686 = true,
10213 .geniv = true,
10214 },
10215 @@ -4308,7 +3191,6 @@ static struct caam_aead_alg driver_aeads
10216 OP_ALG_AAI_CTR_MOD128,
10217 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10218 OP_ALG_AAI_HMAC_PRECOMP,
10219 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10220 .rfc3686 = true,
10221 },
10222 },
10223 @@ -4333,7 +3215,6 @@ static struct caam_aead_alg driver_aeads
10224 OP_ALG_AAI_CTR_MOD128,
10225 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
10226 OP_ALG_AAI_HMAC_PRECOMP,
10227 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
10228 .rfc3686 = true,
10229 .geniv = true,
10230 },
10231 @@ -4359,7 +3240,6 @@ static struct caam_aead_alg driver_aeads
10232 OP_ALG_AAI_CTR_MOD128,
10233 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10234 OP_ALG_AAI_HMAC_PRECOMP,
10235 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10236 .rfc3686 = true,
10237 },
10238 },
10239 @@ -4384,7 +3264,6 @@ static struct caam_aead_alg driver_aeads
10240 OP_ALG_AAI_CTR_MOD128,
10241 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
10242 OP_ALG_AAI_HMAC_PRECOMP,
10243 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
10244 .rfc3686 = true,
10245 .geniv = true,
10246 },
10247 @@ -4399,16 +3278,34 @@ struct caam_crypto_alg {
10248
10249 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
10250 {
10251 + dma_addr_t dma_addr;
10252 +
10253 ctx->jrdev = caam_jr_alloc();
10254 if (IS_ERR(ctx->jrdev)) {
10255 pr_err("Job Ring Device allocation for transform failed\n");
10256 return PTR_ERR(ctx->jrdev);
10257 }
10258
10259 + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
10260 + offsetof(struct caam_ctx,
10261 + sh_desc_enc_dma),
10262 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10263 + if (dma_mapping_error(ctx->jrdev, dma_addr)) {
10264 + dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
10265 + caam_jr_free(ctx->jrdev);
10266 + return -ENOMEM;
10267 + }
10268 +
10269 + ctx->sh_desc_enc_dma = dma_addr;
10270 + ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
10271 + sh_desc_dec);
10272 + ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
10273 + sh_desc_givenc);
10274 + ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
10275 +
10276 /* copy descriptor header template value */
10277 - ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10278 - ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10279 - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
10280 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
10281 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
10282
10283 return 0;
10284 }
10285 @@ -4435,25 +3332,9 @@ static int caam_aead_init(struct crypto_
10286
10287 static void caam_exit_common(struct caam_ctx *ctx)
10288 {
10289 - if (ctx->sh_desc_enc_dma &&
10290 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
10291 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
10292 - desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
10293 - if (ctx->sh_desc_dec_dma &&
10294 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
10295 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
10296 - desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
10297 - if (ctx->sh_desc_givenc_dma &&
10298 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
10299 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
10300 - desc_bytes(ctx->sh_desc_givenc),
10301 - DMA_TO_DEVICE);
10302 - if (ctx->key_dma &&
10303 - !dma_mapping_error(ctx->jrdev, ctx->key_dma))
10304 - dma_unmap_single(ctx->jrdev, ctx->key_dma,
10305 - ctx->enckeylen + ctx->split_key_pad_len,
10306 - DMA_TO_DEVICE);
10307 -
10308 + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
10309 + offsetof(struct caam_ctx, sh_desc_enc_dma),
10310 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
10311 caam_jr_free(ctx->jrdev);
10312 }
10313
10314 @@ -4529,7 +3410,6 @@ static struct caam_crypto_alg *caam_alg_
10315
10316 t_alg->caam.class1_alg_type = template->class1_alg_type;
10317 t_alg->caam.class2_alg_type = template->class2_alg_type;
10318 - t_alg->caam.alg_op = template->alg_op;
10319
10320 return t_alg;
10321 }
10322 --- /dev/null
10323 +++ b/drivers/crypto/caam/caamalg_desc.c
10324 @@ -0,0 +1,1961 @@
10325 +/*
10326 + * Shared descriptors for aead, ablkcipher algorithms
10327 + *
10328 + * Copyright 2016 NXP
10329 + */
10330 +
10331 +#include "compat.h"
10332 +#include "desc_constr.h"
10333 +#include "caamalg_desc.h"
10334 +
10335 +/*
10336 + * For aead functions, read payload and write payload,
10337 + * both of which are specified in req->src and req->dst
10338 + */
10339 +static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
10340 +{
10341 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10342 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
10343 + KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
10344 +}
10345 +
10346 +/* Set DK bit in class 1 operation if shared */
10347 +static inline void append_dec_op1(u32 *desc, u32 type)
10348 +{
10349 + u32 *jump_cmd, *uncond_jump_cmd;
10350 +
10351 + /* DK bit is valid only for AES */
10352 + if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
10353 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10354 + OP_ALG_DECRYPT);
10355 + return;
10356 + }
10357 +
10358 + jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
10359 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10360 + OP_ALG_DECRYPT);
10361 + uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10362 + set_jump_tgt_here(desc, jump_cmd);
10363 + append_operation(desc, type | OP_ALG_AS_INITFINAL |
10364 + OP_ALG_DECRYPT | OP_ALG_AAI_DK);
10365 + set_jump_tgt_here(desc, uncond_jump_cmd);
10366 +}
10367 +
10368 +/**
10369 + * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
10370 + * (non-protocol) with no (null) encryption.
10371 + * @desc: pointer to buffer used for descriptor construction
10372 + * @adata: pointer to authentication transform definitions.
10373 + * A split key is required for SEC Era < 6; the size of the split key
10374 + * is specified in this case. Valid algorithm values - one of
10375 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10376 + * with OP_ALG_AAI_HMAC_PRECOMP.
10377 + * @icvsize: integrity check value (ICV) size (truncated or full)
10378 + * @era: SEC Era
10379 + */
10380 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
10381 + unsigned int icvsize, int era)
10382 +{
10383 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
10384 +
10385 + init_sh_desc(desc, HDR_SHARE_SERIAL);
10386 +
10387 + /* Skip if already shared */
10388 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10389 + JUMP_COND_SHRD);
10390 + if (era < 6) {
10391 + if (adata->key_inline)
10392 + append_key_as_imm(desc, adata->key_virt,
10393 + adata->keylen_pad, adata->keylen,
10394 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
10395 + KEY_ENC);
10396 + else
10397 + append_key(desc, adata->key_dma, adata->keylen,
10398 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
10399 + } else {
10400 + append_proto_dkp(desc, adata);
10401 + }
10402 + set_jump_tgt_here(desc, key_jump_cmd);
10403 +
10404 + /* assoclen + cryptlen = seqinlen */
10405 + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
10406 +
10407 + /* Prepare to read and write cryptlen + assoclen bytes */
10408 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10409 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10410 +
10411 + /*
10412 + * MOVE_LEN opcode is not available in all SEC HW revisions,
10413 + * thus need to do some magic, i.e. self-patch the descriptor
10414 + * buffer.
10415 + */
10416 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10417 + MOVE_DEST_MATH3 |
10418 + (0x6 << MOVE_LEN_SHIFT));
10419 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
10420 + MOVE_DEST_DESCBUF |
10421 + MOVE_WAITCOMP |
10422 + (0x8 << MOVE_LEN_SHIFT));
10423 +
10424 + /* Class 2 operation */
10425 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10426 + OP_ALG_ENCRYPT);
10427 +
10428 + /* Read and write cryptlen bytes */
10429 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10430 +
10431 + set_move_tgt_here(desc, read_move_cmd);
10432 + set_move_tgt_here(desc, write_move_cmd);
10433 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10434 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10435 + MOVE_AUX_LS);
10436 +
10437 + /* Write ICV */
10438 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10439 + LDST_SRCDST_BYTE_CONTEXT);
10440 +
10441 +#ifdef DEBUG
10442 + print_hex_dump(KERN_ERR,
10443 + "aead null enc shdesc@" __stringify(__LINE__)": ",
10444 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10445 +#endif
10446 +}
10447 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap);
10448 +
10449 +/**
10450 + * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
10451 + * (non-protocol) with no (null) decryption.
10452 + * @desc: pointer to buffer used for descriptor construction
10453 + * @adata: pointer to authentication transform definitions.
10454 + * A split key is required for SEC Era < 6; the size of the split key
10455 + * is specified in this case. Valid algorithm values - one of
10456 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10457 + * with OP_ALG_AAI_HMAC_PRECOMP.
10458 + * @icvsize: integrity check value (ICV) size (truncated or full)
10459 + * @era: SEC Era
10460 + */
10461 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
10462 + unsigned int icvsize, int era)
10463 +{
10464 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
10465 +
10466 + init_sh_desc(desc, HDR_SHARE_SERIAL);
10467 +
10468 + /* Skip if already shared */
10469 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10470 + JUMP_COND_SHRD);
10471 + if (era < 6) {
10472 + if (adata->key_inline)
10473 + append_key_as_imm(desc, adata->key_virt,
10474 + adata->keylen_pad, adata->keylen,
10475 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
10476 + KEY_ENC);
10477 + else
10478 + append_key(desc, adata->key_dma, adata->keylen,
10479 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
10480 + } else {
10481 + append_proto_dkp(desc, adata);
10482 + }
10483 + set_jump_tgt_here(desc, key_jump_cmd);
10484 +
10485 + /* Class 2 operation */
10486 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10487 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10488 +
10489 + /* assoclen + cryptlen = seqoutlen */
10490 + append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10491 +
10492 + /* Prepare to read and write cryptlen + assoclen bytes */
10493 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
10494 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
10495 +
10496 + /*
10497 + * MOVE_LEN opcode is not available in all SEC HW revisions,
10498 + * thus need to do some magic, i.e. self-patch the descriptor
10499 + * buffer.
10500 + */
10501 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
10502 + MOVE_DEST_MATH2 |
10503 + (0x6 << MOVE_LEN_SHIFT));
10504 + write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
10505 + MOVE_DEST_DESCBUF |
10506 + MOVE_WAITCOMP |
10507 + (0x8 << MOVE_LEN_SHIFT));
10508 +
10509 + /* Read and write cryptlen bytes */
10510 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
10511 +
10512 + /*
10513 + * Insert a NOP here, since we need at least 4 instructions between
10514 + * code patching the descriptor buffer and the location being patched.
10515 + */
10516 + jump_cmd = append_jump(desc, JUMP_TEST_ALL);
10517 + set_jump_tgt_here(desc, jump_cmd);
10518 +
10519 + set_move_tgt_here(desc, read_move_cmd);
10520 + set_move_tgt_here(desc, write_move_cmd);
10521 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10522 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
10523 + MOVE_AUX_LS);
10524 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10525 +
10526 + /* Load ICV */
10527 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10528 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10529 +
10530 +#ifdef DEBUG
10531 + print_hex_dump(KERN_ERR,
10532 + "aead null dec shdesc@" __stringify(__LINE__)": ",
10533 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10534 +#endif
10535 +}
10536 +EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap);
10537 +
10538 +static void init_sh_desc_key_aead(u32 * const desc,
10539 + struct alginfo * const cdata,
10540 + struct alginfo * const adata,
10541 + const bool is_rfc3686, u32 *nonce, int era)
10542 +{
10543 + u32 *key_jump_cmd;
10544 + unsigned int enckeylen = cdata->keylen;
10545 +
10546 + /* Note: Context registers are saved. */
10547 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
10548 +
10549 + /* Skip if already shared */
10550 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10551 + JUMP_COND_SHRD);
10552 +
10553 + /*
10554 + * RFC3686 specific:
10555 + * | key = {AUTH_KEY, ENC_KEY, NONCE}
10556 + * | enckeylen = encryption key size + nonce size
10557 + */
10558 + if (is_rfc3686)
10559 + enckeylen -= CTR_RFC3686_NONCE_SIZE;
10560 +
10561 + if (era < 6) {
10562 + if (adata->key_inline)
10563 + append_key_as_imm(desc, adata->key_virt,
10564 + adata->keylen_pad, adata->keylen,
10565 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
10566 + KEY_ENC);
10567 + else
10568 + append_key(desc, adata->key_dma, adata->keylen,
10569 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
10570 + } else {
10571 + append_proto_dkp(desc, adata);
10572 + }
10573 +
10574 + if (cdata->key_inline)
10575 + append_key_as_imm(desc, cdata->key_virt, enckeylen,
10576 + enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
10577 + else
10578 + append_key(desc, cdata->key_dma, enckeylen, CLASS_1 |
10579 + KEY_DEST_CLASS_REG);
10580 +
10581 + /* Load Counter into CONTEXT1 reg */
10582 + if (is_rfc3686) {
10583 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
10584 + LDST_CLASS_IND_CCB |
10585 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
10586 + append_move(desc,
10587 + MOVE_SRC_OUTFIFO |
10588 + MOVE_DEST_CLASS1CTX |
10589 + (16 << MOVE_OFFSET_SHIFT) |
10590 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
10591 + }
10592 +
10593 + set_jump_tgt_here(desc, key_jump_cmd);
10594 +}
10595 +
10596 +/**
10597 + * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor
10598 + * (non-protocol).
10599 + * @desc: pointer to buffer used for descriptor construction
10600 + * @cdata: pointer to block cipher transform definitions
10601 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10602 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10603 + * @adata: pointer to authentication transform definitions.
10604 + * A split key is required for SEC Era < 6; the size of the split key
10605 + * is specified in this case. Valid algorithm values - one of
10606 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10607 + * with OP_ALG_AAI_HMAC_PRECOMP.
10608 + * @ivsize: initialization vector size
10609 + * @icvsize: integrity check value (ICV) size (truncated or full)
10610 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10611 + * @nonce: pointer to rfc3686 nonce
10612 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10613 + * @is_qi: true when called from caam/qi
10614 + * @era: SEC Era
10615 + */
10616 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
10617 + struct alginfo *adata, unsigned int ivsize,
10618 + unsigned int icvsize, const bool is_rfc3686,
10619 + u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
10620 + int era)
10621 +{
10622 + /* Note: Context registers are saved. */
10623 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
10624 +
10625 + /* Class 2 operation */
10626 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10627 + OP_ALG_ENCRYPT);
10628 +
10629 + if (is_qi) {
10630 + u32 *wait_load_cmd;
10631 +
10632 + /* REG3 = assoclen */
10633 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10634 + LDST_SRCDST_WORD_DECO_MATH3 |
10635 + (4 << LDST_OFFSET_SHIFT));
10636 +
10637 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10638 + JUMP_COND_CALM | JUMP_COND_NCP |
10639 + JUMP_COND_NOP | JUMP_COND_NIP |
10640 + JUMP_COND_NIFP);
10641 + set_jump_tgt_here(desc, wait_load_cmd);
10642 +
10643 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10644 + LDST_SRCDST_BYTE_CONTEXT |
10645 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10646 + }
10647 +
10648 + /* Read and write assoclen bytes */
10649 + if (is_qi || era < 3) {
10650 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10651 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10652 + } else {
10653 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10654 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10655 + }
10656 +
10657 + /* Skip assoc data */
10658 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10659 +
10660 + /* read assoc before reading payload */
10661 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10662 + FIFOLDST_VLF);
10663 +
10664 + /* Load Counter into CONTEXT1 reg */
10665 + if (is_rfc3686)
10666 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10667 + LDST_SRCDST_BYTE_CONTEXT |
10668 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10669 + LDST_OFFSET_SHIFT));
10670 +
10671 + /* Class 1 operation */
10672 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10673 + OP_ALG_ENCRYPT);
10674 +
10675 + /* Read and write cryptlen bytes */
10676 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10677 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10678 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
10679 +
10680 + /* Write ICV */
10681 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10682 + LDST_SRCDST_BYTE_CONTEXT);
10683 +
10684 +#ifdef DEBUG
10685 + print_hex_dump(KERN_ERR, "aead enc shdesc@" __stringify(__LINE__)": ",
10686 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10687 +#endif
10688 +}
10689 +EXPORT_SYMBOL(cnstr_shdsc_aead_encap);
10690 +
10691 +/**
10692 + * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor
10693 + * (non-protocol).
10694 + * @desc: pointer to buffer used for descriptor construction
10695 + * @cdata: pointer to block cipher transform definitions
10696 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10697 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10698 + * @adata: pointer to authentication transform definitions.
10699 + * A split key is required for SEC Era < 6; the size of the split key
10700 + * is specified in this case. Valid algorithm values - one of
10701 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10702 + * with OP_ALG_AAI_HMAC_PRECOMP.
10703 + * @ivsize: initialization vector size
10704 + * @icvsize: integrity check value (ICV) size (truncated or full)
10705 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10706 + * @nonce: pointer to rfc3686 nonce
10707 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10708 + * @is_qi: true when called from caam/qi
10709 + * @era: SEC Era
10710 + */
10711 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
10712 + struct alginfo *adata, unsigned int ivsize,
10713 + unsigned int icvsize, const bool geniv,
10714 + const bool is_rfc3686, u32 *nonce,
10715 + const u32 ctx1_iv_off, const bool is_qi, int era)
10716 +{
10717 + /* Note: Context registers are saved. */
10718 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
10719 +
10720 + /* Class 2 operation */
10721 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10722 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
10723 +
10724 + if (is_qi) {
10725 + u32 *wait_load_cmd;
10726 +
10727 + /* REG3 = assoclen */
10728 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10729 + LDST_SRCDST_WORD_DECO_MATH3 |
10730 + (4 << LDST_OFFSET_SHIFT));
10731 +
10732 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10733 + JUMP_COND_CALM | JUMP_COND_NCP |
10734 + JUMP_COND_NOP | JUMP_COND_NIP |
10735 + JUMP_COND_NIFP);
10736 + set_jump_tgt_here(desc, wait_load_cmd);
10737 +
10738 + if (!geniv)
10739 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10740 + LDST_SRCDST_BYTE_CONTEXT |
10741 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10742 + }
10743 +
10744 + /* Read and write assoclen bytes */
10745 + if (is_qi || era < 3) {
10746 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10747 + if (geniv)
10748 + append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
10749 + ivsize);
10750 + else
10751 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
10752 + CAAM_CMD_SZ);
10753 + } else {
10754 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10755 + if (geniv)
10756 + append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
10757 + ivsize);
10758 + else
10759 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
10760 + CAAM_CMD_SZ);
10761 + }
10762 +
10763 + /* Skip assoc data */
10764 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10765 +
10766 + /* read assoc before reading payload */
10767 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10768 + KEY_VLF);
10769 +
10770 + if (geniv) {
10771 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10772 + LDST_SRCDST_BYTE_CONTEXT |
10773 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10774 + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
10775 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
10776 + }
10777 +
10778 + /* Load Counter into CONTEXT1 reg */
10779 + if (is_rfc3686)
10780 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10781 + LDST_SRCDST_BYTE_CONTEXT |
10782 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10783 + LDST_OFFSET_SHIFT));
10784 +
10785 + /* Choose operation */
10786 + if (ctx1_iv_off)
10787 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10788 + OP_ALG_DECRYPT);
10789 + else
10790 + append_dec_op1(desc, cdata->algtype);
10791 +
10792 + /* Read and write cryptlen bytes */
10793 + append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10794 + append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
10795 + aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
10796 +
10797 + /* Load ICV */
10798 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
10799 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
10800 +
10801 +#ifdef DEBUG
10802 + print_hex_dump(KERN_ERR, "aead dec shdesc@" __stringify(__LINE__)": ",
10803 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10804 +#endif
10805 +}
10806 +EXPORT_SYMBOL(cnstr_shdsc_aead_decap);
10807 +
10808 +/**
10809 + * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor
10810 + * (non-protocol) with HW-generated initialization
10811 + * vector.
10812 + * @desc: pointer to buffer used for descriptor construction
10813 + * @cdata: pointer to block cipher transform definitions
10814 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
10815 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
10816 + * @adata: pointer to authentication transform definitions.
10817 + * A split key is required for SEC Era < 6; the size of the split key
10818 + * is specified in this case. Valid algorithm values - one of
10819 + * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
10820 + * with OP_ALG_AAI_HMAC_PRECOMP.
10821 + * @ivsize: initialization vector size
10822 + * @icvsize: integrity check value (ICV) size (truncated or full)
10823 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
10824 + * @nonce: pointer to rfc3686 nonce
10825 + * @ctx1_iv_off: IV offset in CONTEXT1 register
10826 + * @is_qi: true when called from caam/qi
10827 + * @era: SEC Era
10828 + */
10829 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
10830 + struct alginfo *adata, unsigned int ivsize,
10831 + unsigned int icvsize, const bool is_rfc3686,
10832 + u32 *nonce, const u32 ctx1_iv_off,
10833 + const bool is_qi, int era)
10834 +{
10835 + u32 geniv, moveiv;
10836 +
10837 + /* Note: Context registers are saved. */
10838 + init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
10839 +
10840 + if (is_qi) {
10841 + u32 *wait_load_cmd;
10842 +
10843 + /* REG3 = assoclen */
10844 + append_seq_load(desc, 4, LDST_CLASS_DECO |
10845 + LDST_SRCDST_WORD_DECO_MATH3 |
10846 + (4 << LDST_OFFSET_SHIFT));
10847 +
10848 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10849 + JUMP_COND_CALM | JUMP_COND_NCP |
10850 + JUMP_COND_NOP | JUMP_COND_NIP |
10851 + JUMP_COND_NIFP);
10852 + set_jump_tgt_here(desc, wait_load_cmd);
10853 + }
10854 +
10855 + if (is_rfc3686) {
10856 + if (is_qi)
10857 + append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
10858 + LDST_SRCDST_BYTE_CONTEXT |
10859 + (ctx1_iv_off << LDST_OFFSET_SHIFT));
10860 +
10861 + goto copy_iv;
10862 + }
10863 +
10864 + /* Generate IV */
10865 + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
10866 + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
10867 + NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10868 + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
10869 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10870 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
10871 + append_move(desc, MOVE_WAITCOMP |
10872 + MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
10873 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10874 + (ivsize << MOVE_LEN_SHIFT));
10875 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
10876 +
10877 +copy_iv:
10878 + /* Copy IV to class 1 context */
10879 + append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
10880 + (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
10881 + (ivsize << MOVE_LEN_SHIFT));
10882 +
10883 + /* Return to encryption */
10884 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
10885 + OP_ALG_ENCRYPT);
10886 +
10887 + /* Read and write assoclen bytes */
10888 + if (is_qi || era < 3) {
10889 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
10890 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
10891 + } else {
10892 + append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10893 + append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
10894 + }
10895 +
10896 + /* Skip assoc data */
10897 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
10898 +
10899 + /* read assoc before reading payload */
10900 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
10901 + KEY_VLF);
10902 +
10903 + /* Copy iv from outfifo to class 2 fifo */
10904 + moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
10905 + NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
10906 + append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
10907 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
10908 + append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
10909 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
10910 +
10911 + /* Load Counter into CONTEXT1 reg */
10912 + if (is_rfc3686)
10913 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
10914 + LDST_SRCDST_BYTE_CONTEXT |
10915 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
10916 + LDST_OFFSET_SHIFT));
10917 +
10918 + /* Class 1 operation */
10919 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
10920 + OP_ALG_ENCRYPT);
10921 +
10922 + /* Will write ivsize + cryptlen */
10923 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10924 +
10925 +	/* No need to reload iv */
10926 + append_seq_fifo_load(desc, ivsize,
10927 + FIFOLD_CLASS_SKIP);
10928 +
10929 + /* Will read cryptlen */
10930 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
10931 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
10932 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
10933 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
10934 +
10935 + /* Write ICV */
10936 + append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
10937 + LDST_SRCDST_BYTE_CONTEXT);
10938 +
10939 +#ifdef DEBUG
10940 + print_hex_dump(KERN_ERR,
10941 + "aead givenc shdesc@" __stringify(__LINE__)": ",
10942 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
10943 +#endif
10944 +}
10945 +EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
10946 +
10947 +/**
10948 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
10949 + * @desc: pointer to buffer used for descriptor construction
10950 + * @cdata: pointer to block cipher transform definitions
10951 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
10952 + * with OP_ALG_AAI_CBC
10953 + * @adata: pointer to authentication transform definitions.
10954 + * A split key is required for SEC Era < 6; the size of the split key
10955 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
10956 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
10957 + * @assoclen: associated data length
10958 + * @ivsize: initialization vector size
10959 + * @authsize: authentication data size
10960 + * @blocksize: block cipher size
10961 + * @era: SEC Era
10962 + */
10963 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
10964 + struct alginfo *adata, unsigned int assoclen,
10965 + unsigned int ivsize, unsigned int authsize,
10966 + unsigned int blocksize, int era)
10967 +{
10968 + u32 *key_jump_cmd, *zero_payload_jump_cmd;
10969 + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
10970 +
10971 + /*
10972 + * Compute the index (in bytes) for the LOAD with destination of
10973 + * Class 1 Data Size Register and for the LOAD that generates padding
10974 + */
10975 + if (adata->key_inline) {
10976 + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10977 + cdata->keylen - 4 * CAAM_CMD_SZ;
10978 + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
10979 + cdata->keylen - 2 * CAAM_CMD_SZ;
10980 + } else {
10981 + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10982 + 4 * CAAM_CMD_SZ;
10983 + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
10984 + 2 * CAAM_CMD_SZ;
10985 + }
10986 +
10987 + stidx = 1 << HDR_START_IDX_SHIFT;
10988 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
10989 +
10990 + /* skip key loading if they are loaded due to sharing */
10991 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
10992 + JUMP_COND_SHRD);
10993 +
10994 + if (era < 6) {
10995 + if (adata->key_inline)
10996 + append_key_as_imm(desc, adata->key_virt,
10997 + adata->keylen_pad, adata->keylen,
10998 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
10999 + KEY_ENC);
11000 + else
11001 + append_key(desc, adata->key_dma, adata->keylen,
11002 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
11003 + } else {
11004 + append_proto_dkp(desc, adata);
11005 + }
11006 +
11007 + if (cdata->key_inline)
11008 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11009 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11010 + else
11011 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11012 + KEY_DEST_CLASS_REG);
11013 +
11014 + set_jump_tgt_here(desc, key_jump_cmd);
11015 +
11016 + /* class 2 operation */
11017 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
11018 + OP_ALG_ENCRYPT);
11019 + /* class 1 operation */
11020 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11021 + OP_ALG_ENCRYPT);
11022 +
11023 + /* payloadlen = input data length - (assoclen + ivlen) */
11024 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
11025 +
11026 + /* math1 = payloadlen + icvlen */
11027 + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
11028 +
11029 + /* padlen = block_size - math1 % block_size */
11030 + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
11031 + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
11032 +
11033 + /* cryptlen = payloadlen + icvlen + padlen */
11034 + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
11035 +
11036 + /*
11037 + * update immediate data with the padding length value
11038 + * for the LOAD in the class 1 data size register.
11039 + */
11040 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
11041 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
11042 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
11043 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
11044 +
11045 +	/* overwrite PL field for the padding info FIFO entry */
11046 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
11047 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
11048 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
11049 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
11050 +
11051 + /* store encrypted payload, icv and padding */
11052 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
11053 +
11054 + /* if payload length is zero, jump to zero-payload commands */
11055 + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
11056 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11057 + JUMP_COND_MATH_Z);
11058 +
11059 + /* load iv in context1 */
11060 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11061 + LDST_CLASS_1_CCB | ivsize);
11062 +
11063 + /* read assoc for authentication */
11064 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
11065 + FIFOLD_TYPE_MSG);
11066 + /* insnoop payload */
11067 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
11068 + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
11069 +
11070 + /* jump the zero-payload commands */
11071 + append_jump(desc, JUMP_TEST_ALL | 3);
11072 +
11073 + /* zero-payload commands */
11074 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11075 +
11076 + /* load iv in context1 */
11077 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11078 + LDST_CLASS_1_CCB | ivsize);
11079 +
11080 + /* assoc data is the only data for authentication */
11081 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
11082 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
11083 +
11084 + /* send icv to encryption */
11085 + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
11086 + authsize);
11087 +
11088 + /* update class 1 data size register with padding length */
11089 + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
11090 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
11091 +
11092 + /* generate padding and send it to encryption */
11093 + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
11094 + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
11095 + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
11096 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
11097 +
11098 +#ifdef DEBUG
11099 + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
11100 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
11101 + desc_bytes(desc), 1);
11102 +#endif
11103 +}
11104 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
11105 +
11106 +/**
11107 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
11108 + * @desc: pointer to buffer used for descriptor construction
11109 + * @cdata: pointer to block cipher transform definitions
11110 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
11111 + * with OP_ALG_AAI_CBC
11112 + * @adata: pointer to authentication transform definitions.
11113 + * A split key is required for SEC Era < 6; the size of the split key
11114 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
11115 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
11116 + * @assoclen: associated data length
11117 + * @ivsize: initialization vector size
11118 + * @authsize: authentication data size
11119 + * @blocksize: block cipher size
11120 + * @era: SEC Era
11121 + */
11122 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
11123 + struct alginfo *adata, unsigned int assoclen,
11124 + unsigned int ivsize, unsigned int authsize,
11125 + unsigned int blocksize, int era)
11126 +{
11127 + u32 stidx, jumpback;
11128 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
11129 + /*
11130 + * Pointer Size bool determines the size of address pointers.
11131 + * false - Pointers fit in one 32-bit word.
11132 + * true - Pointers fit in two 32-bit words.
11133 + */
11134 + static const bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
11135 +
11136 + stidx = 1 << HDR_START_IDX_SHIFT;
11137 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
11138 +
11139 + /* skip key loading if they are loaded due to sharing */
11140 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11141 + JUMP_COND_SHRD);
11142 +
11143 + if (era < 6)
11144 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
11145 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
11146 + else
11147 + append_proto_dkp(desc, adata);
11148 +
11149 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11150 + KEY_DEST_CLASS_REG);
11151 +
11152 + set_jump_tgt_here(desc, key_jump_cmd);
11153 +
11154 + /* class 2 operation */
11155 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
11156 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11157 + /* class 1 operation */
11158 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11159 + OP_ALG_DECRYPT);
11160 +
11161 + /* VSIL = input data length - 2 * block_size */
11162 + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
11163 + blocksize);
11164 +
11165 + /*
11166 + * payloadlen + icvlen + padlen = input data length - (assoclen +
11167 + * ivsize)
11168 + */
11169 + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
11170 +
11171 + /* skip data to the last but one cipher block */
11172 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
11173 +
11174 + /* load iv for the last cipher block */
11175 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
11176 + LDST_CLASS_1_CCB | ivsize);
11177 +
11178 + /* read last cipher block */
11179 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11180 + FIFOLD_TYPE_LAST1 | blocksize);
11181 +
11182 + /* move decrypted block into math0 and math1 */
11183 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
11184 + blocksize);
11185 +
11186 + /* reset AES CHA */
11187 + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
11188 + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
11189 +
11190 + /* rewind input sequence */
11191 + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
11192 +
11193 + /* key1 is in decryption form */
11194 + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
11195 + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
11196 +
11197 + /* load iv in context1 */
11198 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
11199 + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
11200 +
11201 + /* read sequence number */
11202 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
11203 + /* load Type, Version and Len fields in math0 */
11204 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
11205 + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
11206 +
11207 + /* compute (padlen - 1) */
11208 + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
11209 +
11210 + /* math2 = icvlen + (padlen - 1) + 1 */
11211 + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
11212 +
11213 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11214 +
11215 + /* VSOL = payloadlen + icvlen + padlen */
11216 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
11217 +
11218 +#ifdef __LITTLE_ENDIAN
11219 + append_moveb(desc, MOVE_WAITCOMP |
11220 + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
11221 +#endif
11222 + /* update Len field */
11223 + append_math_sub(desc, REG0, REG0, REG2, 8);
11224 +
11225 + /* store decrypted payload, icv and padding */
11226 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
11227 +
11228 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen)*/
11229 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11230 +
11231 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11232 + JUMP_COND_MATH_Z);
11233 +
11234 + /* send Type, Version and Len(pre ICV) fields to authentication */
11235 + append_move(desc, MOVE_WAITCOMP |
11236 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11237 + (3 << MOVE_OFFSET_SHIFT) | 5);
11238 +
11239 + /* outsnooping payload */
11240 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
11241 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
11242 + FIFOLDST_VLF);
11243 + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
11244 +
11245 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11246 + /* send Type, Version and Len(pre ICV) fields to authentication */
11247 + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
11248 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
11249 + (3 << MOVE_OFFSET_SHIFT) | 5);
11250 +
11251 + set_jump_tgt_here(desc, skip_zero_jump_cmd);
11252 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
11253 +
11254 + /* load icvlen and padlen */
11255 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
11256 + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
11257 +
11258 + /* VSIL = (payloadlen + icvlen + padlen) - icvlen + padlen */
11259 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
11260 +
11261 + /*
11262 + * Start a new input sequence using the SEQ OUT PTR command options,
11263 + * pointer and length used when the current output sequence was defined.
11264 + */
11265 + if (ps) {
11266 + /*
11267 + * Move the lower 32 bits of Shared Descriptor address, the
11268 + * SEQ OUT PTR command, Output Pointer (2 words) and
11269 + * Output Length into math registers.
11270 + */
11271 +#ifdef __LITTLE_ENDIAN
11272 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11273 + MOVE_DEST_MATH0 | (55 * 4 << MOVE_OFFSET_SHIFT) |
11274 + 20);
11275 +#else
11276 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11277 + MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11278 + 20);
11279 +#endif
11280 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
11281 + append_math_and_imm_u32(desc, REG0, REG0, IMM,
11282 + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
11283 + /* Append a JUMP command after the copied fields */
11284 + jumpback = CMD_JUMP | (char)-9;
11285 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11286 + LDST_SRCDST_WORD_DECO_MATH2 |
11287 + (4 << LDST_OFFSET_SHIFT));
11288 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11289 + /* Move the updated fields back to the Job Descriptor */
11290 +#ifdef __LITTLE_ENDIAN
11291 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11292 + MOVE_DEST_DESCBUF | (55 * 4 << MOVE_OFFSET_SHIFT) |
11293 + 24);
11294 +#else
11295 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11296 + MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11297 + 24);
11298 +#endif
11299 + /*
11300 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
11301 + * and then jump back to the next command from the
11302 + * Shared Descriptor.
11303 + */
11304 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
11305 + } else {
11306 + /*
11307 + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
11308 + * Output Length into math registers.
11309 + */
11310 +#ifdef __LITTLE_ENDIAN
11311 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11312 + MOVE_DEST_MATH0 | (54 * 4 << MOVE_OFFSET_SHIFT) |
11313 + 12);
11314 +#else
11315 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
11316 + MOVE_DEST_MATH0 | (53 * 4 << MOVE_OFFSET_SHIFT) |
11317 + 12);
11318 +#endif
11319 + /* Transform SEQ OUT PTR command in SEQ IN PTR command */
11320 + append_math_and_imm_u64(desc, REG0, REG0, IMM,
11321 + ~(((u64)(CMD_SEQ_IN_PTR ^
11322 + CMD_SEQ_OUT_PTR)) << 32));
11323 + /* Append a JUMP command after the copied fields */
11324 + jumpback = CMD_JUMP | (char)-7;
11325 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
11326 + LDST_SRCDST_WORD_DECO_MATH1 |
11327 + (4 << LDST_OFFSET_SHIFT));
11328 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
11329 + /* Move the updated fields back to the Job Descriptor */
11330 +#ifdef __LITTLE_ENDIAN
11331 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11332 + MOVE_DEST_DESCBUF | (54 * 4 << MOVE_OFFSET_SHIFT) |
11333 + 16);
11334 +#else
11335 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
11336 + MOVE_DEST_DESCBUF | (53 * 4 << MOVE_OFFSET_SHIFT) |
11337 + 16);
11338 +#endif
11339 + /*
11340 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
11341 + * and then jump back to the next command from the
11342 + * Shared Descriptor.
11343 + */
11344 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
11345 + }
11346 +
11347 + /* skip payload */
11348 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
11349 + /* check icv */
11350 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
11351 + FIFOLD_TYPE_LAST2 | authsize);
11352 +
11353 +#ifdef DEBUG
11354 + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
11355 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
11356 + desc_bytes(desc), 1);
11357 +#endif
11358 +}
11359 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
11360 +
11361 +/**
11362 + * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
11363 + * @desc: pointer to buffer used for descriptor construction
11364 + * @cdata: pointer to block cipher transform definitions
11365 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11366 + * @ivsize: initialization vector size
11367 + * @icvsize: integrity check value (ICV) size (truncated or full)
11368 + * @is_qi: true when called from caam/qi
11369 + */
11370 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
11371 + unsigned int ivsize, unsigned int icvsize,
11372 + const bool is_qi)
11373 +{
11374 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1,
11375 + *zero_assoc_jump_cmd2;
11376 +
11377 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11378 +
11379 + /* skip key loading if they are loaded due to sharing */
11380 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11381 + JUMP_COND_SHRD);
11382 + if (cdata->key_inline)
11383 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11384 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11385 + else
11386 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11387 + KEY_DEST_CLASS_REG);
11388 + set_jump_tgt_here(desc, key_jump_cmd);
11389 +
11390 + /* class 1 operation */
11391 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11392 + OP_ALG_ENCRYPT);
11393 +
11394 + if (is_qi) {
11395 + u32 *wait_load_cmd;
11396 +
11397 + /* REG3 = assoclen */
11398 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11399 + LDST_SRCDST_WORD_DECO_MATH3 |
11400 + (4 << LDST_OFFSET_SHIFT));
11401 +
11402 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11403 + JUMP_COND_CALM | JUMP_COND_NCP |
11404 + JUMP_COND_NOP | JUMP_COND_NIP |
11405 + JUMP_COND_NIFP);
11406 + set_jump_tgt_here(desc, wait_load_cmd);
11407 +
11408 + append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM,
11409 + ivsize);
11410 + } else {
11411 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0,
11412 + CAAM_CMD_SZ);
11413 + }
11414 +
11415 + /* if assoclen + cryptlen is ZERO, skip to ICV write */
11416 + zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
11417 + JUMP_COND_MATH_Z);
11418 +
11419 + if (is_qi)
11420 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11421 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11422 +
11423 + /* if assoclen is ZERO, skip reading the assoc data */
11424 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11425 + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11426 + JUMP_COND_MATH_Z);
11427 +
11428 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11429 +
11430 + /* skip assoc data */
11431 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11432 +
11433 + /* cryptlen = seqinlen - assoclen */
11434 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
11435 +
11436 + /* if cryptlen is ZERO jump to zero-payload commands */
11437 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11438 + JUMP_COND_MATH_Z);
11439 +
11440 + /* read assoc data */
11441 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11442 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11443 + set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11444 +
11445 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11446 +
11447 + /* write encrypted data */
11448 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11449 +
11450 + /* read payload data */
11451 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11452 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11453 +
11454 + /* jump to ICV writing */
11455 + if (is_qi)
11456 + append_jump(desc, JUMP_TEST_ALL | 4);
11457 + else
11458 + append_jump(desc, JUMP_TEST_ALL | 2);
11459 +
11460 + /* zero-payload commands */
11461 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11462 +
11463 + /* read assoc data */
11464 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11465 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
11466 + if (is_qi)
11467 + /* jump to ICV writing */
11468 + append_jump(desc, JUMP_TEST_ALL | 2);
11469 +
11470 + /* There is no input data */
11471 + set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
11472 +
11473 + if (is_qi)
11474 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11475 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
11476 + FIFOLD_TYPE_LAST1);
11477 +
11478 + /* write ICV */
11479 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11480 + LDST_SRCDST_BYTE_CONTEXT);
11481 +
11482 +#ifdef DEBUG
11483 + print_hex_dump(KERN_ERR, "gcm enc shdesc@" __stringify(__LINE__)": ",
11484 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11485 +#endif
11486 +}
11487 +EXPORT_SYMBOL(cnstr_shdsc_gcm_encap);
11488 +
11489 +/**
11490 + * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor
11491 + * @desc: pointer to buffer used for descriptor construction
11492 + * @cdata: pointer to block cipher transform definitions
11493 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11494 + * @ivsize: initialization vector size
11495 + * @icvsize: integrity check value (ICV) size (truncated or full)
11496 + * @is_qi: true when called from caam/qi
11497 + */
11498 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
11499 + unsigned int ivsize, unsigned int icvsize,
11500 + const bool is_qi)
11501 +{
11502 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1;
11503 +
11504 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11505 +
11506 + /* skip key loading if they are loaded due to sharing */
11507 + key_jump_cmd = append_jump(desc, JUMP_JSL |
11508 + JUMP_TEST_ALL | JUMP_COND_SHRD);
11509 + if (cdata->key_inline)
11510 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11511 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11512 + else
11513 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11514 + KEY_DEST_CLASS_REG);
11515 + set_jump_tgt_here(desc, key_jump_cmd);
11516 +
11517 + /* class 1 operation */
11518 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11519 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11520 +
11521 + if (is_qi) {
11522 + u32 *wait_load_cmd;
11523 +
11524 + /* REG3 = assoclen */
11525 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11526 + LDST_SRCDST_WORD_DECO_MATH3 |
11527 + (4 << LDST_OFFSET_SHIFT));
11528 +
11529 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11530 + JUMP_COND_CALM | JUMP_COND_NCP |
11531 + JUMP_COND_NOP | JUMP_COND_NIP |
11532 + JUMP_COND_NIFP);
11533 + set_jump_tgt_here(desc, wait_load_cmd);
11534 +
11535 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11536 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11537 + }
11538 +
11539 + /* if assoclen is ZERO, skip reading the assoc data */
11540 + append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
11541 + zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
11542 + JUMP_COND_MATH_Z);
11543 +
11544 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11545 +
11546 + /* skip assoc data */
11547 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11548 +
11549 + /* read assoc data */
11550 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11551 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11552 +
11553 + set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
11554 +
11555 + /* cryptlen = seqoutlen - assoclen */
11556 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11557 +
11558 + /* jump to zero-payload command if cryptlen is zero */
11559 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
11560 + JUMP_COND_MATH_Z);
11561 +
11562 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11563 +
11564 + /* store encrypted data */
11565 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11566 +
11567 + /* read payload data */
11568 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11569 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11570 +
11571 + /* zero-payload command */
11572 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
11573 +
11574 + /* read ICV */
11575 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11576 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11577 +
11578 +#ifdef DEBUG
11579 + print_hex_dump(KERN_ERR, "gcm dec shdesc@" __stringify(__LINE__)": ",
11580 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11581 +#endif
11582 +}
11583 +EXPORT_SYMBOL(cnstr_shdsc_gcm_decap);
11584 +
11585 +/**
11586 + * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor
11587 + * (non-protocol).
11588 + * @desc: pointer to buffer used for descriptor construction
11589 + * @cdata: pointer to block cipher transform definitions
11590 + * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM.
11591 + * @ivsize: initialization vector size
11592 + * @icvsize: integrity check value (ICV) size (truncated or full)
11593 + * @is_qi: true when called from caam/qi
11594 + */
11595 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
11596 + unsigned int ivsize, unsigned int icvsize,
11597 + const bool is_qi)
11598 +{
11599 + u32 *key_jump_cmd;
11600 +
11601 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11602 +
11603 + /* Skip key loading if it is loaded due to sharing */
11604 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11605 + JUMP_COND_SHRD);
11606 + if (cdata->key_inline)
11607 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11608 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11609 + else
11610 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11611 + KEY_DEST_CLASS_REG);
11612 + set_jump_tgt_here(desc, key_jump_cmd);
11613 +
11614 + /* Class 1 operation */
11615 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11616 + OP_ALG_ENCRYPT);
11617 +
11618 + if (is_qi) {
11619 + u32 *wait_load_cmd;
11620 +
11621 + /* REG3 = assoclen */
11622 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11623 + LDST_SRCDST_WORD_DECO_MATH3 |
11624 + (4 << LDST_OFFSET_SHIFT));
11625 +
11626 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11627 + JUMP_COND_CALM | JUMP_COND_NCP |
11628 + JUMP_COND_NOP | JUMP_COND_NIP |
11629 + JUMP_COND_NIFP);
11630 + set_jump_tgt_here(desc, wait_load_cmd);
11631 +
11632 + /* Read salt and IV */
11633 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11634 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11635 + FIFOLD_TYPE_IV);
11636 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11637 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11638 + }
11639 +
11640 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11641 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11642 +
11643 + /* Read assoc data */
11644 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11645 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11646 +
11647 + /* Skip IV */
11648 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11649 +
11650 + /* Will read cryptlen bytes */
11651 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11652 +
11653 + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11654 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11655 +
11656 + /* Skip assoc data */
11657 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11658 +
11659 + /* cryptlen = seqoutlen - assoclen */
11660 + append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
11661 +
11662 + /* Write encrypted data */
11663 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11664 +
11665 + /* Read payload data */
11666 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11667 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11668 +
11669 + /* Write ICV */
11670 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11671 + LDST_SRCDST_BYTE_CONTEXT);
11672 +
11673 +#ifdef DEBUG
11674 + print_hex_dump(KERN_ERR,
11675 + "rfc4106 enc shdesc@" __stringify(__LINE__)": ",
11676 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11677 +#endif
11678 +}
11679 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap);
11680 +
11681 +/**
11682 + * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor
11683 + * (non-protocol).
11684 + * @desc: pointer to buffer used for descriptor construction
11685 + * @cdata: pointer to block cipher transform definitions
11686 + * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11687 + * @ivsize: initialization vector size
11688 + * @icvsize: integrity check value (ICV) size (truncated or full)
11689 + * @is_qi: true when called from caam/qi
11690 + */
11691 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
11692 + unsigned int ivsize, unsigned int icvsize,
11693 + const bool is_qi)
11694 +{
11695 + u32 *key_jump_cmd;
11696 +
11697 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11698 +
11699 + /* Skip key loading if it is loaded due to sharing */
11700 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11701 + JUMP_COND_SHRD);
11702 + if (cdata->key_inline)
11703 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11704 + cdata->keylen, CLASS_1 |
11705 + KEY_DEST_CLASS_REG);
11706 + else
11707 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11708 + KEY_DEST_CLASS_REG);
11709 + set_jump_tgt_here(desc, key_jump_cmd);
11710 +
11711 + /* Class 1 operation */
11712 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11713 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11714 +
11715 + if (is_qi) {
11716 + u32 *wait_load_cmd;
11717 +
11718 + /* REG3 = assoclen */
11719 + append_seq_load(desc, 4, LDST_CLASS_DECO |
11720 + LDST_SRCDST_WORD_DECO_MATH3 |
11721 + (4 << LDST_OFFSET_SHIFT));
11722 +
11723 + wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11724 + JUMP_COND_CALM | JUMP_COND_NCP |
11725 + JUMP_COND_NOP | JUMP_COND_NIP |
11726 + JUMP_COND_NIFP);
11727 + set_jump_tgt_here(desc, wait_load_cmd);
11728 +
11729 + /* Read salt and IV */
11730 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11731 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11732 + FIFOLD_TYPE_IV);
11733 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11734 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11735 + }
11736 +
11737 + append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize);
11738 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
11739 +
11740 + /* Read assoc data */
11741 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11742 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
11743 +
11744 + /* Skip IV */
11745 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP);
11746 +
11747 + /* Will read cryptlen bytes */
11748 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
11749 +
11750 + /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
11751 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
11752 +
11753 + /* Skip assoc data */
11754 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
11755 +
11756 + /* Will write cryptlen bytes */
11757 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11758 +
11759 + /* Store payload data */
11760 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11761 +
11762 + /* Read encrypted data */
11763 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
11764 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
11765 +
11766 + /* Read ICV */
11767 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11768 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11769 +
11770 +#ifdef DEBUG
11771 + print_hex_dump(KERN_ERR,
11772 + "rfc4106 dec shdesc@" __stringify(__LINE__)": ",
11773 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11774 +#endif
11775 +}
11776 +EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap);
11777 +
11778 +/**
11779 + * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor
11780 + * (non-protocol).
11781 + * @desc: pointer to buffer used for descriptor construction
11782 + * @cdata: pointer to block cipher transform definitions
11783 + * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11784 + * @ivsize: initialization vector size
11785 + * @icvsize: integrity check value (ICV) size (truncated or full)
11786 + * @is_qi: true when called from caam/qi
11787 + */
11788 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
11789 + unsigned int ivsize, unsigned int icvsize,
11790 + const bool is_qi)
11791 +{
11792 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11793 +
11794 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11795 +
11796 + /* Skip key loading if it is loaded due to sharing */
11797 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11798 + JUMP_COND_SHRD);
11799 + if (cdata->key_inline)
11800 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11801 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11802 + else
11803 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11804 + KEY_DEST_CLASS_REG);
11805 + set_jump_tgt_here(desc, key_jump_cmd);
11806 +
11807 + /* Class 1 operation */
11808 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11809 + OP_ALG_ENCRYPT);
11810 +
11811 + if (is_qi) {
11812 + /* assoclen is not needed, skip it */
11813 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11814 +
11815 + /* Read salt and IV */
11816 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11817 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11818 + FIFOLD_TYPE_IV);
11819 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11820 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11821 + }
11822 +
11823 + /* assoclen + cryptlen = seqinlen */
11824 + append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
11825 +
11826 + /*
11827 + * MOVE_LEN opcode is not available in all SEC HW revisions,
11828 + * thus need to do some magic, i.e. self-patch the descriptor
11829 + * buffer.
11830 + */
11831 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11832 + (0x6 << MOVE_LEN_SHIFT));
11833 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11834 + (0x8 << MOVE_LEN_SHIFT));
11835 +
11836 + /* Will read assoclen + cryptlen bytes */
11837 + append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11838 +
11839 + /* Will write assoclen + cryptlen bytes */
11840 + append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11841 +
11842 + /* Read and write assoclen + cryptlen bytes */
11843 + aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
11844 +
11845 + set_move_tgt_here(desc, read_move_cmd);
11846 + set_move_tgt_here(desc, write_move_cmd);
11847 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11848 + /* Move payload data to OFIFO */
11849 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11850 +
11851 + /* Write ICV */
11852 + append_seq_store(desc, icvsize, LDST_CLASS_1_CCB |
11853 + LDST_SRCDST_BYTE_CONTEXT);
11854 +
11855 +#ifdef DEBUG
11856 + print_hex_dump(KERN_ERR,
11857 + "rfc4543 enc shdesc@" __stringify(__LINE__)": ",
11858 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11859 +#endif
11860 +}
11861 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap);
11862 +
11863 +/**
11864 + * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor
11865 + * (non-protocol).
11866 + * @desc: pointer to buffer used for descriptor construction
11867 + * @cdata: pointer to block cipher transform definitions
11868 + * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_GCM.
11869 + * @ivsize: initialization vector size
11870 + * @icvsize: integrity check value (ICV) size (truncated or full)
11871 + * @is_qi: true when called from caam/qi
11872 + */
11873 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
11874 + unsigned int ivsize, unsigned int icvsize,
11875 + const bool is_qi)
11876 +{
11877 + u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
11878 +
11879 + init_sh_desc(desc, HDR_SHARE_SERIAL);
11880 +
11881 + /* Skip key loading if it is loaded due to sharing */
11882 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11883 + JUMP_COND_SHRD);
11884 + if (cdata->key_inline)
11885 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11886 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11887 + else
11888 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
11889 + KEY_DEST_CLASS_REG);
11890 + set_jump_tgt_here(desc, key_jump_cmd);
11891 +
11892 + /* Class 1 operation */
11893 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
11894 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
11895 +
11896 + if (is_qi) {
11897 + /* assoclen is not needed, skip it */
11898 + append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP);
11899 +
11900 + /* Read salt and IV */
11901 + append_fifo_load_as_imm(desc, (void *)(cdata->key_virt +
11902 + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 |
11903 + FIFOLD_TYPE_IV);
11904 + append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 |
11905 + FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
11906 + }
11907 +
11908 + /* assoclen + cryptlen = seqoutlen */
11909 + append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11910 +
11911 + /*
11912 + * MOVE_LEN opcode is not available in all SEC HW revisions,
11913 + * thus need to do some magic, i.e. self-patch the descriptor
11914 + * buffer.
11915 + */
11916 + read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
11917 + (0x6 << MOVE_LEN_SHIFT));
11918 + write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
11919 + (0x8 << MOVE_LEN_SHIFT));
11920 +
11921 + /* Will read assoclen + cryptlen bytes */
11922 + append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11923 +
11924 + /* Will write assoclen + cryptlen bytes */
11925 + append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
11926 +
11927 + /* Store payload data */
11928 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
11929 +
11930 + /* In-snoop assoclen + cryptlen data */
11931 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
11932 + FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
11933 +
11934 + set_move_tgt_here(desc, read_move_cmd);
11935 + set_move_tgt_here(desc, write_move_cmd);
11936 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
11937 + /* Move payload data to OFIFO */
11938 + append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
11939 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
11940 +
11941 + /* Read ICV */
11942 + append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 |
11943 + FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
11944 +
11945 +#ifdef DEBUG
11946 + print_hex_dump(KERN_ERR,
11947 + "rfc4543 dec shdesc@" __stringify(__LINE__)": ",
11948 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
11949 +#endif
11950 +}
11951 +EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
11952 +
11953 +/*
11954 + * For ablkcipher encrypt and decrypt, read from req->src and
11955 + * write to req->dst
11956 + */
11957 +static inline void ablkcipher_append_src_dst(u32 *desc)
11958 +{
11959 + append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11960 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
11961 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
11962 + KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
11963 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
11964 +}
11965 +
11966 +/**
11967 + * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
11968 + * @desc: pointer to buffer used for descriptor construction
11969 + * @cdata: pointer to block cipher transform definitions
11970 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
11971 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
11972 + * @ivsize: initialization vector size
11973 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
11974 + * @ctx1_iv_off: IV offset in CONTEXT1 register
11975 + */
11976 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
11977 + unsigned int ivsize, const bool is_rfc3686,
11978 + const u32 ctx1_iv_off)
11979 +{
11980 + u32 *key_jump_cmd;
11981 +
11982 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
11983 + /* Skip if already shared */
11984 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
11985 + JUMP_COND_SHRD);
11986 +
11987 + /* Load class1 key only */
11988 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
11989 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
11990 +
11991 + /* Load nonce into CONTEXT1 reg */
11992 + if (is_rfc3686) {
11993 + const u8 *nonce = cdata->key_virt + cdata->keylen;
11994 +
11995 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
11996 + LDST_CLASS_IND_CCB |
11997 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
11998 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
11999 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
12000 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
12001 + }
12002 +
12003 + set_jump_tgt_here(desc, key_jump_cmd);
12004 +
12005 + /* Load iv */
12006 + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
12007 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12008 +
12009 + /* Load counter into CONTEXT1 reg */
12010 + if (is_rfc3686)
12011 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12012 + LDST_SRCDST_BYTE_CONTEXT |
12013 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12014 + LDST_OFFSET_SHIFT));
12015 +
12016 + /* Load operation */
12017 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12018 + OP_ALG_ENCRYPT);
12019 +
12020 + /* Perform operation */
12021 + ablkcipher_append_src_dst(desc);
12022 +
12023 +#ifdef DEBUG
12024 + print_hex_dump(KERN_ERR,
12025 + "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
12026 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12027 +#endif
12028 +}
12029 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
12030 +
12031 +/**
12032 + * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
12033 + * @desc: pointer to buffer used for descriptor construction
12034 + * @cdata: pointer to block cipher transform definitions
12035 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
12036 + * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
12037 + * @ivsize: initialization vector size
12038 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
12039 + * @ctx1_iv_off: IV offset in CONTEXT1 register
12040 + */
12041 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
12042 + unsigned int ivsize, const bool is_rfc3686,
12043 + const u32 ctx1_iv_off)
12044 +{
12045 + u32 *key_jump_cmd;
12046 +
12047 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12048 + /* Skip if already shared */
12049 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12050 + JUMP_COND_SHRD);
12051 +
12052 + /* Load class1 key only */
12053 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12054 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12055 +
12056 + /* Load nonce into CONTEXT1 reg */
12057 + if (is_rfc3686) {
12058 + const u8 *nonce = cdata->key_virt + cdata->keylen;
12059 +
12060 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
12061 + LDST_CLASS_IND_CCB |
12062 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
12063 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
12064 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
12065 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
12066 + }
12067 +
12068 + set_jump_tgt_here(desc, key_jump_cmd);
12069 +
12070 + /* load IV */
12071 + append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
12072 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12073 +
12074 + /* Load counter into CONTEXT1 reg */
12075 + if (is_rfc3686)
12076 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12077 + LDST_SRCDST_BYTE_CONTEXT |
12078 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12079 + LDST_OFFSET_SHIFT));
12080 +
12081 + /* Choose operation */
12082 + if (ctx1_iv_off)
12083 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12084 + OP_ALG_DECRYPT);
12085 + else
12086 + append_dec_op1(desc, cdata->algtype);
12087 +
12088 + /* Perform operation */
12089 + ablkcipher_append_src_dst(desc);
12090 +
12091 +#ifdef DEBUG
12092 + print_hex_dump(KERN_ERR,
12093 + "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
12094 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12095 +#endif
12096 +}
12097 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
12098 +
12099 +/**
12100 + * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
12101 + * with HW-generated initialization vector.
12102 + * @desc: pointer to buffer used for descriptor construction
12103 + * @cdata: pointer to block cipher transform definitions
12104 + * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ORed
12105 + * with OP_ALG_AAI_CBC.
12106 + * @ivsize: initialization vector size
12107 + * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
12108 + * @ctx1_iv_off: IV offset in CONTEXT1 register
12109 + */
12110 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
12111 + unsigned int ivsize, const bool is_rfc3686,
12112 + const u32 ctx1_iv_off)
12113 +{
12114 + u32 *key_jump_cmd, geniv;
12115 +
12116 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12117 + /* Skip if already shared */
12118 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12119 + JUMP_COND_SHRD);
12120 +
12121 + /* Load class1 key only */
12122 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12123 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12124 +
12125 + /* Load Nonce into CONTEXT1 reg */
12126 + if (is_rfc3686) {
12127 + const u8 *nonce = cdata->key_virt + cdata->keylen;
12128 +
12129 + append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
12130 + LDST_CLASS_IND_CCB |
12131 + LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
12132 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
12133 + MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
12134 + (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
12135 + }
12136 + set_jump_tgt_here(desc, key_jump_cmd);
12137 +
12138 + /* Generate IV */
12139 + geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
12140 + NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
12141 + (ivsize << NFIFOENTRY_DLEN_SHIFT);
12142 + append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
12143 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
12144 + append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
12145 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
12146 + MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
12147 + (ctx1_iv_off << MOVE_OFFSET_SHIFT));
12148 + append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
12149 +
12150 + /* Copy generated IV to memory */
12151 + append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
12152 + LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
12153 +
12154 + /* Load Counter into CONTEXT1 reg */
12155 + if (is_rfc3686)
12156 + append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
12157 + LDST_SRCDST_BYTE_CONTEXT |
12158 + ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
12159 + LDST_OFFSET_SHIFT));
12160 +
12161 + if (ctx1_iv_off)
12162 + append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
12163 + (1 << JUMP_OFFSET_SHIFT));
12164 +
12165 + /* Load operation */
12166 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12167 + OP_ALG_ENCRYPT);
12168 +
12169 + /* Perform operation */
12170 + ablkcipher_append_src_dst(desc);
12171 +
12172 +#ifdef DEBUG
12173 + print_hex_dump(KERN_ERR,
12174 + "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
12175 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12176 +#endif
12177 +}
12178 +EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
12179 +
12180 +/**
12181 + * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
12182 + * descriptor
12183 + * @desc: pointer to buffer used for descriptor construction
12184 + * @cdata: pointer to block cipher transform definitions
12185 + * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
12186 + */
12187 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
12188 +{
12189 + __be64 sector_size = cpu_to_be64(512);
12190 + u32 *key_jump_cmd;
12191 +
12192 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12193 + /* Skip if already shared */
12194 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12195 + JUMP_COND_SHRD);
12196 +
12197 + /* Load class1 keys only */
12198 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12199 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12200 +
12201 + /* Load sector size with index 40 bytes (0x28) */
12202 + append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12203 + LDST_SRCDST_BYTE_CONTEXT |
12204 + (0x28 << LDST_OFFSET_SHIFT));
12205 +
12206 + set_jump_tgt_here(desc, key_jump_cmd);
12207 +
12208 + /*
12209 + * create sequence for loading the sector index
12210 + * Upper 8B of IV - will be used as sector index
12211 + * Lower 8B of IV - will be discarded
12212 + */
12213 + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12214 + (0x20 << LDST_OFFSET_SHIFT));
12215 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12216 +
12217 + /* Load operation */
12218 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
12219 + OP_ALG_ENCRYPT);
12220 +
12221 + /* Perform operation */
12222 + ablkcipher_append_src_dst(desc);
12223 +
12224 +#ifdef DEBUG
12225 + print_hex_dump(KERN_ERR,
12226 + "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
12227 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12228 +#endif
12229 +}
12230 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
12231 +
12232 +/**
12233 + * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
12234 + * descriptor
12235 + * @desc: pointer to buffer used for descriptor construction
12236 + * @cdata: pointer to block cipher transform definitions
12237 + * Valid algorithm values - OP_ALG_ALGSEL_AES ORed with OP_ALG_AAI_XTS.
12238 + */
12239 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
12240 +{
12241 + __be64 sector_size = cpu_to_be64(512);
12242 + u32 *key_jump_cmd;
12243 +
12244 + init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
12245 + /* Skip if already shared */
12246 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
12247 + JUMP_COND_SHRD);
12248 +
12249 + /* Load class1 key only */
12250 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
12251 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
12252 +
12253 + /* Load sector size with index 40 bytes (0x28) */
12254 + append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB |
12255 + LDST_SRCDST_BYTE_CONTEXT |
12256 + (0x28 << LDST_OFFSET_SHIFT));
12257 +
12258 + set_jump_tgt_here(desc, key_jump_cmd);
12259 +
12260 + /*
12261 + * create sequence for loading the sector index
12262 + * Upper 8B of IV - will be used as sector index
12263 + * Lower 8B of IV - will be discarded
12264 + */
12265 + append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
12266 + (0x20 << LDST_OFFSET_SHIFT));
12267 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
12268 +
12269 + /* Load operation */
12270 + append_dec_op1(desc, cdata->algtype);
12271 +
12272 + /* Perform operation */
12273 + ablkcipher_append_src_dst(desc);
12274 +
12275 +#ifdef DEBUG
12276 + print_hex_dump(KERN_ERR,
12277 + "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
12278 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
12279 +#endif
12280 +}
12281 +EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
12282 +
12283 +MODULE_LICENSE("GPL");
12284 +MODULE_DESCRIPTION("FSL CAAM descriptor support");
12285 +MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
12286 --- /dev/null
12287 +++ b/drivers/crypto/caam/caamalg_desc.h
12288 @@ -0,0 +1,127 @@
12289 +/*
12290 + * Shared descriptors for aead, ablkcipher algorithms
12291 + *
12292 + * Copyright 2016 NXP
12293 + */
12294 +
12295 +#ifndef _CAAMALG_DESC_H_
12296 +#define _CAAMALG_DESC_H_
12297 +
12298 +/* length of descriptors text */
12299 +#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
12300 +#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
12301 +#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
12302 +#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
12303 +#define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
12304 +#define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
12305 +#define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
12306 +
12307 +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
12308 +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
12309 +
12310 +/* Note: Nonce is counted in cdata.keylen */
12311 +#define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
12312 +
12313 +#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
12314 +#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
12315 +#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
12316 +
12317 +#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
12318 +#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
12319 +#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
12320 +#define DESC_QI_GCM_ENC_LEN (DESC_GCM_ENC_LEN + 6 * CAAM_CMD_SZ)
12321 +#define DESC_QI_GCM_DEC_LEN (DESC_GCM_DEC_LEN + 3 * CAAM_CMD_SZ)
12322 +
12323 +#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
12324 +#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12325 +#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
12326 +#define DESC_QI_RFC4106_ENC_LEN (DESC_RFC4106_ENC_LEN + 5 * CAAM_CMD_SZ)
12327 +#define DESC_QI_RFC4106_DEC_LEN (DESC_RFC4106_DEC_LEN + 5 * CAAM_CMD_SZ)
12328 +
12329 +#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
12330 +#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
12331 +#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)
12332 +#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
12333 +#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
12334 +
12335 +#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
12336 +#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
12337 + 20 * CAAM_CMD_SZ)
12338 +#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
12339 + 15 * CAAM_CMD_SZ)
12340 +
12341 +void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
12342 + unsigned int icvsize, int era);
12343 +
12344 +void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
12345 + unsigned int icvsize, int era);
12346 +
12347 +void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
12348 + struct alginfo *adata, unsigned int ivsize,
12349 + unsigned int icvsize, const bool is_rfc3686,
12350 + u32 *nonce, const u32 ctx1_iv_off,
12351 + const bool is_qi, int era);
12352 +
12353 +void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
12354 + struct alginfo *adata, unsigned int ivsize,
12355 + unsigned int icvsize, const bool geniv,
12356 + const bool is_rfc3686, u32 *nonce,
12357 + const u32 ctx1_iv_off, const bool is_qi, int era);
12358 +
12359 +void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
12360 + struct alginfo *adata, unsigned int ivsize,
12361 + unsigned int icvsize, const bool is_rfc3686,
12362 + u32 *nonce, const u32 ctx1_iv_off,
12363 + const bool is_qi, int era);
12364 +
12365 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
12366 + struct alginfo *adata, unsigned int assoclen,
12367 + unsigned int ivsize, unsigned int authsize,
12368 + unsigned int blocksize, int era);
12369 +
12370 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
12371 + struct alginfo *adata, unsigned int assoclen,
12372 + unsigned int ivsize, unsigned int authsize,
12373 + unsigned int blocksize, int era);
12374 +
12375 +void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
12376 + unsigned int ivsize, unsigned int icvsize,
12377 + const bool is_qi);
12378 +
12379 +void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata,
12380 + unsigned int ivsize, unsigned int icvsize,
12381 + const bool is_qi);
12382 +
12383 +void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata,
12384 + unsigned int ivsize, unsigned int icvsize,
12385 + const bool is_qi);
12386 +
12387 +void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata,
12388 + unsigned int ivsize, unsigned int icvsize,
12389 + const bool is_qi);
12390 +
12391 +void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata,
12392 + unsigned int ivsize, unsigned int icvsize,
12393 + const bool is_qi);
12394 +
12395 +void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
12396 + unsigned int ivsize, unsigned int icvsize,
12397 + const bool is_qi);
12398 +
12399 +void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
12400 + unsigned int ivsize, const bool is_rfc3686,
12401 + const u32 ctx1_iv_off);
12402 +
12403 +void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
12404 + unsigned int ivsize, const bool is_rfc3686,
12405 + const u32 ctx1_iv_off);
12406 +
12407 +void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
12408 + unsigned int ivsize, const bool is_rfc3686,
12409 + const u32 ctx1_iv_off);
12410 +
12411 +void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
12412 +
12413 +void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
12414 +
12415 +#endif /* _CAAMALG_DESC_H_ */
12416 --- /dev/null
12417 +++ b/drivers/crypto/caam/caamalg_qi.c
12418 @@ -0,0 +1,2929 @@
12419 +/*
12420 + * Freescale FSL CAAM support for crypto API over QI backend.
12421 + * Based on caamalg.c
12422 + *
12423 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
12424 + * Copyright 2016-2017 NXP
12425 + */
12426 +
12427 +#include "compat.h"
12428 +#include "ctrl.h"
12429 +#include "regs.h"
12430 +#include "intern.h"
12431 +#include "desc_constr.h"
12432 +#include "error.h"
12433 +#include "sg_sw_qm.h"
12434 +#include "key_gen.h"
12435 +#include "qi.h"
12436 +#include "jr.h"
12437 +#include "caamalg_desc.h"
12438 +
12439 +/*
12440 + * crypto alg
12441 + */
12442 +#define CAAM_CRA_PRIORITY 2000
12443 +/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
12444 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
12445 + SHA512_DIGEST_SIZE * 2)
12446 +
12447 +#define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
12448 + CAAM_MAX_KEY_SIZE)
12449 +#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
12450 +
12451 +struct caam_alg_entry {
12452 + int class1_alg_type;
12453 + int class2_alg_type;
12454 + bool rfc3686;
12455 + bool geniv;
12456 +};
12457 +
12458 +struct caam_aead_alg {
12459 + struct aead_alg aead;
12460 + struct caam_alg_entry caam;
12461 + bool registered;
12462 +};
12463 +
12464 +/*
12465 + * per-session context
12466 + */
12467 +struct caam_ctx {
12468 + struct device *jrdev;
12469 + u32 sh_desc_enc[DESC_MAX_USED_LEN];
12470 + u32 sh_desc_dec[DESC_MAX_USED_LEN];
12471 + u32 sh_desc_givenc[DESC_MAX_USED_LEN];
12472 + u8 key[CAAM_MAX_KEY_SIZE];
12473 + dma_addr_t key_dma;
12474 + struct alginfo adata;
12475 + struct alginfo cdata;
12476 + unsigned int authsize;
12477 + struct device *qidev;
12478 + spinlock_t lock; /* Protects multiple init of driver context */
12479 + struct caam_drv_ctx *drv_ctx[NUM_OP];
12480 +};
12481 +
12482 +static int aead_set_sh_desc(struct crypto_aead *aead)
12483 +{
12484 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
12485 + typeof(*alg), aead);
12486 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12487 + unsigned int ivsize = crypto_aead_ivsize(aead);
12488 + u32 ctx1_iv_off = 0;
12489 + u32 *nonce = NULL;
12490 + unsigned int data_len[2];
12491 + u32 inl_mask;
12492 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12493 + OP_ALG_AAI_CTR_MOD128);
12494 + const bool is_rfc3686 = alg->caam.rfc3686;
12495 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
12496 +
12497 + if (!ctx->cdata.keylen || !ctx->authsize)
12498 + return 0;
12499 +
12500 + /*
12501 + * AES-CTR needs to load IV in CONTEXT1 reg
12502 + * at an offset of 128bits (16bytes)
12503 + * CONTEXT1[255:128] = IV
12504 + */
12505 + if (ctr_mode)
12506 + ctx1_iv_off = 16;
12507 +
12508 + /*
12509 + * RFC3686 specific:
12510 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12511 + */
12512 + if (is_rfc3686) {
12513 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12514 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
12515 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
12516 + }
12517 +
12518 + data_len[0] = ctx->adata.keylen_pad;
12519 + data_len[1] = ctx->cdata.keylen;
12520 +
12521 + if (alg->caam.geniv)
12522 + goto skip_enc;
12523 +
12524 + /* aead_encrypt shared descriptor */
12525 + if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
12526 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12527 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12528 + ARRAY_SIZE(data_len)) < 0)
12529 + return -EINVAL;
12530 +
12531 + if (inl_mask & 1)
12532 + ctx->adata.key_virt = ctx->key;
12533 + else
12534 + ctx->adata.key_dma = ctx->key_dma;
12535 +
12536 + if (inl_mask & 2)
12537 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12538 + else
12539 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12540 +
12541 + ctx->adata.key_inline = !!(inl_mask & 1);
12542 + ctx->cdata.key_inline = !!(inl_mask & 2);
12543 +
12544 + cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12545 + ivsize, ctx->authsize, is_rfc3686, nonce,
12546 + ctx1_iv_off, true, ctrlpriv->era);
12547 +
12548 +skip_enc:
12549 + /* aead_decrypt shared descriptor */
12550 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
12551 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12552 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12553 + ARRAY_SIZE(data_len)) < 0)
12554 + return -EINVAL;
12555 +
12556 + if (inl_mask & 1)
12557 + ctx->adata.key_virt = ctx->key;
12558 + else
12559 + ctx->adata.key_dma = ctx->key_dma;
12560 +
12561 + if (inl_mask & 2)
12562 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12563 + else
12564 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12565 +
12566 + ctx->adata.key_inline = !!(inl_mask & 1);
12567 + ctx->cdata.key_inline = !!(inl_mask & 2);
12568 +
12569 + cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12570 + ivsize, ctx->authsize, alg->caam.geniv,
12571 + is_rfc3686, nonce, ctx1_iv_off, true,
12572 + ctrlpriv->era);
12573 +
12574 + if (!alg->caam.geniv)
12575 + goto skip_givenc;
12576 +
12577 + /* aead_givencrypt shared descriptor */
12578 + if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
12579 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
12580 + DESC_JOB_IO_LEN, data_len, &inl_mask,
12581 + ARRAY_SIZE(data_len)) < 0)
12582 + return -EINVAL;
12583 +
12584 + if (inl_mask & 1)
12585 + ctx->adata.key_virt = ctx->key;
12586 + else
12587 + ctx->adata.key_dma = ctx->key_dma;
12588 +
12589 + if (inl_mask & 2)
12590 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12591 + else
12592 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12593 +
12594 + ctx->adata.key_inline = !!(inl_mask & 1);
12595 + ctx->cdata.key_inline = !!(inl_mask & 2);
12596 +
12597 + cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12598 + ivsize, ctx->authsize, is_rfc3686, nonce,
12599 + ctx1_iv_off, true, ctrlpriv->era);
12600 +
12601 +skip_givenc:
12602 + return 0;
12603 +}
12604 +
12605 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
12606 +{
12607 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
12608 +
12609 + ctx->authsize = authsize;
12610 + aead_set_sh_desc(authenc);
12611 +
12612 + return 0;
12613 +}
12614 +
12615 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
12616 + unsigned int keylen)
12617 +{
12618 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
12619 + struct device *jrdev = ctx->jrdev;
12620 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
12621 + struct crypto_authenc_keys keys;
12622 + int ret = 0;
12623 +
12624 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12625 + goto badkey;
12626 +
12627 +#ifdef DEBUG
12628 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12629 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
12630 + keys.authkeylen);
12631 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12632 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12633 +#endif
12634 +
12635 + /*
12636 + * If DKP is supported, use it in the shared descriptor to generate
12637 + * the split key.
12638 + */
12639 + if (ctrlpriv->era >= 6) {
12640 + ctx->adata.keylen = keys.authkeylen;
12641 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
12642 + OP_ALG_ALGSEL_MASK);
12643 +
12644 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
12645 + goto badkey;
12646 +
12647 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
12648 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
12649 + keys.enckeylen);
12650 + dma_sync_single_for_device(jrdev, ctx->key_dma,
12651 + ctx->adata.keylen_pad +
12652 + keys.enckeylen, DMA_TO_DEVICE);
12653 + goto skip_split_key;
12654 + }
12655 +
12656 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12657 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
12658 + keys.enckeylen);
12659 + if (ret)
12660 + goto badkey;
12661 +
12662 + /* append encryption key to auth split key */
12663 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12664 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12665 + keys.enckeylen, DMA_TO_DEVICE);
12666 +#ifdef DEBUG
12667 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12668 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12669 + ctx->adata.keylen_pad + keys.enckeylen, 1);
12670 +#endif
12671 +
12672 +skip_split_key:
12673 + ctx->cdata.keylen = keys.enckeylen;
12674 +
12675 + ret = aead_set_sh_desc(aead);
12676 + if (ret)
12677 + goto badkey;
12678 +
12679 + /* Now update the driver contexts with the new shared descriptor */
12680 + if (ctx->drv_ctx[ENCRYPT]) {
12681 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12682 + ctx->sh_desc_enc);
12683 + if (ret) {
12684 + dev_err(jrdev, "driver enc context update failed\n");
12685 + goto badkey;
12686 + }
12687 + }
12688 +
12689 + if (ctx->drv_ctx[DECRYPT]) {
12690 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12691 + ctx->sh_desc_dec);
12692 + if (ret) {
12693 + dev_err(jrdev, "driver dec context update failed\n");
12694 + goto badkey;
12695 + }
12696 + }
12697 +
12698 + return ret;
12699 +badkey:
12700 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
12701 + return -EINVAL;
12702 +}
12703 +
12704 +static int tls_set_sh_desc(struct crypto_aead *tls)
12705 +{
12706 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12707 + unsigned int ivsize = crypto_aead_ivsize(tls);
12708 + unsigned int blocksize = crypto_aead_blocksize(tls);
12709 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
12710 + unsigned int data_len[2];
12711 + u32 inl_mask;
12712 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
12713 +
12714 + if (!ctx->cdata.keylen || !ctx->authsize)
12715 + return 0;
12716 +
12717 + /*
12718 + * TLS 1.0 encrypt shared descriptor
12719 + * Job Descriptor and Shared Descriptor
12720 + * must fit into the 64-word Descriptor h/w Buffer
12721 + */
12722 + data_len[0] = ctx->adata.keylen_pad;
12723 + data_len[1] = ctx->cdata.keylen;
12724 +
12725 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
12726 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
12727 + return -EINVAL;
12728 +
12729 + if (inl_mask & 1)
12730 + ctx->adata.key_virt = ctx->key;
12731 + else
12732 + ctx->adata.key_dma = ctx->key_dma;
12733 +
12734 + if (inl_mask & 2)
12735 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
12736 + else
12737 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12738 +
12739 + ctx->adata.key_inline = !!(inl_mask & 1);
12740 + ctx->cdata.key_inline = !!(inl_mask & 2);
12741 +
12742 + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
12743 + assoclen, ivsize, ctx->authsize, blocksize,
12744 + ctrlpriv->era);
12745 +
12746 + /*
12747 + * TLS 1.0 decrypt shared descriptor
12748 + * Keys do not fit inline, regardless of algorithms used
12749 + */
12750 + ctx->adata.key_inline = false;
12751 + ctx->adata.key_dma = ctx->key_dma;
12752 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
12753 +
12754 + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
12755 + assoclen, ivsize, ctx->authsize, blocksize,
12756 + ctrlpriv->era);
12757 +
12758 + return 0;
12759 +}
12760 +
12761 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
12762 +{
12763 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12764 +
12765 + ctx->authsize = authsize;
12766 + tls_set_sh_desc(tls);
12767 +
12768 + return 0;
12769 +}
12770 +
12771 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
12772 + unsigned int keylen)
12773 +{
12774 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
12775 + struct device *jrdev = ctx->jrdev;
12776 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
12777 + struct crypto_authenc_keys keys;
12778 + int ret = 0;
12779 +
12780 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
12781 + goto badkey;
12782 +
12783 +#ifdef DEBUG
12784 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
12785 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
12786 + keys.authkeylen);
12787 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12788 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12789 +#endif
12790 +
12791 + /*
12792 + * If DKP is supported, use it in the shared descriptor to generate
12793 + * the split key.
12794 + */
12795 + if (ctrlpriv->era >= 6) {
12796 + ctx->adata.keylen = keys.authkeylen;
12797 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
12798 + OP_ALG_ALGSEL_MASK);
12799 +
12800 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
12801 + goto badkey;
12802 +
12803 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
12804 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
12805 + keys.enckeylen);
12806 + dma_sync_single_for_device(jrdev, ctx->key_dma,
12807 + ctx->adata.keylen_pad +
12808 + keys.enckeylen, DMA_TO_DEVICE);
12809 + goto skip_split_key;
12810 + }
12811 +
12812 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
12813 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
12814 + keys.enckeylen);
12815 + if (ret)
12816 + goto badkey;
12817 +
12818 + /* append encryption key to auth split key */
12819 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
12820 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
12821 + keys.enckeylen, DMA_TO_DEVICE);
12822 +
12823 +#ifdef DEBUG
12824 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
12825 + ctx->adata.keylen, ctx->adata.keylen_pad);
12826 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
12827 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
12828 + ctx->adata.keylen_pad + keys.enckeylen, 1);
12829 +#endif
12830 +
12831 +skip_split_key:
12832 + ctx->cdata.keylen = keys.enckeylen;
12833 +
12834 + ret = tls_set_sh_desc(tls);
12835 + if (ret)
12836 + goto badkey;
12837 +
12838 + /* Now update the driver contexts with the new shared descriptor */
12839 + if (ctx->drv_ctx[ENCRYPT]) {
12840 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12841 + ctx->sh_desc_enc);
12842 + if (ret) {
12843 + dev_err(jrdev, "driver enc context update failed\n");
12844 + goto badkey;
12845 + }
12846 + }
12847 +
12848 + if (ctx->drv_ctx[DECRYPT]) {
12849 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12850 + ctx->sh_desc_dec);
12851 + if (ret) {
12852 + dev_err(jrdev, "driver dec context update failed\n");
12853 + goto badkey;
12854 + }
12855 + }
12856 +
12857 + return ret;
12858 +badkey:
12859 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
12860 + return -EINVAL;
12861 +}
12862 +
12863 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12864 + const u8 *key, unsigned int keylen)
12865 +{
12866 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12867 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
12868 + const char *alg_name = crypto_tfm_alg_name(tfm);
12869 + struct device *jrdev = ctx->jrdev;
12870 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
12871 + u32 ctx1_iv_off = 0;
12872 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
12873 + OP_ALG_AAI_CTR_MOD128);
12874 + const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
12875 + int ret = 0;
12876 +
12877 + memcpy(ctx->key, key, keylen);
12878 +#ifdef DEBUG
12879 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
12880 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
12881 +#endif
12882 + /*
12883 + * AES-CTR needs to load IV in CONTEXT1 reg
12884 + * at an offset of 128bits (16bytes)
12885 + * CONTEXT1[255:128] = IV
12886 + */
12887 + if (ctr_mode)
12888 + ctx1_iv_off = 16;
12889 +
12890 + /*
12891 + * RFC3686 specific:
12892 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
12893 + * | *key = {KEY, NONCE}
12894 + */
12895 + if (is_rfc3686) {
12896 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
12897 + keylen -= CTR_RFC3686_NONCE_SIZE;
12898 + }
12899 +
12900 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12901 + ctx->cdata.keylen = keylen;
12902 + ctx->cdata.key_virt = ctx->key;
12903 + ctx->cdata.key_inline = true;
12904 +
12905 + /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
12906 + cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
12907 + is_rfc3686, ctx1_iv_off);
12908 + cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
12909 + is_rfc3686, ctx1_iv_off);
12910 + cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
12911 + ivsize, is_rfc3686, ctx1_iv_off);
12912 +
12913 + /* Now update the driver contexts with the new shared descriptor */
12914 + if (ctx->drv_ctx[ENCRYPT]) {
12915 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12916 + ctx->sh_desc_enc);
12917 + if (ret) {
12918 + dev_err(jrdev, "driver enc context update failed\n");
12919 + goto badkey;
12920 + }
12921 + }
12922 +
12923 + if (ctx->drv_ctx[DECRYPT]) {
12924 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12925 + ctx->sh_desc_dec);
12926 + if (ret) {
12927 + dev_err(jrdev, "driver dec context update failed\n");
12928 + goto badkey;
12929 + }
12930 + }
12931 +
12932 + if (ctx->drv_ctx[GIVENCRYPT]) {
12933 + ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
12934 + ctx->sh_desc_givenc);
12935 + if (ret) {
12936 + dev_err(jrdev, "driver givenc context update failed\n");
12937 + goto badkey;
12938 + }
12939 + }
12940 +
12941 + return ret;
12942 +badkey:
12943 + crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12944 + return -EINVAL;
12945 +}
12946 +
12947 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
12948 + const u8 *key, unsigned int keylen)
12949 +{
12950 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
12951 + struct device *jrdev = ctx->jrdev;
12952 + int ret = 0;
12953 +
12954 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
12955 + crypto_ablkcipher_set_flags(ablkcipher,
12956 + CRYPTO_TFM_RES_BAD_KEY_LEN);
12957 + dev_err(jrdev, "key size mismatch\n");
12958 + return -EINVAL;
12959 + }
12960 +
12961 + memcpy(ctx->key, key, keylen);
12962 + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
12963 + ctx->cdata.keylen = keylen;
12964 + ctx->cdata.key_virt = ctx->key;
12965 + ctx->cdata.key_inline = true;
12966 +
12967 + /* xts ablkcipher encrypt, decrypt shared descriptors */
12968 + cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
12969 + cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
12970 +
12971 + /* Now update the driver contexts with the new shared descriptor */
12972 + if (ctx->drv_ctx[ENCRYPT]) {
12973 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
12974 + ctx->sh_desc_enc);
12975 + if (ret) {
12976 + dev_err(jrdev, "driver enc context update failed\n");
12977 + goto badkey;
12978 + }
12979 + }
12980 +
12981 + if (ctx->drv_ctx[DECRYPT]) {
12982 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
12983 + ctx->sh_desc_dec);
12984 + if (ret) {
12985 + dev_err(jrdev, "driver dec context update failed\n");
12986 + goto badkey;
12987 + }
12988 + }
12989 +
12990 + return ret;
12991 +badkey:
12992 + crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
12993 + return -EINVAL;
12994 +}
12995 +
12996 +/*
12997 + * aead_edesc - s/w-extended aead descriptor
12998 + * @src_nents: number of segments in input scatterlist
12999 + * @dst_nents: number of segments in output scatterlist
13000 + * @iv_dma: dma address of iv for checking continuity and link table
13001 + * @qm_sg_bytes: length of dma mapped h/w link table
13002 + * @qm_sg_dma: bus physical mapped address of h/w link table
13003 + * @assoclen: associated data length, in CAAM endianness
13004 + * @assoclen_dma: bus physical mapped address of req->assoclen
13005 + * @drv_req: driver-specific request structure
13006 + * @sgt: the h/w link table
13007 + */
13008 +struct aead_edesc {
13009 + int src_nents;
13010 + int dst_nents;
13011 + dma_addr_t iv_dma;
13012 + int qm_sg_bytes;
13013 + dma_addr_t qm_sg_dma;
13014 + unsigned int assoclen;
13015 + dma_addr_t assoclen_dma;
13016 + struct caam_drv_req drv_req;
13017 +#define CAAM_QI_MAX_AEAD_SG \
13018 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
13019 + sizeof(struct qm_sg_entry))
13020 + struct qm_sg_entry sgt[0];
13021 +};
13022 +
13023 +/*
13024 + * tls_edesc - s/w-extended tls descriptor
13025 + * @src_nents: number of segments in input scatterlist
13026 + * @dst_nents: number of segments in output scatterlist
13027 + * @iv_dma: dma address of iv for checking continuity and link table
13028 + * @qm_sg_bytes: length of dma mapped h/w link table
13029 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
13030 + * @qm_sg_dma: bus physical mapped address of h/w link table
13031 + * @drv_req: driver-specific request structure
13032 + * @sgt: the h/w link table
13033 + */
13034 +struct tls_edesc {
13035 + int src_nents;
13036 + int dst_nents;
13037 + dma_addr_t iv_dma;
13038 + int qm_sg_bytes;
13039 + dma_addr_t qm_sg_dma;
13040 + struct scatterlist tmp[2];
13041 + struct scatterlist *dst;
13042 + struct caam_drv_req drv_req;
13043 + struct qm_sg_entry sgt[0];
13044 +};
13045 +
13046 +/*
13047 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
13048 + * @src_nents: number of segments in input scatterlist
13049 + * @dst_nents: number of segments in output scatterlist
13050 + * @iv_dma: dma address of iv for checking continuity and link table
13051 + * @qm_sg_bytes: length of dma mapped h/w link table
13052 + * @qm_sg_dma: bus physical mapped address of h/w link table
13053 + * @drv_req: driver-specific request structure
13054 + * @sgt: the h/w link table
13055 + */
13056 +struct ablkcipher_edesc {
13057 + int src_nents;
13058 + int dst_nents;
13059 + dma_addr_t iv_dma;
13060 + int qm_sg_bytes;
13061 + dma_addr_t qm_sg_dma;
13062 + struct caam_drv_req drv_req;
13063 +#define CAAM_QI_MAX_ABLKCIPHER_SG \
13064 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
13065 + sizeof(struct qm_sg_entry))
13066 + struct qm_sg_entry sgt[0];
13067 +};
13068 +
13069 +static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
13070 + enum optype type)
13071 +{
13072 + /*
13073 + * This function is called on the fast path with values of 'type'
13074 + * known at compile time. Invalid arguments are not expected and
13075 + * thus no checks are made.
13076 + */
13077 + struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
13078 + u32 *desc;
13079 +
13080 + if (unlikely(!drv_ctx)) {
13081 + spin_lock(&ctx->lock);
13082 +
13083 + /* Read again to check if some other core initialized drv_ctx */
13084 + drv_ctx = ctx->drv_ctx[type];
13085 + if (!drv_ctx) {
13086 + int cpu;
13087 +
13088 + if (type == ENCRYPT)
13089 + desc = ctx->sh_desc_enc;
13090 + else if (type == DECRYPT)
13091 + desc = ctx->sh_desc_dec;
13092 + else /* (type == GIVENCRYPT) */
13093 + desc = ctx->sh_desc_givenc;
13094 +
13095 + cpu = smp_processor_id();
13096 + drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
13097 + if (likely(!IS_ERR_OR_NULL(drv_ctx)))
13098 + drv_ctx->op_type = type;
13099 +
13100 + ctx->drv_ctx[type] = drv_ctx;
13101 + }
13102 +
13103 + spin_unlock(&ctx->lock);
13104 + }
13105 +
13106 + return drv_ctx;
13107 +}
13108 +
13109 +static void caam_unmap(struct device *dev, struct scatterlist *src,
13110 + struct scatterlist *dst, int src_nents,
13111 + int dst_nents, dma_addr_t iv_dma, int ivsize,
13112 + enum optype op_type, dma_addr_t qm_sg_dma,
13113 + int qm_sg_bytes)
13114 +{
13115 + if (dst != src) {
13116 + if (src_nents)
13117 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
13118 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
13119 + } else {
13120 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
13121 + }
13122 +
13123 + if (iv_dma)
13124 + dma_unmap_single(dev, iv_dma, ivsize,
13125 + op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
13126 + DMA_TO_DEVICE);
13127 + if (qm_sg_bytes)
13128 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
13129 +}
13130 +
13131 +static void aead_unmap(struct device *dev,
13132 + struct aead_edesc *edesc,
13133 + struct aead_request *req)
13134 +{
13135 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13136 + int ivsize = crypto_aead_ivsize(aead);
13137 +
13138 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
13139 + edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
13140 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
13141 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
13142 +}
13143 +
13144 +static void tls_unmap(struct device *dev,
13145 + struct tls_edesc *edesc,
13146 + struct aead_request *req)
13147 +{
13148 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13149 + int ivsize = crypto_aead_ivsize(aead);
13150 +
13151 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
13152 + edesc->dst_nents, edesc->iv_dma, ivsize,
13153 + edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
13154 + edesc->qm_sg_bytes);
13155 +}
13156 +
13157 +static void ablkcipher_unmap(struct device *dev,
13158 + struct ablkcipher_edesc *edesc,
13159 + struct ablkcipher_request *req)
13160 +{
13161 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13162 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13163 +
13164 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
13165 + edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
13166 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
13167 +}
13168 +
13169 +static void aead_done(struct caam_drv_req *drv_req, u32 status)
13170 +{
13171 + struct device *qidev;
13172 + struct aead_edesc *edesc;
13173 + struct aead_request *aead_req = drv_req->app_ctx;
13174 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13175 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13176 + int ecode = 0;
13177 +
13178 + qidev = caam_ctx->qidev;
13179 +
13180 + if (unlikely(status)) {
13181 + caam_jr_strstatus(qidev, status);
13182 + ecode = -EIO;
13183 + }
13184 +
13185 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13186 + aead_unmap(qidev, edesc, aead_req);
13187 +
13188 + aead_request_complete(aead_req, ecode);
13189 + qi_cache_free(edesc);
13190 +}
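+
+/*
+ * aead_done() above runs when the response frame for an AEAD job comes back
+ * from CAAM: a non-zero status is decoded with caam_jr_strstatus() and
+ * reported as -EIO, the DMA mappings are torn down, the crypto API is
+ * notified via aead_request_complete() and the extended descriptor goes
+ * back to the qi cache.
+ */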
13191 +
13192 +/*
13193 + * allocate and map the aead extended descriptor
13194 + */
13195 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
13196 + bool encrypt)
13197 +{
13198 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13199 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13200 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13201 + typeof(*alg), aead);
13202 + struct device *qidev = ctx->qidev;
13203 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13204 + GFP_KERNEL : GFP_ATOMIC;
13205 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13206 + struct aead_edesc *edesc;
13207 + dma_addr_t qm_sg_dma, iv_dma = 0;
13208 + int ivsize = 0;
13209 + unsigned int authsize = ctx->authsize;
13210 + int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
13211 + int in_len, out_len;
13212 + struct qm_sg_entry *sg_table, *fd_sgt;
13213 + struct caam_drv_ctx *drv_ctx;
13214 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13215 +
13216 + drv_ctx = get_drv_ctx(ctx, op_type);
13217 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13218 + return (struct aead_edesc *)drv_ctx;
13219 +
13220 + /* allocate space for base edesc and hw desc commands, link tables */
13221 + edesc = qi_cache_alloc(GFP_DMA | flags);
13222 + if (unlikely(!edesc)) {
13223 + dev_err(qidev, "could not allocate extended descriptor\n");
13224 + return ERR_PTR(-ENOMEM);
13225 + }
13226 +
13227 + if (likely(req->src == req->dst)) {
13228 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13229 + req->cryptlen +
13230 + (encrypt ? authsize : 0));
13231 + if (unlikely(src_nents < 0)) {
13232 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13233 + req->assoclen + req->cryptlen +
13234 + (encrypt ? authsize : 0));
13235 + qi_cache_free(edesc);
13236 + return ERR_PTR(src_nents);
13237 + }
13238 +
13239 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13240 + DMA_BIDIRECTIONAL);
13241 + if (unlikely(!mapped_src_nents)) {
13242 + dev_err(qidev, "unable to map source\n");
13243 + qi_cache_free(edesc);
13244 + return ERR_PTR(-ENOMEM);
13245 + }
13246 + } else {
13247 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13248 + req->cryptlen);
13249 + if (unlikely(src_nents < 0)) {
13250 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13251 + req->assoclen + req->cryptlen);
13252 + qi_cache_free(edesc);
13253 + return ERR_PTR(src_nents);
13254 + }
13255 +
13256 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
13257 + req->cryptlen +
13258 + (encrypt ? authsize :
13259 + (-authsize)));
13260 + if (unlikely(dst_nents < 0)) {
13261 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13262 + req->assoclen + req->cryptlen +
13263 + (encrypt ? authsize : (-authsize)));
13264 + qi_cache_free(edesc);
13265 + return ERR_PTR(dst_nents);
13266 + }
13267 +
13268 + if (src_nents) {
13269 + mapped_src_nents = dma_map_sg(qidev, req->src,
13270 + src_nents, DMA_TO_DEVICE);
13271 + if (unlikely(!mapped_src_nents)) {
13272 + dev_err(qidev, "unable to map source\n");
13273 + qi_cache_free(edesc);
13274 + return ERR_PTR(-ENOMEM);
13275 + }
13276 + } else {
13277 + mapped_src_nents = 0;
13278 + }
13279 +
13280 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13281 + DMA_FROM_DEVICE);
13282 + if (unlikely(!mapped_dst_nents)) {
13283 + dev_err(qidev, "unable to map destination\n");
13284 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13285 + qi_cache_free(edesc);
13286 + return ERR_PTR(-ENOMEM);
13287 + }
13288 + }
13289 +
13290 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
13291 + ivsize = crypto_aead_ivsize(aead);
13292 + iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13293 + if (dma_mapping_error(qidev, iv_dma)) {
13294 + dev_err(qidev, "unable to map IV\n");
13295 + caam_unmap(qidev, req->src, req->dst, src_nents,
13296 + dst_nents, 0, 0, op_type, 0, 0);
13297 + qi_cache_free(edesc);
13298 + return ERR_PTR(-ENOMEM);
13299 + }
13300 + }
13301 +
13302 + /*
13303 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
13304 + * Input is not contiguous.
13305 + */
13306 + qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
13307 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13308 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
13309 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13310 + qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
13311 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13312 + iv_dma, ivsize, op_type, 0, 0);
13313 + qi_cache_free(edesc);
13314 + return ERR_PTR(-ENOMEM);
13315 + }
13316 + sg_table = &edesc->sgt[0];
13317 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13318 +
13319 + edesc->src_nents = src_nents;
13320 + edesc->dst_nents = dst_nents;
13321 + edesc->iv_dma = iv_dma;
13322 + edesc->drv_req.app_ctx = req;
13323 + edesc->drv_req.cbk = aead_done;
13324 + edesc->drv_req.drv_ctx = drv_ctx;
13325 +
13326 + edesc->assoclen = cpu_to_caam32(req->assoclen);
13327 + edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
13328 + DMA_TO_DEVICE);
13329 + if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
13330 + dev_err(qidev, "unable to map assoclen\n");
13331 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13332 + iv_dma, ivsize, op_type, 0, 0);
13333 + qi_cache_free(edesc);
13334 + return ERR_PTR(-ENOMEM);
13335 + }
13336 +
13337 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
13338 + qm_sg_index++;
13339 + if (ivsize) {
13340 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
13341 + qm_sg_index++;
13342 + }
13343 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13344 + qm_sg_index += mapped_src_nents;
13345 +
13346 + if (mapped_dst_nents > 1)
13347 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13348 + qm_sg_index, 0);
13349 +
13350 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13351 + if (dma_mapping_error(qidev, qm_sg_dma)) {
13352 + dev_err(qidev, "unable to map S/G table\n");
13353 + dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
13354 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13355 + iv_dma, ivsize, op_type, 0, 0);
13356 + qi_cache_free(edesc);
13357 + return ERR_PTR(-ENOMEM);
13358 + }
13359 +
13360 + edesc->qm_sg_dma = qm_sg_dma;
13361 + edesc->qm_sg_bytes = qm_sg_bytes;
13362 +
13363 + out_len = req->assoclen + req->cryptlen +
13364 + (encrypt ? ctx->authsize : (-ctx->authsize));
13365 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
13366 +
13367 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13368 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13369 +
13370 + if (req->dst == req->src) {
13371 + if (mapped_src_nents == 1)
13372 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13373 + out_len, 0);
13374 + else
13375 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13376 + (1 + !!ivsize) * sizeof(*sg_table),
13377 + out_len, 0);
13378 + } else if (mapped_dst_nents == 1) {
13379 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
13380 + 0);
13381 + } else {
13382 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13383 + qm_sg_index, out_len, 0);
13384 + }
13385 +
13386 + return edesc;
13387 +}
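+
+/*
+ * Layout built by aead_edesc_alloc() above: the qman S/G table holds the
+ * 4-byte assoclen word, the IV (when one is needed), the mapped source
+ * segments and, for scattered out-of-place destinations, the destination
+ * segments.  fd_sgt[1], the input entry of the compound frame, points at
+ * this table and covers 4 + ivsize + assoclen + cryptlen bytes; fd_sgt[0],
+ * the output entry, points either at a single contiguous destination
+ * buffer or into the table (reusing the source entries for in-place
+ * requests).
+ */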
13388 +
13389 +static inline int aead_crypt(struct aead_request *req, bool encrypt)
13390 +{
13391 + struct aead_edesc *edesc;
13392 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13393 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13394 + int ret;
13395 +
13396 + if (unlikely(caam_congested))
13397 + return -EAGAIN;
13398 +
13399 + /* allocate extended descriptor */
13400 + edesc = aead_edesc_alloc(req, encrypt);
13401 + if (IS_ERR_OR_NULL(edesc))
13402 + return PTR_ERR(edesc);
13403 +
13404 + /* Create and submit job descriptor */
13405 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13406 + if (!ret) {
13407 + ret = -EINPROGRESS;
13408 + } else {
13409 + aead_unmap(ctx->qidev, edesc, req);
13410 + qi_cache_free(edesc);
13411 + }
13412 +
13413 + return ret;
13414 +}
13415 +
13416 +static int aead_encrypt(struct aead_request *req)
13417 +{
13418 + return aead_crypt(req, true);
13419 +}
13420 +
13421 +static int aead_decrypt(struct aead_request *req)
13422 +{
13423 + return aead_crypt(req, false);
13424 +}
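+
+/*
+ * The aead_encrypt()/aead_decrypt() entry points above are reached through
+ * the generic AEAD API once the algorithms in driver_aeads[] are
+ * registered.  A minimal caller-side sketch, not part of this patch
+ * ("authenc(hmac(sha1),cbc(aes))" is just one of the registered names, and
+ * keyblob must use the authenc rtattr key layout expected by aead_setkey()):
+ *
+ *	struct crypto_aead *tfm;
+ *	struct aead_request *areq;
+ *
+ *	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
+ *	crypto_aead_setkey(tfm, keyblob, keyblob_len);
+ *	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
+ *	areq = aead_request_alloc(tfm, GFP_KERNEL);
+ *	aead_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP, cb, cb_arg);
+ *	aead_request_set_ad(areq, assoclen);
+ *	aead_request_set_crypt(areq, src_sg, dst_sg, cryptlen, iv);
+ *	ret = crypto_aead_encrypt(areq);	returns -EINPROGRESS when queued
+ */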
13425 +
13426 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
13427 +{
13428 + struct device *qidev;
13429 + struct tls_edesc *edesc;
13430 + struct aead_request *aead_req = drv_req->app_ctx;
13431 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
13432 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
13433 + int ecode = 0;
13434 +
13435 + qidev = caam_ctx->qidev;
13436 +
13437 + if (unlikely(status)) {
13438 + caam_jr_strstatus(qidev, status);
13439 + ecode = -EIO;
13440 + }
13441 +
13442 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13443 + tls_unmap(qidev, edesc, aead_req);
13444 +
13445 + aead_request_complete(aead_req, ecode);
13446 + qi_cache_free(edesc);
13447 +}
13448 +
13449 +/*
13450 + * allocate and map the tls extended descriptor
13451 + */
13452 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
13453 +{
13454 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13455 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13456 + unsigned int blocksize = crypto_aead_blocksize(aead);
13457 + unsigned int padsize, authsize;
13458 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
13459 + typeof(*alg), aead);
13460 + struct device *qidev = ctx->qidev;
13461 + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
13462 + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
13463 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13464 + struct tls_edesc *edesc;
13465 + dma_addr_t qm_sg_dma, iv_dma = 0;
13466 + int ivsize = 0;
13467 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
13468 + int in_len, out_len;
13469 + struct qm_sg_entry *sg_table, *fd_sgt;
13470 + struct caam_drv_ctx *drv_ctx;
13471 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13472 + struct scatterlist *dst;
13473 +
13474 + if (encrypt) {
13475 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
13476 + blocksize);
13477 + authsize = ctx->authsize + padsize;
13478 + } else {
13479 + authsize = ctx->authsize;
13480 + }
13481 +
13482 + drv_ctx = get_drv_ctx(ctx, op_type);
13483 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13484 + return (struct tls_edesc *)drv_ctx;
13485 +
13486 + /* allocate space for base edesc and hw desc commands, link tables */
13487 + edesc = qi_cache_alloc(GFP_DMA | flags);
13488 + if (unlikely(!edesc)) {
13489 + dev_err(qidev, "could not allocate extended descriptor\n");
13490 + return ERR_PTR(-ENOMEM);
13491 + }
13492 +
13493 + if (likely(req->src == req->dst)) {
13494 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13495 + req->cryptlen +
13496 + (encrypt ? authsize : 0));
13497 + if (unlikely(src_nents < 0)) {
13498 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13499 + req->assoclen + req->cryptlen +
13500 + (encrypt ? authsize : 0));
13501 + qi_cache_free(edesc);
13502 + return ERR_PTR(src_nents);
13503 + }
13504 +
13505 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13506 + DMA_BIDIRECTIONAL);
13507 + if (unlikely(!mapped_src_nents)) {
13508 + dev_err(qidev, "unable to map source\n");
13509 + qi_cache_free(edesc);
13510 + return ERR_PTR(-ENOMEM);
13511 + }
13512 + dst = req->dst;
13513 + } else {
13514 + src_nents = sg_nents_for_len(req->src, req->assoclen +
13515 + req->cryptlen);
13516 + if (unlikely(src_nents < 0)) {
13517 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13518 + req->assoclen + req->cryptlen);
13519 + qi_cache_free(edesc);
13520 + return ERR_PTR(src_nents);
13521 + }
13522 +
13523 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
13524 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
13525 + (encrypt ? authsize : 0));
13526 + if (unlikely(dst_nents < 0)) {
13527 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13528 + req->cryptlen +
13529 + (encrypt ? authsize : 0));
13530 + qi_cache_free(edesc);
13531 + return ERR_PTR(dst_nents);
13532 + }
13533 +
13534 + if (src_nents) {
13535 + mapped_src_nents = dma_map_sg(qidev, req->src,
13536 + src_nents, DMA_TO_DEVICE);
13537 + if (unlikely(!mapped_src_nents)) {
13538 + dev_err(qidev, "unable to map source\n");
13539 + qi_cache_free(edesc);
13540 + return ERR_PTR(-ENOMEM);
13541 + }
13542 + } else {
13543 + mapped_src_nents = 0;
13544 + }
13545 +
13546 + mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
13547 + DMA_FROM_DEVICE);
13548 + if (unlikely(!mapped_dst_nents)) {
13549 + dev_err(qidev, "unable to map destination\n");
13550 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13551 + qi_cache_free(edesc);
13552 + return ERR_PTR(-ENOMEM);
13553 + }
13554 + }
13555 +
13556 + ivsize = crypto_aead_ivsize(aead);
13557 + iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
13558 + if (dma_mapping_error(qidev, iv_dma)) {
13559 + dev_err(qidev, "unable to map IV\n");
13560 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
13561 + op_type, 0, 0);
13562 + qi_cache_free(edesc);
13563 + return ERR_PTR(-ENOMEM);
13564 + }
13565 +
13566 + /*
13567 + * Create S/G table: IV, src, dst.
13568 + * Input is not contiguous.
13569 + */
13570 + qm_sg_ents = 1 + mapped_src_nents +
13571 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
13572 + sg_table = &edesc->sgt[0];
13573 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13574 +
13575 + edesc->src_nents = src_nents;
13576 + edesc->dst_nents = dst_nents;
13577 + edesc->dst = dst;
13578 + edesc->iv_dma = iv_dma;
13579 + edesc->drv_req.app_ctx = req;
13580 + edesc->drv_req.cbk = tls_done;
13581 + edesc->drv_req.drv_ctx = drv_ctx;
13582 +
13583 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13584 + qm_sg_index = 1;
13585 +
13586 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
13587 + qm_sg_index += mapped_src_nents;
13588 +
13589 + if (mapped_dst_nents > 1)
13590 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
13591 + qm_sg_index, 0);
13592 +
13593 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
13594 + if (dma_mapping_error(qidev, qm_sg_dma)) {
13595 + dev_err(qidev, "unable to map S/G table\n");
13596 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
13597 + ivsize, op_type, 0, 0);
13598 + qi_cache_free(edesc);
13599 + return ERR_PTR(-ENOMEM);
13600 + }
13601 +
13602 + edesc->qm_sg_dma = qm_sg_dma;
13603 + edesc->qm_sg_bytes = qm_sg_bytes;
13604 +
13605 + out_len = req->cryptlen + (encrypt ? authsize : 0);
13606 + in_len = ivsize + req->assoclen + req->cryptlen;
13607 +
13608 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13609 +
13610 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
13611 +
13612 + if (req->dst == req->src)
13613 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
13614 + (sg_nents_for_len(req->src, req->assoclen) +
13615 + 1) * sizeof(*sg_table), out_len, 0);
13616 + else if (mapped_dst_nents == 1)
13617 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
13618 + else
13619 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
13620 + qm_sg_index, out_len, 0);
13621 +
13622 + return edesc;
13623 +}
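+
+/*
+ * tls_edesc_alloc() above mirrors the AEAD path for TLS 1.0 records: on
+ * encryption the payload grows by ctx->authsize plus CBC padding (padsize
+ * is chosen so that cryptlen plus the ICV plus padding is a multiple of the
+ * block size), the destination scatterlist of out-of-place requests is
+ * fast-forwarded past the associated data with scatterwalk_ffwd(), and the
+ * qman S/G table is built as IV + source segments (+ destination segments
+ * when scattered).
+ */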
13624 +
13625 +static int tls_crypt(struct aead_request *req, bool encrypt)
13626 +{
13627 + struct tls_edesc *edesc;
13628 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
13629 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
13630 + int ret;
13631 +
13632 + if (unlikely(caam_congested))
13633 + return -EAGAIN;
13634 +
13635 + edesc = tls_edesc_alloc(req, encrypt);
13636 + if (IS_ERR_OR_NULL(edesc))
13637 + return PTR_ERR(edesc);
13638 +
13639 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
13640 + if (!ret) {
13641 + ret = -EINPROGRESS;
13642 + } else {
13643 + tls_unmap(ctx->qidev, edesc, req);
13644 + qi_cache_free(edesc);
13645 + }
13646 +
13647 + return ret;
13648 +}
13649 +
13650 +static int tls_encrypt(struct aead_request *req)
13651 +{
13652 + return tls_crypt(req, true);
13653 +}
13654 +
13655 +static int tls_decrypt(struct aead_request *req)
13656 +{
13657 + return tls_crypt(req, false);
13658 +}
13659 +
13660 +static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
13661 +{
13662 + struct ablkcipher_edesc *edesc;
13663 + struct ablkcipher_request *req = drv_req->app_ctx;
13664 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13665 + struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
13666 + struct device *qidev = caam_ctx->qidev;
13667 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13668 +
13669 +#ifdef DEBUG
13670 + dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
13671 +#endif
13672 +
13673 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
13674 +
13675 + if (status)
13676 + caam_jr_strstatus(qidev, status);
13677 +
13678 +#ifdef DEBUG
13679 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
13680 + DUMP_PREFIX_ADDRESS, 16, 4, req->info,
13681 + edesc->src_nents > 1 ? 100 : ivsize, 1);
13682 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
13683 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
13684 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
13685 +#endif
13686 +
13687 + ablkcipher_unmap(qidev, edesc, req);
13688 + qi_cache_free(edesc);
13689 +
13690 + /*
13691 + * The crypto API expects us to set the IV (req->info) to the last
13692 + * ciphertext block. This is used e.g. by the CTS mode.
13693 + */
13694 + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
13695 + ivsize, 0);
13696 +
13697 + ablkcipher_request_complete(req, status);
13698 +}
13699 +
13700 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
13701 + *req, bool encrypt)
13702 +{
13703 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13704 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13705 + struct device *qidev = ctx->qidev;
13706 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13707 + GFP_KERNEL : GFP_ATOMIC;
13708 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
13709 + struct ablkcipher_edesc *edesc;
13710 + dma_addr_t iv_dma;
13711 + bool in_contig;
13712 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13713 + int dst_sg_idx, qm_sg_ents;
13714 + struct qm_sg_entry *sg_table, *fd_sgt;
13715 + struct caam_drv_ctx *drv_ctx;
13716 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
13717 +
13718 + drv_ctx = get_drv_ctx(ctx, op_type);
13719 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13720 + return (struct ablkcipher_edesc *)drv_ctx;
13721 +
13722 + src_nents = sg_nents_for_len(req->src, req->nbytes);
13723 + if (unlikely(src_nents < 0)) {
13724 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13725 + req->nbytes);
13726 + return ERR_PTR(src_nents);
13727 + }
13728 +
13729 + if (unlikely(req->src != req->dst)) {
13730 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13731 + if (unlikely(dst_nents < 0)) {
13732 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13733 + req->nbytes);
13734 + return ERR_PTR(dst_nents);
13735 + }
13736 +
13737 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13738 + DMA_TO_DEVICE);
13739 + if (unlikely(!mapped_src_nents)) {
13740 + dev_err(qidev, "unable to map source\n");
13741 + return ERR_PTR(-ENOMEM);
13742 + }
13743 +
13744 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13745 + DMA_FROM_DEVICE);
13746 + if (unlikely(!mapped_dst_nents)) {
13747 + dev_err(qidev, "unable to map destination\n");
13748 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13749 + return ERR_PTR(-ENOMEM);
13750 + }
13751 + } else {
13752 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13753 + DMA_BIDIRECTIONAL);
13754 + if (unlikely(!mapped_src_nents)) {
13755 + dev_err(qidev, "unable to map source\n");
13756 + return ERR_PTR(-ENOMEM);
13757 + }
13758 + }
13759 +
13760 + iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
13761 + if (dma_mapping_error(qidev, iv_dma)) {
13762 + dev_err(qidev, "unable to map IV\n");
13763 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13764 + 0, 0, 0, 0);
13765 + return ERR_PTR(-ENOMEM);
13766 + }
13767 +
13768 + if (mapped_src_nents == 1 &&
13769 + iv_dma + ivsize == sg_dma_address(req->src)) {
13770 + in_contig = true;
13771 + qm_sg_ents = 0;
13772 + } else {
13773 + in_contig = false;
13774 + qm_sg_ents = 1 + mapped_src_nents;
13775 + }
13776 + dst_sg_idx = qm_sg_ents;
13777 +
13778 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
13779 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13780 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13781 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13782 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13783 + iv_dma, ivsize, op_type, 0, 0);
13784 + return ERR_PTR(-ENOMEM);
13785 + }
13786 +
13787 + /* allocate space for base edesc and link tables */
13788 + edesc = qi_cache_alloc(GFP_DMA | flags);
13789 + if (unlikely(!edesc)) {
13790 + dev_err(qidev, "could not allocate extended descriptor\n");
13791 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13792 + iv_dma, ivsize, op_type, 0, 0);
13793 + return ERR_PTR(-ENOMEM);
13794 + }
13795 +
13796 + edesc->src_nents = src_nents;
13797 + edesc->dst_nents = dst_nents;
13798 + edesc->iv_dma = iv_dma;
13799 + sg_table = &edesc->sgt[0];
13800 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13801 + edesc->drv_req.app_ctx = req;
13802 + edesc->drv_req.cbk = ablkcipher_done;
13803 + edesc->drv_req.drv_ctx = drv_ctx;
13804 +
13805 + if (!in_contig) {
13806 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
13807 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
13808 + }
13809 +
13810 + if (mapped_dst_nents > 1)
13811 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13812 + dst_sg_idx, 0);
13813 +
13814 + edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13815 + DMA_TO_DEVICE);
13816 + if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13817 + dev_err(qidev, "unable to map S/G table\n");
13818 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13819 + iv_dma, ivsize, op_type, 0, 0);
13820 + qi_cache_free(edesc);
13821 + return ERR_PTR(-ENOMEM);
13822 + }
13823 +
13824 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13825 +
13826 + if (!in_contig)
13827 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
13828 + ivsize + req->nbytes, 0);
13829 + else
13830 + dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
13831 + 0);
13832 +
13833 + if (req->src == req->dst) {
13834 + if (!in_contig)
13835 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
13836 + sizeof(*sg_table), req->nbytes, 0);
13837 + else
13838 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
13839 + req->nbytes, 0);
13840 + } else if (mapped_dst_nents > 1) {
13841 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13842 + sizeof(*sg_table), req->nbytes, 0);
13843 + } else {
13844 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13845 + req->nbytes, 0);
13846 + }
13847 +
13848 + return edesc;
13849 +}
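+
+/*
+ * In ablkcipher_edesc_alloc() above, in_contig marks the case where the
+ * mapped IV sits immediately before a single contiguous source segment; the
+ * input compound-frame entry then references that region directly and no
+ * qman S/G entries are needed for the input.  Otherwise an IV + source
+ * table is built, with the destination segments appended at dst_sg_idx when
+ * the destination is scattered.
+ */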
13850 +
13851 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
13852 + struct skcipher_givcrypt_request *creq)
13853 +{
13854 + struct ablkcipher_request *req = &creq->creq;
13855 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
13856 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
13857 + struct device *qidev = ctx->qidev;
13858 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
13859 + GFP_KERNEL : GFP_ATOMIC;
13860 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
13861 + struct ablkcipher_edesc *edesc;
13862 + dma_addr_t iv_dma;
13863 + bool out_contig;
13864 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
13865 + struct qm_sg_entry *sg_table, *fd_sgt;
13866 + int dst_sg_idx, qm_sg_ents;
13867 + struct caam_drv_ctx *drv_ctx;
13868 +
13869 + drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
13870 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
13871 + return (struct ablkcipher_edesc *)drv_ctx;
13872 +
13873 + src_nents = sg_nents_for_len(req->src, req->nbytes);
13874 + if (unlikely(src_nents < 0)) {
13875 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
13876 + req->nbytes);
13877 + return ERR_PTR(src_nents);
13878 + }
13879 +
13880 + if (unlikely(req->src != req->dst)) {
13881 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
13882 + if (unlikely(dst_nents < 0)) {
13883 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
13884 + req->nbytes);
13885 + return ERR_PTR(dst_nents);
13886 + }
13887 +
13888 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13889 + DMA_TO_DEVICE);
13890 + if (unlikely(!mapped_src_nents)) {
13891 + dev_err(qidev, "unable to map source\n");
13892 + return ERR_PTR(-ENOMEM);
13893 + }
13894 +
13895 + mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
13896 + DMA_FROM_DEVICE);
13897 + if (unlikely(!mapped_dst_nents)) {
13898 + dev_err(qidev, "unable to map destination\n");
13899 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
13900 + return ERR_PTR(-ENOMEM);
13901 + }
13902 + } else {
13903 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
13904 + DMA_BIDIRECTIONAL);
13905 + if (unlikely(!mapped_src_nents)) {
13906 + dev_err(qidev, "unable to map source\n");
13907 + return ERR_PTR(-ENOMEM);
13908 + }
13909 +
13910 + dst_nents = src_nents;
13911 + mapped_dst_nents = src_nents;
13912 + }
13913 +
13914 + iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
13915 + if (dma_mapping_error(qidev, iv_dma)) {
13916 + dev_err(qidev, "unable to map IV\n");
13917 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
13918 + 0, 0, 0, 0);
13919 + return ERR_PTR(-ENOMEM);
13920 + }
13921 +
13922 + qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
13923 + dst_sg_idx = qm_sg_ents;
13924 + if (mapped_dst_nents == 1 &&
13925 + iv_dma + ivsize == sg_dma_address(req->dst)) {
13926 + out_contig = true;
13927 + } else {
13928 + out_contig = false;
13929 + qm_sg_ents += 1 + mapped_dst_nents;
13930 + }
13931 +
13932 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
13933 + dev_err(qidev, "Insufficient S/G entries: %d > %lu\n",
13934 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
13935 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13936 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13937 + return ERR_PTR(-ENOMEM);
13938 + }
13939 +
13940 + /* allocate space for base edesc and link tables */
13941 + edesc = qi_cache_alloc(GFP_DMA | flags);
13942 + if (!edesc) {
13943 + dev_err(qidev, "could not allocate extended descriptor\n");
13944 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13945 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13946 + return ERR_PTR(-ENOMEM);
13947 + }
13948 +
13949 + edesc->src_nents = src_nents;
13950 + edesc->dst_nents = dst_nents;
13951 + edesc->iv_dma = iv_dma;
13952 + sg_table = &edesc->sgt[0];
13953 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
13954 + edesc->drv_req.app_ctx = req;
13955 + edesc->drv_req.cbk = ablkcipher_done;
13956 + edesc->drv_req.drv_ctx = drv_ctx;
13957 +
13958 + if (mapped_src_nents > 1)
13959 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
13960 +
13961 + if (!out_contig) {
13962 + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
13963 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
13964 + dst_sg_idx + 1, 0);
13965 + }
13966 +
13967 + edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
13968 + DMA_TO_DEVICE);
13969 + if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
13970 + dev_err(qidev, "unable to map S/G table\n");
13971 + caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
13972 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
13973 + qi_cache_free(edesc);
13974 + return ERR_PTR(-ENOMEM);
13975 + }
13976 +
13977 + fd_sgt = &edesc->drv_req.fd_sgt[0];
13978 +
13979 + if (mapped_src_nents > 1)
13980 + dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
13981 + 0);
13982 + else
13983 + dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
13984 + req->nbytes, 0);
13985 +
13986 + if (!out_contig)
13987 + dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
13988 + sizeof(*sg_table), ivsize + req->nbytes,
13989 + 0);
13990 + else
13991 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
13992 + ivsize + req->nbytes, 0);
13993 +
13994 + return edesc;
13995 +}
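+
+/*
+ * ablkcipher_giv_edesc_alloc() above handles the IV-generating variant: the
+ * IV buffer (creq->giv) is mapped DMA_FROM_DEVICE because CAAM writes the
+ * generated IV back to it, and the output consists of the IV followed by
+ * the ciphertext.  out_contig mirrors in_contig: when the IV region and a
+ * single destination segment are adjacent in DMA space, the output can be
+ * described without a dedicated S/G table section.
+ */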
13996 +
13997 +static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
13998 +{
13999 + struct ablkcipher_edesc *edesc;
14000 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
14001 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
14002 + int ret;
14003 +
14004 + if (unlikely(caam_congested))
14005 + return -EAGAIN;
14006 +
14007 + /* allocate extended descriptor */
14008 + edesc = ablkcipher_edesc_alloc(req, encrypt);
14009 + if (IS_ERR(edesc))
14010 + return PTR_ERR(edesc);
14011 +
14012 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
14013 + if (!ret) {
14014 + ret = -EINPROGRESS;
14015 + } else {
14016 + ablkcipher_unmap(ctx->qidev, edesc, req);
14017 + qi_cache_free(edesc);
14018 + }
14019 +
14020 + return ret;
14021 +}
14022 +
14023 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
14024 +{
14025 + return ablkcipher_crypt(req, true);
14026 +}
14027 +
14028 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
14029 +{
14030 + return ablkcipher_crypt(req, false);
14031 +}
14032 +
14033 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
14034 +{
14035 + struct ablkcipher_request *req = &creq->creq;
14036 + struct ablkcipher_edesc *edesc;
14037 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
14038 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
14039 + int ret;
14040 +
14041 + if (unlikely(caam_congested))
14042 + return -EAGAIN;
14043 +
14044 + /* allocate extended descriptor */
14045 + edesc = ablkcipher_giv_edesc_alloc(creq);
14046 + if (IS_ERR(edesc))
14047 + return PTR_ERR(edesc);
14048 +
14049 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
14050 + if (!ret) {
14051 + ret = -EINPROGRESS;
14052 + } else {
14053 + ablkcipher_unmap(ctx->qidev, edesc, req);
14054 + qi_cache_free(edesc);
14055 + }
14056 +
14057 + return ret;
14058 +}
14059 +
14060 +#define template_ablkcipher template_u.ablkcipher
14061 +struct caam_alg_template {
14062 + char name[CRYPTO_MAX_ALG_NAME];
14063 + char driver_name[CRYPTO_MAX_ALG_NAME];
14064 + unsigned int blocksize;
14065 + u32 type;
14066 + union {
14067 + struct ablkcipher_alg ablkcipher;
14068 + } template_u;
14069 + u32 class1_alg_type;
14070 + u32 class2_alg_type;
14071 +};
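+
+/*
+ * caam_alg_template describes one symmetric cipher to register: name and
+ * driver_name feed the crypto_alg strings, type selects a plain ABLKCIPHER
+ * or a GIVCIPHER (IV-generating) registration, and class1_alg_type/
+ * class2_alg_type carry the CAAM OP_ALG selector bits used when the shared
+ * descriptors are built.
+ */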
14072 +
14073 +static struct caam_alg_template driver_algs[] = {
14074 + /* ablkcipher descriptor */
14075 + {
14076 + .name = "cbc(aes)",
14077 + .driver_name = "cbc-aes-caam-qi",
14078 + .blocksize = AES_BLOCK_SIZE,
14079 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14080 + .template_ablkcipher = {
14081 + .setkey = ablkcipher_setkey,
14082 + .encrypt = ablkcipher_encrypt,
14083 + .decrypt = ablkcipher_decrypt,
14084 + .givencrypt = ablkcipher_givencrypt,
14085 + .geniv = "<built-in>",
14086 + .min_keysize = AES_MIN_KEY_SIZE,
14087 + .max_keysize = AES_MAX_KEY_SIZE,
14088 + .ivsize = AES_BLOCK_SIZE,
14089 + },
14090 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14091 + },
14092 + {
14093 + .name = "cbc(des3_ede)",
14094 + .driver_name = "cbc-3des-caam-qi",
14095 + .blocksize = DES3_EDE_BLOCK_SIZE,
14096 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14097 + .template_ablkcipher = {
14098 + .setkey = ablkcipher_setkey,
14099 + .encrypt = ablkcipher_encrypt,
14100 + .decrypt = ablkcipher_decrypt,
14101 + .givencrypt = ablkcipher_givencrypt,
14102 + .geniv = "<built-in>",
14103 + .min_keysize = DES3_EDE_KEY_SIZE,
14104 + .max_keysize = DES3_EDE_KEY_SIZE,
14105 + .ivsize = DES3_EDE_BLOCK_SIZE,
14106 + },
14107 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14108 + },
14109 + {
14110 + .name = "cbc(des)",
14111 + .driver_name = "cbc-des-caam-qi",
14112 + .blocksize = DES_BLOCK_SIZE,
14113 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14114 + .template_ablkcipher = {
14115 + .setkey = ablkcipher_setkey,
14116 + .encrypt = ablkcipher_encrypt,
14117 + .decrypt = ablkcipher_decrypt,
14118 + .givencrypt = ablkcipher_givencrypt,
14119 + .geniv = "<built-in>",
14120 + .min_keysize = DES_KEY_SIZE,
14121 + .max_keysize = DES_KEY_SIZE,
14122 + .ivsize = DES_BLOCK_SIZE,
14123 + },
14124 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14125 + },
14126 + {
14127 + .name = "ctr(aes)",
14128 + .driver_name = "ctr-aes-caam-qi",
14129 + .blocksize = 1,
14130 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
14131 + .template_ablkcipher = {
14132 + .setkey = ablkcipher_setkey,
14133 + .encrypt = ablkcipher_encrypt,
14134 + .decrypt = ablkcipher_decrypt,
14135 + .geniv = "chainiv",
14136 + .min_keysize = AES_MIN_KEY_SIZE,
14137 + .max_keysize = AES_MAX_KEY_SIZE,
14138 + .ivsize = AES_BLOCK_SIZE,
14139 + },
14140 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
14141 + },
14142 + {
14143 + .name = "rfc3686(ctr(aes))",
14144 + .driver_name = "rfc3686-ctr-aes-caam-qi",
14145 + .blocksize = 1,
14146 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
14147 + .template_ablkcipher = {
14148 + .setkey = ablkcipher_setkey,
14149 + .encrypt = ablkcipher_encrypt,
14150 + .decrypt = ablkcipher_decrypt,
14151 + .givencrypt = ablkcipher_givencrypt,
14152 + .geniv = "<built-in>",
14153 + .min_keysize = AES_MIN_KEY_SIZE +
14154 + CTR_RFC3686_NONCE_SIZE,
14155 + .max_keysize = AES_MAX_KEY_SIZE +
14156 + CTR_RFC3686_NONCE_SIZE,
14157 + .ivsize = CTR_RFC3686_IV_SIZE,
14158 + },
14159 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
14160 + },
14161 + {
14162 + .name = "xts(aes)",
14163 + .driver_name = "xts-aes-caam-qi",
14164 + .blocksize = AES_BLOCK_SIZE,
14165 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
14166 + .template_ablkcipher = {
14167 + .setkey = xts_ablkcipher_setkey,
14168 + .encrypt = ablkcipher_encrypt,
14169 + .decrypt = ablkcipher_decrypt,
14170 + .geniv = "eseqiv",
14171 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
14172 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
14173 + .ivsize = AES_BLOCK_SIZE,
14174 + },
14175 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
14176 + },
14177 +};
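+
+/*
+ * driver_algs[] above lists the plain block-cipher modes exposed through
+ * the QI interface: CBC for AES/3DES/DES, AES-CTR, RFC3686 CTR and AES-XTS.
+ * A minimal caller-side sketch, not part of this patch, using the skcipher
+ * front-end that wraps these ablkcipher registrations:
+ *
+ *	struct crypto_skcipher *tfm;
+ *	struct skcipher_request *sreq;
+ *
+ *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+ *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
+ *	sreq = skcipher_request_alloc(tfm, GFP_KERNEL);
+ *	skcipher_request_set_callback(sreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+ *				      cb, cb_arg);
+ *	skcipher_request_set_crypt(sreq, src_sg, dst_sg, nbytes, iv);
+ *	ret = crypto_skcipher_encrypt(sreq);
+ */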
14178 +
14179 +static struct caam_aead_alg driver_aeads[] = {
14180 + /* single-pass ipsec_esp descriptor */
14181 + {
14182 + .aead = {
14183 + .base = {
14184 + .cra_name = "authenc(hmac(md5),cbc(aes))",
14185 + .cra_driver_name = "authenc-hmac-md5-"
14186 + "cbc-aes-caam-qi",
14187 + .cra_blocksize = AES_BLOCK_SIZE,
14188 + },
14189 + .setkey = aead_setkey,
14190 + .setauthsize = aead_setauthsize,
14191 + .encrypt = aead_encrypt,
14192 + .decrypt = aead_decrypt,
14193 + .ivsize = AES_BLOCK_SIZE,
14194 + .maxauthsize = MD5_DIGEST_SIZE,
14195 + },
14196 + .caam = {
14197 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14198 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14199 + OP_ALG_AAI_HMAC_PRECOMP,
14200 + }
14201 + },
14202 + {
14203 + .aead = {
14204 + .base = {
14205 + .cra_name = "echainiv(authenc(hmac(md5),"
14206 + "cbc(aes)))",
14207 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14208 + "cbc-aes-caam-qi",
14209 + .cra_blocksize = AES_BLOCK_SIZE,
14210 + },
14211 + .setkey = aead_setkey,
14212 + .setauthsize = aead_setauthsize,
14213 + .encrypt = aead_encrypt,
14214 + .decrypt = aead_decrypt,
14215 + .ivsize = AES_BLOCK_SIZE,
14216 + .maxauthsize = MD5_DIGEST_SIZE,
14217 + },
14218 + .caam = {
14219 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14220 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14221 + OP_ALG_AAI_HMAC_PRECOMP,
14222 + .geniv = true,
14223 + }
14224 + },
14225 + {
14226 + .aead = {
14227 + .base = {
14228 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
14229 + .cra_driver_name = "authenc-hmac-sha1-"
14230 + "cbc-aes-caam-qi",
14231 + .cra_blocksize = AES_BLOCK_SIZE,
14232 + },
14233 + .setkey = aead_setkey,
14234 + .setauthsize = aead_setauthsize,
14235 + .encrypt = aead_encrypt,
14236 + .decrypt = aead_decrypt,
14237 + .ivsize = AES_BLOCK_SIZE,
14238 + .maxauthsize = SHA1_DIGEST_SIZE,
14239 + },
14240 + .caam = {
14241 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14242 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14243 + OP_ALG_AAI_HMAC_PRECOMP,
14244 + }
14245 + },
14246 + {
14247 + .aead = {
14248 + .base = {
14249 + .cra_name = "echainiv(authenc(hmac(sha1),"
14250 + "cbc(aes)))",
14251 + .cra_driver_name = "echainiv-authenc-"
14252 + "hmac-sha1-cbc-aes-caam-qi",
14253 + .cra_blocksize = AES_BLOCK_SIZE,
14254 + },
14255 + .setkey = aead_setkey,
14256 + .setauthsize = aead_setauthsize,
14257 + .encrypt = aead_encrypt,
14258 + .decrypt = aead_decrypt,
14259 + .ivsize = AES_BLOCK_SIZE,
14260 + .maxauthsize = SHA1_DIGEST_SIZE,
14261 + },
14262 + .caam = {
14263 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14264 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14265 + OP_ALG_AAI_HMAC_PRECOMP,
14266 + .geniv = true,
14267 + },
14268 + },
14269 + {
14270 + .aead = {
14271 + .base = {
14272 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
14273 + .cra_driver_name = "authenc-hmac-sha224-"
14274 + "cbc-aes-caam-qi",
14275 + .cra_blocksize = AES_BLOCK_SIZE,
14276 + },
14277 + .setkey = aead_setkey,
14278 + .setauthsize = aead_setauthsize,
14279 + .encrypt = aead_encrypt,
14280 + .decrypt = aead_decrypt,
14281 + .ivsize = AES_BLOCK_SIZE,
14282 + .maxauthsize = SHA224_DIGEST_SIZE,
14283 + },
14284 + .caam = {
14285 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14286 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14287 + OP_ALG_AAI_HMAC_PRECOMP,
14288 + }
14289 + },
14290 + {
14291 + .aead = {
14292 + .base = {
14293 + .cra_name = "echainiv(authenc(hmac(sha224),"
14294 + "cbc(aes)))",
14295 + .cra_driver_name = "echainiv-authenc-"
14296 + "hmac-sha224-cbc-aes-caam-qi",
14297 + .cra_blocksize = AES_BLOCK_SIZE,
14298 + },
14299 + .setkey = aead_setkey,
14300 + .setauthsize = aead_setauthsize,
14301 + .encrypt = aead_encrypt,
14302 + .decrypt = aead_decrypt,
14303 + .ivsize = AES_BLOCK_SIZE,
14304 + .maxauthsize = SHA224_DIGEST_SIZE,
14305 + },
14306 + .caam = {
14307 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14308 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14309 + OP_ALG_AAI_HMAC_PRECOMP,
14310 + .geniv = true,
14311 + }
14312 + },
14313 + {
14314 + .aead = {
14315 + .base = {
14316 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
14317 + .cra_driver_name = "authenc-hmac-sha256-"
14318 + "cbc-aes-caam-qi",
14319 + .cra_blocksize = AES_BLOCK_SIZE,
14320 + },
14321 + .setkey = aead_setkey,
14322 + .setauthsize = aead_setauthsize,
14323 + .encrypt = aead_encrypt,
14324 + .decrypt = aead_decrypt,
14325 + .ivsize = AES_BLOCK_SIZE,
14326 + .maxauthsize = SHA256_DIGEST_SIZE,
14327 + },
14328 + .caam = {
14329 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14330 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14331 + OP_ALG_AAI_HMAC_PRECOMP,
14332 + }
14333 + },
14334 + {
14335 + .aead = {
14336 + .base = {
14337 + .cra_name = "echainiv(authenc(hmac(sha256),"
14338 + "cbc(aes)))",
14339 + .cra_driver_name = "echainiv-authenc-"
14340 + "hmac-sha256-cbc-aes-"
14341 + "caam-qi",
14342 + .cra_blocksize = AES_BLOCK_SIZE,
14343 + },
14344 + .setkey = aead_setkey,
14345 + .setauthsize = aead_setauthsize,
14346 + .encrypt = aead_encrypt,
14347 + .decrypt = aead_decrypt,
14348 + .ivsize = AES_BLOCK_SIZE,
14349 + .maxauthsize = SHA256_DIGEST_SIZE,
14350 + },
14351 + .caam = {
14352 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14353 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14354 + OP_ALG_AAI_HMAC_PRECOMP,
14355 + .geniv = true,
14356 + }
14357 + },
14358 + {
14359 + .aead = {
14360 + .base = {
14361 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
14362 + .cra_driver_name = "authenc-hmac-sha384-"
14363 + "cbc-aes-caam-qi",
14364 + .cra_blocksize = AES_BLOCK_SIZE,
14365 + },
14366 + .setkey = aead_setkey,
14367 + .setauthsize = aead_setauthsize,
14368 + .encrypt = aead_encrypt,
14369 + .decrypt = aead_decrypt,
14370 + .ivsize = AES_BLOCK_SIZE,
14371 + .maxauthsize = SHA384_DIGEST_SIZE,
14372 + },
14373 + .caam = {
14374 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14375 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14376 + OP_ALG_AAI_HMAC_PRECOMP,
14377 + }
14378 + },
14379 + {
14380 + .aead = {
14381 + .base = {
14382 + .cra_name = "echainiv(authenc(hmac(sha384),"
14383 + "cbc(aes)))",
14384 + .cra_driver_name = "echainiv-authenc-"
14385 + "hmac-sha384-cbc-aes-"
14386 + "caam-qi",
14387 + .cra_blocksize = AES_BLOCK_SIZE,
14388 + },
14389 + .setkey = aead_setkey,
14390 + .setauthsize = aead_setauthsize,
14391 + .encrypt = aead_encrypt,
14392 + .decrypt = aead_decrypt,
14393 + .ivsize = AES_BLOCK_SIZE,
14394 + .maxauthsize = SHA384_DIGEST_SIZE,
14395 + },
14396 + .caam = {
14397 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14398 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14399 + OP_ALG_AAI_HMAC_PRECOMP,
14400 + .geniv = true,
14401 + }
14402 + },
14403 + {
14404 + .aead = {
14405 + .base = {
14406 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
14407 + .cra_driver_name = "authenc-hmac-sha512-"
14408 + "cbc-aes-caam-qi",
14409 + .cra_blocksize = AES_BLOCK_SIZE,
14410 + },
14411 + .setkey = aead_setkey,
14412 + .setauthsize = aead_setauthsize,
14413 + .encrypt = aead_encrypt,
14414 + .decrypt = aead_decrypt,
14415 + .ivsize = AES_BLOCK_SIZE,
14416 + .maxauthsize = SHA512_DIGEST_SIZE,
14417 + },
14418 + .caam = {
14419 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14420 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14421 + OP_ALG_AAI_HMAC_PRECOMP,
14422 + }
14423 + },
14424 + {
14425 + .aead = {
14426 + .base = {
14427 + .cra_name = "echainiv(authenc(hmac(sha512),"
14428 + "cbc(aes)))",
14429 + .cra_driver_name = "echainiv-authenc-"
14430 + "hmac-sha512-cbc-aes-"
14431 + "caam-qi",
14432 + .cra_blocksize = AES_BLOCK_SIZE,
14433 + },
14434 + .setkey = aead_setkey,
14435 + .setauthsize = aead_setauthsize,
14436 + .encrypt = aead_encrypt,
14437 + .decrypt = aead_decrypt,
14438 + .ivsize = AES_BLOCK_SIZE,
14439 + .maxauthsize = SHA512_DIGEST_SIZE,
14440 + },
14441 + .caam = {
14442 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
14443 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14444 + OP_ALG_AAI_HMAC_PRECOMP,
14445 + .geniv = true,
14446 + }
14447 + },
14448 + {
14449 + .aead = {
14450 + .base = {
14451 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
14452 + .cra_driver_name = "authenc-hmac-md5-"
14453 + "cbc-des3_ede-caam-qi",
14454 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14455 + },
14456 + .setkey = aead_setkey,
14457 + .setauthsize = aead_setauthsize,
14458 + .encrypt = aead_encrypt,
14459 + .decrypt = aead_decrypt,
14460 + .ivsize = DES3_EDE_BLOCK_SIZE,
14461 + .maxauthsize = MD5_DIGEST_SIZE,
14462 + },
14463 + .caam = {
14464 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14465 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14466 + OP_ALG_AAI_HMAC_PRECOMP,
14467 + }
14468 + },
14469 + {
14470 + .aead = {
14471 + .base = {
14472 + .cra_name = "echainiv(authenc(hmac(md5),"
14473 + "cbc(des3_ede)))",
14474 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14475 + "cbc-des3_ede-caam-qi",
14476 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14477 + },
14478 + .setkey = aead_setkey,
14479 + .setauthsize = aead_setauthsize,
14480 + .encrypt = aead_encrypt,
14481 + .decrypt = aead_decrypt,
14482 + .ivsize = DES3_EDE_BLOCK_SIZE,
14483 + .maxauthsize = MD5_DIGEST_SIZE,
14484 + },
14485 + .caam = {
14486 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14487 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14488 + OP_ALG_AAI_HMAC_PRECOMP,
14489 + .geniv = true,
14490 + }
14491 + },
14492 + {
14493 + .aead = {
14494 + .base = {
14495 + .cra_name = "authenc(hmac(sha1),"
14496 + "cbc(des3_ede))",
14497 + .cra_driver_name = "authenc-hmac-sha1-"
14498 + "cbc-des3_ede-caam-qi",
14499 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14500 + },
14501 + .setkey = aead_setkey,
14502 + .setauthsize = aead_setauthsize,
14503 + .encrypt = aead_encrypt,
14504 + .decrypt = aead_decrypt,
14505 + .ivsize = DES3_EDE_BLOCK_SIZE,
14506 + .maxauthsize = SHA1_DIGEST_SIZE,
14507 + },
14508 + .caam = {
14509 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14510 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14511 + OP_ALG_AAI_HMAC_PRECOMP,
14512 + },
14513 + },
14514 + {
14515 + .aead = {
14516 + .base = {
14517 + .cra_name = "echainiv(authenc(hmac(sha1),"
14518 + "cbc(des3_ede)))",
14519 + .cra_driver_name = "echainiv-authenc-"
14520 + "hmac-sha1-"
14521 + "cbc-des3_ede-caam-qi",
14522 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14523 + },
14524 + .setkey = aead_setkey,
14525 + .setauthsize = aead_setauthsize,
14526 + .encrypt = aead_encrypt,
14527 + .decrypt = aead_decrypt,
14528 + .ivsize = DES3_EDE_BLOCK_SIZE,
14529 + .maxauthsize = SHA1_DIGEST_SIZE,
14530 + },
14531 + .caam = {
14532 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14533 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14534 + OP_ALG_AAI_HMAC_PRECOMP,
14535 + .geniv = true,
14536 + }
14537 + },
14538 + {
14539 + .aead = {
14540 + .base = {
14541 + .cra_name = "authenc(hmac(sha224),"
14542 + "cbc(des3_ede))",
14543 + .cra_driver_name = "authenc-hmac-sha224-"
14544 + "cbc-des3_ede-caam-qi",
14545 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14546 + },
14547 + .setkey = aead_setkey,
14548 + .setauthsize = aead_setauthsize,
14549 + .encrypt = aead_encrypt,
14550 + .decrypt = aead_decrypt,
14551 + .ivsize = DES3_EDE_BLOCK_SIZE,
14552 + .maxauthsize = SHA224_DIGEST_SIZE,
14553 + },
14554 + .caam = {
14555 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14556 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14557 + OP_ALG_AAI_HMAC_PRECOMP,
14558 + },
14559 + },
14560 + {
14561 + .aead = {
14562 + .base = {
14563 + .cra_name = "echainiv(authenc(hmac(sha224),"
14564 + "cbc(des3_ede)))",
14565 + .cra_driver_name = "echainiv-authenc-"
14566 + "hmac-sha224-"
14567 + "cbc-des3_ede-caam-qi",
14568 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14569 + },
14570 + .setkey = aead_setkey,
14571 + .setauthsize = aead_setauthsize,
14572 + .encrypt = aead_encrypt,
14573 + .decrypt = aead_decrypt,
14574 + .ivsize = DES3_EDE_BLOCK_SIZE,
14575 + .maxauthsize = SHA224_DIGEST_SIZE,
14576 + },
14577 + .caam = {
14578 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14579 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14580 + OP_ALG_AAI_HMAC_PRECOMP,
14581 + .geniv = true,
14582 + }
14583 + },
14584 + {
14585 + .aead = {
14586 + .base = {
14587 + .cra_name = "authenc(hmac(sha256),"
14588 + "cbc(des3_ede))",
14589 + .cra_driver_name = "authenc-hmac-sha256-"
14590 + "cbc-des3_ede-caam-qi",
14591 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14592 + },
14593 + .setkey = aead_setkey,
14594 + .setauthsize = aead_setauthsize,
14595 + .encrypt = aead_encrypt,
14596 + .decrypt = aead_decrypt,
14597 + .ivsize = DES3_EDE_BLOCK_SIZE,
14598 + .maxauthsize = SHA256_DIGEST_SIZE,
14599 + },
14600 + .caam = {
14601 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14602 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14603 + OP_ALG_AAI_HMAC_PRECOMP,
14604 + },
14605 + },
14606 + {
14607 + .aead = {
14608 + .base = {
14609 + .cra_name = "echainiv(authenc(hmac(sha256),"
14610 + "cbc(des3_ede)))",
14611 + .cra_driver_name = "echainiv-authenc-"
14612 + "hmac-sha256-"
14613 + "cbc-des3_ede-caam-qi",
14614 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14615 + },
14616 + .setkey = aead_setkey,
14617 + .setauthsize = aead_setauthsize,
14618 + .encrypt = aead_encrypt,
14619 + .decrypt = aead_decrypt,
14620 + .ivsize = DES3_EDE_BLOCK_SIZE,
14621 + .maxauthsize = SHA256_DIGEST_SIZE,
14622 + },
14623 + .caam = {
14624 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14625 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14626 + OP_ALG_AAI_HMAC_PRECOMP,
14627 + .geniv = true,
14628 + }
14629 + },
14630 + {
14631 + .aead = {
14632 + .base = {
14633 + .cra_name = "authenc(hmac(sha384),"
14634 + "cbc(des3_ede))",
14635 + .cra_driver_name = "authenc-hmac-sha384-"
14636 + "cbc-des3_ede-caam-qi",
14637 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14638 + },
14639 + .setkey = aead_setkey,
14640 + .setauthsize = aead_setauthsize,
14641 + .encrypt = aead_encrypt,
14642 + .decrypt = aead_decrypt,
14643 + .ivsize = DES3_EDE_BLOCK_SIZE,
14644 + .maxauthsize = SHA384_DIGEST_SIZE,
14645 + },
14646 + .caam = {
14647 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14648 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14649 + OP_ALG_AAI_HMAC_PRECOMP,
14650 + },
14651 + },
14652 + {
14653 + .aead = {
14654 + .base = {
14655 + .cra_name = "echainiv(authenc(hmac(sha384),"
14656 + "cbc(des3_ede)))",
14657 + .cra_driver_name = "echainiv-authenc-"
14658 + "hmac-sha384-"
14659 + "cbc-des3_ede-caam-qi",
14660 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14661 + },
14662 + .setkey = aead_setkey,
14663 + .setauthsize = aead_setauthsize,
14664 + .encrypt = aead_encrypt,
14665 + .decrypt = aead_decrypt,
14666 + .ivsize = DES3_EDE_BLOCK_SIZE,
14667 + .maxauthsize = SHA384_DIGEST_SIZE,
14668 + },
14669 + .caam = {
14670 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14671 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14672 + OP_ALG_AAI_HMAC_PRECOMP,
14673 + .geniv = true,
14674 + }
14675 + },
14676 + {
14677 + .aead = {
14678 + .base = {
14679 + .cra_name = "authenc(hmac(sha512),"
14680 + "cbc(des3_ede))",
14681 + .cra_driver_name = "authenc-hmac-sha512-"
14682 + "cbc-des3_ede-caam-qi",
14683 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14684 + },
14685 + .setkey = aead_setkey,
14686 + .setauthsize = aead_setauthsize,
14687 + .encrypt = aead_encrypt,
14688 + .decrypt = aead_decrypt,
14689 + .ivsize = DES3_EDE_BLOCK_SIZE,
14690 + .maxauthsize = SHA512_DIGEST_SIZE,
14691 + },
14692 + .caam = {
14693 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14694 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14695 + OP_ALG_AAI_HMAC_PRECOMP,
14696 + },
14697 + },
14698 + {
14699 + .aead = {
14700 + .base = {
14701 + .cra_name = "echainiv(authenc(hmac(sha512),"
14702 + "cbc(des3_ede)))",
14703 + .cra_driver_name = "echainiv-authenc-"
14704 + "hmac-sha512-"
14705 + "cbc-des3_ede-caam-qi",
14706 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
14707 + },
14708 + .setkey = aead_setkey,
14709 + .setauthsize = aead_setauthsize,
14710 + .encrypt = aead_encrypt,
14711 + .decrypt = aead_decrypt,
14712 + .ivsize = DES3_EDE_BLOCK_SIZE,
14713 + .maxauthsize = SHA512_DIGEST_SIZE,
14714 + },
14715 + .caam = {
14716 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
14717 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14718 + OP_ALG_AAI_HMAC_PRECOMP,
14719 + .geniv = true,
14720 + }
14721 + },
14722 + {
14723 + .aead = {
14724 + .base = {
14725 + .cra_name = "authenc(hmac(md5),cbc(des))",
14726 + .cra_driver_name = "authenc-hmac-md5-"
14727 + "cbc-des-caam-qi",
14728 + .cra_blocksize = DES_BLOCK_SIZE,
14729 + },
14730 + .setkey = aead_setkey,
14731 + .setauthsize = aead_setauthsize,
14732 + .encrypt = aead_encrypt,
14733 + .decrypt = aead_decrypt,
14734 + .ivsize = DES_BLOCK_SIZE,
14735 + .maxauthsize = MD5_DIGEST_SIZE,
14736 + },
14737 + .caam = {
14738 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14739 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14740 + OP_ALG_AAI_HMAC_PRECOMP,
14741 + },
14742 + },
14743 + {
14744 + .aead = {
14745 + .base = {
14746 + .cra_name = "echainiv(authenc(hmac(md5),"
14747 + "cbc(des)))",
14748 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
14749 + "cbc-des-caam-qi",
14750 + .cra_blocksize = DES_BLOCK_SIZE,
14751 + },
14752 + .setkey = aead_setkey,
14753 + .setauthsize = aead_setauthsize,
14754 + .encrypt = aead_encrypt,
14755 + .decrypt = aead_decrypt,
14756 + .ivsize = DES_BLOCK_SIZE,
14757 + .maxauthsize = MD5_DIGEST_SIZE,
14758 + },
14759 + .caam = {
14760 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14761 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
14762 + OP_ALG_AAI_HMAC_PRECOMP,
14763 + .geniv = true,
14764 + }
14765 + },
14766 + {
14767 + .aead = {
14768 + .base = {
14769 + .cra_name = "authenc(hmac(sha1),cbc(des))",
14770 + .cra_driver_name = "authenc-hmac-sha1-"
14771 + "cbc-des-caam-qi",
14772 + .cra_blocksize = DES_BLOCK_SIZE,
14773 + },
14774 + .setkey = aead_setkey,
14775 + .setauthsize = aead_setauthsize,
14776 + .encrypt = aead_encrypt,
14777 + .decrypt = aead_decrypt,
14778 + .ivsize = DES_BLOCK_SIZE,
14779 + .maxauthsize = SHA1_DIGEST_SIZE,
14780 + },
14781 + .caam = {
14782 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14783 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14784 + OP_ALG_AAI_HMAC_PRECOMP,
14785 + },
14786 + },
14787 + {
14788 + .aead = {
14789 + .base = {
14790 + .cra_name = "echainiv(authenc(hmac(sha1),"
14791 + "cbc(des)))",
14792 + .cra_driver_name = "echainiv-authenc-"
14793 + "hmac-sha1-cbc-des-caam-qi",
14794 + .cra_blocksize = DES_BLOCK_SIZE,
14795 + },
14796 + .setkey = aead_setkey,
14797 + .setauthsize = aead_setauthsize,
14798 + .encrypt = aead_encrypt,
14799 + .decrypt = aead_decrypt,
14800 + .ivsize = DES_BLOCK_SIZE,
14801 + .maxauthsize = SHA1_DIGEST_SIZE,
14802 + },
14803 + .caam = {
14804 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14805 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
14806 + OP_ALG_AAI_HMAC_PRECOMP,
14807 + .geniv = true,
14808 + }
14809 + },
14810 + {
14811 + .aead = {
14812 + .base = {
14813 + .cra_name = "authenc(hmac(sha224),cbc(des))",
14814 + .cra_driver_name = "authenc-hmac-sha224-"
14815 + "cbc-des-caam-qi",
14816 + .cra_blocksize = DES_BLOCK_SIZE,
14817 + },
14818 + .setkey = aead_setkey,
14819 + .setauthsize = aead_setauthsize,
14820 + .encrypt = aead_encrypt,
14821 + .decrypt = aead_decrypt,
14822 + .ivsize = DES_BLOCK_SIZE,
14823 + .maxauthsize = SHA224_DIGEST_SIZE,
14824 + },
14825 + .caam = {
14826 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14827 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14828 + OP_ALG_AAI_HMAC_PRECOMP,
14829 + },
14830 + },
14831 + {
14832 + .aead = {
14833 + .base = {
14834 + .cra_name = "echainiv(authenc(hmac(sha224),"
14835 + "cbc(des)))",
14836 + .cra_driver_name = "echainiv-authenc-"
14837 + "hmac-sha224-cbc-des-"
14838 + "caam-qi",
14839 + .cra_blocksize = DES_BLOCK_SIZE,
14840 + },
14841 + .setkey = aead_setkey,
14842 + .setauthsize = aead_setauthsize,
14843 + .encrypt = aead_encrypt,
14844 + .decrypt = aead_decrypt,
14845 + .ivsize = DES_BLOCK_SIZE,
14846 + .maxauthsize = SHA224_DIGEST_SIZE,
14847 + },
14848 + .caam = {
14849 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14850 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
14851 + OP_ALG_AAI_HMAC_PRECOMP,
14852 + .geniv = true,
14853 + }
14854 + },
14855 + {
14856 + .aead = {
14857 + .base = {
14858 + .cra_name = "authenc(hmac(sha256),cbc(des))",
14859 + .cra_driver_name = "authenc-hmac-sha256-"
14860 + "cbc-des-caam-qi",
14861 + .cra_blocksize = DES_BLOCK_SIZE,
14862 + },
14863 + .setkey = aead_setkey,
14864 + .setauthsize = aead_setauthsize,
14865 + .encrypt = aead_encrypt,
14866 + .decrypt = aead_decrypt,
14867 + .ivsize = DES_BLOCK_SIZE,
14868 + .maxauthsize = SHA256_DIGEST_SIZE,
14869 + },
14870 + .caam = {
14871 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14872 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14873 + OP_ALG_AAI_HMAC_PRECOMP,
14874 + },
14875 + },
14876 + {
14877 + .aead = {
14878 + .base = {
14879 + .cra_name = "echainiv(authenc(hmac(sha256),"
14880 + "cbc(des)))",
14881 + .cra_driver_name = "echainiv-authenc-"
14882 + "hmac-sha256-cbc-des-"
14883 + "caam-qi",
14884 + .cra_blocksize = DES_BLOCK_SIZE,
14885 + },
14886 + .setkey = aead_setkey,
14887 + .setauthsize = aead_setauthsize,
14888 + .encrypt = aead_encrypt,
14889 + .decrypt = aead_decrypt,
14890 + .ivsize = DES_BLOCK_SIZE,
14891 + .maxauthsize = SHA256_DIGEST_SIZE,
14892 + },
14893 + .caam = {
14894 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14895 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
14896 + OP_ALG_AAI_HMAC_PRECOMP,
14897 + .geniv = true,
14898 + },
14899 + },
14900 + {
14901 + .aead = {
14902 + .base = {
14903 + .cra_name = "authenc(hmac(sha384),cbc(des))",
14904 + .cra_driver_name = "authenc-hmac-sha384-"
14905 + "cbc-des-caam-qi",
14906 + .cra_blocksize = DES_BLOCK_SIZE,
14907 + },
14908 + .setkey = aead_setkey,
14909 + .setauthsize = aead_setauthsize,
14910 + .encrypt = aead_encrypt,
14911 + .decrypt = aead_decrypt,
14912 + .ivsize = DES_BLOCK_SIZE,
14913 + .maxauthsize = SHA384_DIGEST_SIZE,
14914 + },
14915 + .caam = {
14916 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14917 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14918 + OP_ALG_AAI_HMAC_PRECOMP,
14919 + },
14920 + },
14921 + {
14922 + .aead = {
14923 + .base = {
14924 + .cra_name = "echainiv(authenc(hmac(sha384),"
14925 + "cbc(des)))",
14926 + .cra_driver_name = "echainiv-authenc-"
14927 + "hmac-sha384-cbc-des-"
14928 + "caam-qi",
14929 + .cra_blocksize = DES_BLOCK_SIZE,
14930 + },
14931 + .setkey = aead_setkey,
14932 + .setauthsize = aead_setauthsize,
14933 + .encrypt = aead_encrypt,
14934 + .decrypt = aead_decrypt,
14935 + .ivsize = DES_BLOCK_SIZE,
14936 + .maxauthsize = SHA384_DIGEST_SIZE,
14937 + },
14938 + .caam = {
14939 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14940 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
14941 + OP_ALG_AAI_HMAC_PRECOMP,
14942 + .geniv = true,
14943 + }
14944 + },
14945 + {
14946 + .aead = {
14947 + .base = {
14948 + .cra_name = "authenc(hmac(sha512),cbc(des))",
14949 + .cra_driver_name = "authenc-hmac-sha512-"
14950 + "cbc-des-caam-qi",
14951 + .cra_blocksize = DES_BLOCK_SIZE,
14952 + },
14953 + .setkey = aead_setkey,
14954 + .setauthsize = aead_setauthsize,
14955 + .encrypt = aead_encrypt,
14956 + .decrypt = aead_decrypt,
14957 + .ivsize = DES_BLOCK_SIZE,
14958 + .maxauthsize = SHA512_DIGEST_SIZE,
14959 + },
14960 + .caam = {
14961 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14962 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14963 + OP_ALG_AAI_HMAC_PRECOMP,
14964 + }
14965 + },
14966 + {
14967 + .aead = {
14968 + .base = {
14969 + .cra_name = "echainiv(authenc(hmac(sha512),"
14970 + "cbc(des)))",
14971 + .cra_driver_name = "echainiv-authenc-"
14972 + "hmac-sha512-cbc-des-"
14973 + "caam-qi",
14974 + .cra_blocksize = DES_BLOCK_SIZE,
14975 + },
14976 + .setkey = aead_setkey,
14977 + .setauthsize = aead_setauthsize,
14978 + .encrypt = aead_encrypt,
14979 + .decrypt = aead_decrypt,
14980 + .ivsize = DES_BLOCK_SIZE,
14981 + .maxauthsize = SHA512_DIGEST_SIZE,
14982 + },
14983 + .caam = {
14984 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
14985 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
14986 + OP_ALG_AAI_HMAC_PRECOMP,
14987 + .geniv = true,
14988 + }
14989 + },
14990 + {
14991 + .aead = {
14992 + .base = {
14993 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
14994 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
14995 + .cra_blocksize = AES_BLOCK_SIZE,
14996 + },
14997 + .setkey = tls_setkey,
14998 + .setauthsize = tls_setauthsize,
14999 + .encrypt = tls_encrypt,
15000 + .decrypt = tls_decrypt,
15001 + .ivsize = AES_BLOCK_SIZE,
15002 + .maxauthsize = SHA1_DIGEST_SIZE,
15003 + },
15004 + .caam = {
15005 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
15006 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
15007 + OP_ALG_AAI_HMAC_PRECOMP,
15008 + }
15009 + }
15010 +};
15011 +
15012 +struct caam_crypto_alg {
15013 + struct list_head entry;
15014 + struct crypto_alg crypto_alg;
15015 + struct caam_alg_entry caam;
15016 +};
15017 +
15018 +static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
15019 +{
15020 + struct caam_drv_private *priv;
15021 + /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
15022 + static const u8 digest_size[] = {
15023 + MD5_DIGEST_SIZE,
15024 + SHA1_DIGEST_SIZE,
15025 + SHA224_DIGEST_SIZE,
15026 + SHA256_DIGEST_SIZE,
15027 + SHA384_DIGEST_SIZE,
15028 + SHA512_DIGEST_SIZE
15029 + };
15030 + u8 op_id;
15031 +
15032 + /*
15033 + * distribute tfms across job rings to ensure in-order
15034 + * crypto request processing per tfm
15035 + */
15036 + ctx->jrdev = caam_jr_alloc();
15037 + if (IS_ERR(ctx->jrdev)) {
15038 + pr_err("Job Ring Device allocation for transform failed\n");
15039 + return PTR_ERR(ctx->jrdev);
15040 + }
15041 +
15042 + ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
15043 + DMA_TO_DEVICE);
15044 + if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
15045 + dev_err(ctx->jrdev, "unable to map key\n");
15046 + caam_jr_free(ctx->jrdev);
15047 + return -ENOMEM;
15048 + }
15049 +
15050 + /* copy descriptor header template value */
15051 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
15052 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
15053 +
15054 + if (ctx->adata.algtype) {
15055 + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
15056 + >> OP_ALG_ALGSEL_SHIFT;
15057 + if (op_id < ARRAY_SIZE(digest_size)) {
15058 + ctx->authsize = digest_size[op_id];
15059 + } else {
15060 + dev_err(ctx->jrdev,
15061 + "incorrect op_id %d; must be less than %zu\n",
15062 + op_id, ARRAY_SIZE(digest_size));
15063 + caam_jr_free(ctx->jrdev);
15064 + return -EINVAL;
15065 + }
15066 + } else {
15067 + ctx->authsize = 0;
15068 + }
15069 +
15070 + priv = dev_get_drvdata(ctx->jrdev->parent);
15071 + ctx->qidev = priv->qidev;
15072 +
15073 + spin_lock_init(&ctx->lock);
15074 + ctx->drv_ctx[ENCRYPT] = NULL;
15075 + ctx->drv_ctx[DECRYPT] = NULL;
15076 + ctx->drv_ctx[GIVENCRYPT] = NULL;
15077 +
15078 + return 0;
15079 +}
15080 +
15081 +static int caam_cra_init(struct crypto_tfm *tfm)
15082 +{
15083 + struct crypto_alg *alg = tfm->__crt_alg;
15084 + struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
15085 + crypto_alg);
15086 + struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
15087 +
15088 + return caam_init_common(ctx, &caam_alg->caam);
15089 +}
15090 +
15091 +static int caam_aead_init(struct crypto_aead *tfm)
15092 +{
15093 + struct aead_alg *alg = crypto_aead_alg(tfm);
15094 + struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
15095 + aead);
15096 + struct caam_ctx *ctx = crypto_aead_ctx(tfm);
15097 +
15098 + return caam_init_common(ctx, &caam_alg->caam);
15099 +}
15100 +
15101 +static void caam_exit_common(struct caam_ctx *ctx)
15102 +{
15103 + caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
15104 + caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
15105 + caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
15106 +
15107 + dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
15108 + DMA_TO_DEVICE);
15109 +
15110 + caam_jr_free(ctx->jrdev);
15111 +}
15112 +
15113 +static void caam_cra_exit(struct crypto_tfm *tfm)
15114 +{
15115 + caam_exit_common(crypto_tfm_ctx(tfm));
15116 +}
15117 +
15118 +static void caam_aead_exit(struct crypto_aead *tfm)
15119 +{
15120 + caam_exit_common(crypto_aead_ctx(tfm));
15121 +}
15122 +
15123 +static struct list_head alg_list;
15124 +static void __exit caam_qi_algapi_exit(void)
15125 +{
15126 + struct caam_crypto_alg *t_alg, *n;
15127 + int i;
15128 +
15129 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
15130 + struct caam_aead_alg *t_alg = driver_aeads + i;
15131 +
15132 + if (t_alg->registered)
15133 + crypto_unregister_aead(&t_alg->aead);
15134 + }
15135 +
15136 + if (!alg_list.next)
15137 + return;
15138 +
15139 + list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
15140 + crypto_unregister_alg(&t_alg->crypto_alg);
15141 + list_del(&t_alg->entry);
15142 + kfree(t_alg);
15143 + }
15144 +}
15145 +
15146 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
15147 + *template)
15148 +{
15149 + struct caam_crypto_alg *t_alg;
15150 + struct crypto_alg *alg;
15151 +
15152 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
15153 + if (!t_alg)
15154 + return ERR_PTR(-ENOMEM);
15155 +
15156 + alg = &t_alg->crypto_alg;
15157 +
15158 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
15159 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
15160 + template->driver_name);
15161 + alg->cra_module = THIS_MODULE;
15162 + alg->cra_init = caam_cra_init;
15163 + alg->cra_exit = caam_cra_exit;
15164 + alg->cra_priority = CAAM_CRA_PRIORITY;
15165 + alg->cra_blocksize = template->blocksize;
15166 + alg->cra_alignmask = 0;
15167 + alg->cra_ctxsize = sizeof(struct caam_ctx);
15168 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
15169 + template->type;
15170 + switch (template->type) {
15171 + case CRYPTO_ALG_TYPE_GIVCIPHER:
15172 + alg->cra_type = &crypto_givcipher_type;
15173 + alg->cra_ablkcipher = template->template_ablkcipher;
15174 + break;
15175 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
15176 + alg->cra_type = &crypto_ablkcipher_type;
15177 + alg->cra_ablkcipher = template->template_ablkcipher;
15178 + break;
15179 + }
15180 +
15181 + t_alg->caam.class1_alg_type = template->class1_alg_type;
15182 + t_alg->caam.class2_alg_type = template->class2_alg_type;
15183 +
15184 + return t_alg;
15185 +}
15186 +
15187 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
15188 +{
15189 + struct aead_alg *alg = &t_alg->aead;
15190 +
15191 + alg->base.cra_module = THIS_MODULE;
15192 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
15193 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
15194 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
15195 +
15196 + alg->init = caam_aead_init;
15197 + alg->exit = caam_aead_exit;
15198 +}
15199 +
15200 +static int __init caam_qi_algapi_init(void)
15201 +{
15202 + struct device_node *dev_node;
15203 + struct platform_device *pdev;
15204 + struct device *ctrldev;
15205 + struct caam_drv_private *priv;
15206 + int i = 0, err = 0;
15207 + u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
15208 + unsigned int md_limit = SHA512_DIGEST_SIZE;
15209 + bool registered = false;
15210 +
15211 + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
15212 + if (!dev_node) {
15213 + dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
15214 + if (!dev_node)
15215 + return -ENODEV;
15216 + }
15217 +
15218 + pdev = of_find_device_by_node(dev_node);
15219 + of_node_put(dev_node);
15220 + if (!pdev)
15221 + return -ENODEV;
15222 +
15223 + ctrldev = &pdev->dev;
15224 + priv = dev_get_drvdata(ctrldev);
15225 +
15226 + /*
15227 + * If priv is NULL, it's probably because the caam driver wasn't
15228 + * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
15229 + */
15230 + if (!priv || !priv->qi_present)
15231 + return -ENODEV;
15232 +
15233 + if (caam_dpaa2) {
15234 + dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
15235 + return -ENODEV;
15236 + }
15237 +
15238 + INIT_LIST_HEAD(&alg_list);
15239 +
15240 + /*
15241 + * Register crypto algorithms the device supports.
15242 + * First, detect presence and attributes of DES, AES, and MD blocks.
15243 + */
15244 + cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
15245 + cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
15246 + des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
15247 + aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
15248 + md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
15249 +
15250 + /* If MD is present, limit digest size based on LP256 */
15251 + if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
15252 + md_limit = SHA256_DIGEST_SIZE;
15253 +
15254 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
15255 + struct caam_crypto_alg *t_alg;
15256 + struct caam_alg_template *alg = driver_algs + i;
15257 + u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
15258 +
15259 + /* Skip DES algorithms if not supported by device */
15260 + if (!des_inst &&
15261 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
15262 + (alg_sel == OP_ALG_ALGSEL_DES)))
15263 + continue;
15264 +
15265 + /* Skip AES algorithms if not supported by device */
15266 + if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
15267 + continue;
15268 +
15269 + t_alg = caam_alg_alloc(alg);
15270 + if (IS_ERR(t_alg)) {
15271 + err = PTR_ERR(t_alg);
15272 + dev_warn(priv->qidev, "%s alg allocation failed\n",
15273 + alg->driver_name);
15274 + continue;
15275 + }
15276 +
15277 + err = crypto_register_alg(&t_alg->crypto_alg);
15278 + if (err) {
15279 + dev_warn(priv->qidev, "%s alg registration failed\n",
15280 + t_alg->crypto_alg.cra_driver_name);
15281 + kfree(t_alg);
15282 + continue;
15283 + }
15284 +
15285 + list_add_tail(&t_alg->entry, &alg_list);
15286 + registered = true;
15287 + }
15288 +
15289 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
15290 + struct caam_aead_alg *t_alg = driver_aeads + i;
15291 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
15292 + OP_ALG_ALGSEL_MASK;
15293 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
15294 + OP_ALG_ALGSEL_MASK;
15295 + u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
15296 +
15297 + /* Skip DES algorithms if not supported by device */
15298 + if (!des_inst &&
15299 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
15300 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
15301 + continue;
15302 +
15303 + /* Skip AES algorithms if not supported by device */
15304 + if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
15305 + continue;
15306 +
15307 + /*
15308 + * Check support for AES algorithms not available
15309 + * on LP devices.
15310 + */
15311 + if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
15312 + (alg_aai == OP_ALG_AAI_GCM))
15313 + continue;
15314 +
15315 + /*
15316 + * Skip algorithms requiring message digests
15317 + * if MD or MD size is not supported by device.
15318 + */
15319 + if (c2_alg_sel &&
15320 + (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
15321 + continue;
15322 +
15323 + caam_aead_alg_init(t_alg);
15324 +
15325 + err = crypto_register_aead(&t_alg->aead);
15326 + if (err) {
15327 + pr_warn("%s alg registration failed\n",
15328 + t_alg->aead.base.cra_driver_name);
15329 + continue;
15330 + }
15331 +
15332 + t_alg->registered = true;
15333 + registered = true;
15334 + }
15335 +
15336 + if (registered)
15337 + dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
15338 +
15339 + return err;
15340 +}
15341 +
15342 +module_init(caam_qi_algapi_init);
15343 +module_exit(caam_qi_algapi_exit);
15344 +
15345 +MODULE_LICENSE("GPL");
15346 +MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
15347 +MODULE_AUTHOR("Freescale Semiconductor");
15348 --- /dev/null
15349 +++ b/drivers/crypto/caam/caamalg_qi2.c
15350 @@ -0,0 +1,5920 @@
15351 +/*
15352 + * Copyright 2015-2016 Freescale Semiconductor Inc.
15353 + * Copyright 2017 NXP
15354 + *
15355 + * Redistribution and use in source and binary forms, with or without
15356 + * modification, are permitted provided that the following conditions are met:
15357 + * * Redistributions of source code must retain the above copyright
15358 + * notice, this list of conditions and the following disclaimer.
15359 + * * Redistributions in binary form must reproduce the above copyright
15360 + * notice, this list of conditions and the following disclaimer in the
15361 + * documentation and/or other materials provided with the distribution.
15362 + * * Neither the names of the above-listed copyright holders nor the
15363 + * names of any contributors may be used to endorse or promote products
15364 + * derived from this software without specific prior written permission.
15365 + *
15366 + *
15367 + * ALTERNATIVELY, this software may be distributed under the terms of the
15368 + * GNU General Public License ("GPL") as published by the Free Software
15369 + * Foundation, either version 2 of that License or (at your option) any
15370 + * later version.
15371 + *
15372 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15373 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15374 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
15375 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
15376 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
15377 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
15378 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
15379 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
15380 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
15381 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
15382 + * POSSIBILITY OF SUCH DAMAGE.
15383 + */
15384 +
15385 +#include "compat.h"
15386 +#include "regs.h"
15387 +#include "caamalg_qi2.h"
15388 +#include "dpseci_cmd.h"
15389 +#include "desc_constr.h"
15390 +#include "error.h"
15391 +#include "sg_sw_sec4.h"
15392 +#include "sg_sw_qm2.h"
15393 +#include "key_gen.h"
15394 +#include "caamalg_desc.h"
15395 +#include "caamhash_desc.h"
15396 +#include "../../../drivers/staging/fsl-mc/include/mc.h"
15397 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
15398 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
15399 +
15400 +#define CAAM_CRA_PRIORITY 2000
15401 +
15402 +/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size and max split key size */
15403 +#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
15404 + SHA512_DIGEST_SIZE * 2)
15405 +
15406 +#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
15407 +bool caam_little_end;
15408 +EXPORT_SYMBOL(caam_little_end);
15409 +bool caam_imx;
15410 +EXPORT_SYMBOL(caam_imx);
15411 +#endif
15412 +
15413 +/*
15414 + * This is a cache of buffers, from which users of the CAAM QI driver
15415 + * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
15416 + * NOTE: A more elegant solution would be to have some headroom in the frames
15417 + * being processed. The dpaa2-eth driver could add such headroom, but
15418 + * userspace applications processing the frames cannot know of that
15419 + * limitation. So for now, this will work.
15420 + * NOTE: The memcache is SMP-safe. There is no need to handle spinlocks in here.
15421 + */
15422 +static struct kmem_cache *qi_cache;
15423 +
15424 +struct caam_alg_entry {
15425 + struct device *dev;
15426 + int class1_alg_type;
15427 + int class2_alg_type;
15428 + bool rfc3686;
15429 + bool geniv;
15430 +};
15431 +
15432 +struct caam_aead_alg {
15433 + struct aead_alg aead;
15434 + struct caam_alg_entry caam;
15435 + bool registered;
15436 +};
15437 +
15438 +/**
15439 + * caam_ctx - per-session context
15440 + * @flc: Flow Contexts array
15441 + * @key: virtual address of the key(s): [authentication key], encryption key
15442 + * @flc_dma: I/O virtual addresses of the Flow Contexts
15443 + * @key_dma: I/O virtual address of the key
15444 + * @dev: dpseci device
15445 + * @adata: authentication algorithm details
15446 + * @cdata: encryption algorithm details
15447 + * @authsize: authentication tag (a.k.a. ICV / MAC) size
15448 + */
15449 +struct caam_ctx {
15450 + struct caam_flc flc[NUM_OP];
15451 + u8 key[CAAM_MAX_KEY_SIZE];
15452 + dma_addr_t flc_dma[NUM_OP];
15453 + dma_addr_t key_dma;
15454 + struct device *dev;
15455 + struct alginfo adata;
15456 + struct alginfo cdata;
15457 + unsigned int authsize;
15458 +};
15459 +
15460 +void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
15461 + dma_addr_t iova_addr)
15462 +{
15463 + phys_addr_t phys_addr;
15464 +
15465 + phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
15466 + iova_addr;
15467 +
15468 + return phys_to_virt(phys_addr);
15469 +}
15470 +
15471 +/*
15472 + * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
15473 + *
15474 + * Allocate data on the hotpath. Instead of using kzalloc, one can use the
15475 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
15476 + * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
15477 + * hosting 16 SG entries.
15478 + *
15479 + * @flags - flags that would be used for the equivalent kmalloc(..) call
15480 + *
15481 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
15482 + */
15483 +static inline void *qi_cache_zalloc(gfp_t flags)
15484 +{
15485 + return kmem_cache_zalloc(qi_cache, flags);
15486 +}
15487 +
15488 +/*
15489 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
15490 + *
15491 + * @obj - buffer previously allocated by qi_cache_zalloc
15492 + *
15493 + * No checking is being done, the call is a passthrough call to
15494 + * kmem_cache_free(...)
15495 + */
15496 +static inline void qi_cache_free(void *obj)
15497 +{
15498 + kmem_cache_free(qi_cache, obj);
15499 +}
15500 +
15501 +static struct caam_request *to_caam_req(struct crypto_async_request *areq)
15502 +{
15503 + switch (crypto_tfm_alg_type(areq->tfm)) {
15504 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
15505 + case CRYPTO_ALG_TYPE_GIVCIPHER:
15506 + return ablkcipher_request_ctx(ablkcipher_request_cast(areq));
15507 + case CRYPTO_ALG_TYPE_AEAD:
15508 + return aead_request_ctx(container_of(areq, struct aead_request,
15509 + base));
15510 + case CRYPTO_ALG_TYPE_AHASH:
15511 + return ahash_request_ctx(ahash_request_cast(areq));
15512 + default:
15513 + return ERR_PTR(-EINVAL);
15514 + }
15515 +}
15516 +
15517 +static void caam_unmap(struct device *dev, struct scatterlist *src,
15518 + struct scatterlist *dst, int src_nents,
15519 + int dst_nents, dma_addr_t iv_dma, int ivsize,
15520 + enum optype op_type, dma_addr_t qm_sg_dma,
15521 + int qm_sg_bytes)
15522 +{
15523 + if (dst != src) {
15524 + if (src_nents)
15525 + dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
15526 + dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
15527 + } else {
15528 + dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
15529 + }
15530 +
15531 + if (iv_dma)
15532 + dma_unmap_single(dev, iv_dma, ivsize,
15533 + op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
15534 + DMA_TO_DEVICE);
15535 +
15536 + if (qm_sg_bytes)
15537 + dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
15538 +}
15539 +
15540 +static int aead_set_sh_desc(struct crypto_aead *aead)
15541 +{
15542 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15543 + typeof(*alg), aead);
15544 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15545 + unsigned int ivsize = crypto_aead_ivsize(aead);
15546 + struct device *dev = ctx->dev;
15547 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
15548 + struct caam_flc *flc;
15549 + u32 *desc;
15550 + u32 ctx1_iv_off = 0;
15551 + u32 *nonce = NULL;
15552 + unsigned int data_len[2];
15553 + u32 inl_mask;
15554 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
15555 + OP_ALG_AAI_CTR_MOD128);
15556 + const bool is_rfc3686 = alg->caam.rfc3686;
15557 +
15558 + if (!ctx->cdata.keylen || !ctx->authsize)
15559 + return 0;
15560 +
15561 + /*
15562 + * AES-CTR needs to load IV in CONTEXT1 reg
15563 + * at an offset of 128bits (16bytes)
15564 + * CONTEXT1[255:128] = IV
15565 + */
15566 + if (ctr_mode)
15567 + ctx1_iv_off = 16;
15568 +
15569 + /*
15570 + * RFC3686 specific:
15571 + * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
15572 + */
15573 + if (is_rfc3686) {
15574 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
15575 + nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
15576 + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
15577 + }
15578 +
15579 + data_len[0] = ctx->adata.keylen_pad;
15580 + data_len[1] = ctx->cdata.keylen;
15581 +
15582 + /* aead_encrypt shared descriptor */
15583 + if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
15584 + DESC_QI_AEAD_ENC_LEN) +
15585 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15586 + DESC_JOB_IO_LEN, data_len, &inl_mask,
15587 + ARRAY_SIZE(data_len)) < 0)
15588 + return -EINVAL;
15589 +
15590 + if (inl_mask & 1)
15591 + ctx->adata.key_virt = ctx->key;
15592 + else
15593 + ctx->adata.key_dma = ctx->key_dma;
15594 +
15595 + if (inl_mask & 2)
15596 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15597 + else
15598 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15599 +
15600 + ctx->adata.key_inline = !!(inl_mask & 1);
15601 + ctx->cdata.key_inline = !!(inl_mask & 2);
15602 +
15603 + flc = &ctx->flc[ENCRYPT];
15604 + desc = flc->sh_desc;
15605 +
15606 + if (alg->caam.geniv)
15607 + cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
15608 + ivsize, ctx->authsize, is_rfc3686,
15609 + nonce, ctx1_iv_off, true,
15610 + priv->sec_attr.era);
15611 + else
15612 + cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
15613 + ivsize, ctx->authsize, is_rfc3686, nonce,
15614 + ctx1_iv_off, true, priv->sec_attr.era);
15615 +
15616 + flc->flc[1] = desc_len(desc); /* SDL */
15617 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
15618 + sizeof(flc->flc) + desc_bytes(desc),
15619 + DMA_BIDIRECTIONAL);
15620 +
15621 + /* aead_decrypt shared descriptor */
15622 + if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
15623 + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
15624 + DESC_JOB_IO_LEN, data_len, &inl_mask,
15625 + ARRAY_SIZE(data_len)) < 0)
15626 + return -EINVAL;
15627 +
15628 + if (inl_mask & 1)
15629 + ctx->adata.key_virt = ctx->key;
15630 + else
15631 + ctx->adata.key_dma = ctx->key_dma;
15632 +
15633 + if (inl_mask & 2)
15634 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
15635 + else
15636 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
15637 +
15638 + ctx->adata.key_inline = !!(inl_mask & 1);
15639 + ctx->cdata.key_inline = !!(inl_mask & 2);
15640 +
15641 + flc = &ctx->flc[DECRYPT];
15642 + desc = flc->sh_desc;
15643 + cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
15644 + ivsize, ctx->authsize, alg->caam.geniv,
15645 + is_rfc3686, nonce, ctx1_iv_off, true,
15646 + priv->sec_attr.era);
15647 + flc->flc[1] = desc_len(desc); /* SDL */
15648 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
15649 + sizeof(flc->flc) + desc_bytes(desc),
15650 + DMA_BIDIRECTIONAL);
15651 +
15652 + return 0;
15653 +}
15654 +
15655 +static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
15656 +{
15657 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
15658 +
15659 + ctx->authsize = authsize;
15660 + aead_set_sh_desc(authenc);
15661 +
15662 + return 0;
15663 +}
15664 +
15665 +struct split_key_sh_result {
15666 + struct completion completion;
15667 + int err;
15668 + struct device *dev;
15669 +};
15670 +
15671 +static void split_key_sh_done(void *cbk_ctx, u32 err)
15672 +{
15673 + struct split_key_sh_result *res = cbk_ctx;
15674 +
15675 +#ifdef DEBUG
15676 + dev_err(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
15677 +#endif
15678 +
15679 + if (err)
15680 + caam_qi2_strstatus(res->dev, err);
15681 +
15682 + res->err = err;
15683 + complete(&res->completion);
15684 +}
15685 +
15686 +static int aead_setkey(struct crypto_aead *aead, const u8 *key,
15687 + unsigned int keylen)
15688 +{
15689 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15690 + struct device *dev = ctx->dev;
15691 + struct crypto_authenc_keys keys;
15692 +
15693 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
15694 + goto badkey;
15695 +
15696 +#ifdef DEBUG
15697 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
15698 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
15699 + keys.authkeylen);
15700 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
15701 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
15702 +#endif
15703 +
15704 + ctx->adata.keylen = keys.authkeylen;
15705 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
15706 + OP_ALG_ALGSEL_MASK);
15707 +
15708 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
15709 + goto badkey;
15710 +
15711 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
15712 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
15713 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
15714 + keys.enckeylen, DMA_BIDIRECTIONAL);
15715 +#ifdef DEBUG
15716 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
15717 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
15718 + ctx->adata.keylen_pad + keys.enckeylen, 1);
15719 +#endif
15720 +
15721 + ctx->cdata.keylen = keys.enckeylen;
15722 +
15723 + return aead_set_sh_desc(aead);
15724 +badkey:
15725 + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
15726 + return -EINVAL;
15727 +}
15728 +
15729 +static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
15730 + bool encrypt)
15731 +{
15732 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
15733 + struct caam_request *req_ctx = aead_request_ctx(req);
15734 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15735 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15736 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
15737 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
15738 + typeof(*alg), aead);
15739 + struct device *dev = ctx->dev;
15740 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
15741 + GFP_KERNEL : GFP_ATOMIC;
15742 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15743 + struct aead_edesc *edesc;
15744 + dma_addr_t qm_sg_dma, iv_dma = 0;
15745 + int ivsize = 0;
15746 + unsigned int authsize = ctx->authsize;
15747 + int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
15748 + int in_len, out_len;
15749 + struct dpaa2_sg_entry *sg_table;
15750 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15751 +
15752 + /* allocate space for base edesc and link tables */
15753 + edesc = qi_cache_zalloc(GFP_DMA | flags);
15754 + if (unlikely(!edesc)) {
15755 + dev_err(dev, "could not allocate extended descriptor\n");
15756 + return ERR_PTR(-ENOMEM);
15757 + }
15758 +
15759 + if (unlikely(req->dst != req->src)) {
15760 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15761 + req->cryptlen);
15762 + if (unlikely(src_nents < 0)) {
15763 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15764 + req->assoclen + req->cryptlen);
15765 + qi_cache_free(edesc);
15766 + return ERR_PTR(src_nents);
15767 + }
15768 +
15769 + dst_nents = sg_nents_for_len(req->dst, req->assoclen +
15770 + req->cryptlen +
15771 + (encrypt ? authsize :
15772 + (-authsize)));
15773 + if (unlikely(dst_nents < 0)) {
15774 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15775 + req->assoclen + req->cryptlen +
15776 + (encrypt ? authsize : (-authsize)));
15777 + qi_cache_free(edesc);
15778 + return ERR_PTR(dst_nents);
15779 + }
15780 +
15781 + if (src_nents) {
15782 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15783 + DMA_TO_DEVICE);
15784 + if (unlikely(!mapped_src_nents)) {
15785 + dev_err(dev, "unable to map source\n");
15786 + qi_cache_free(edesc);
15787 + return ERR_PTR(-ENOMEM);
15788 + }
15789 + } else {
15790 + mapped_src_nents = 0;
15791 + }
15792 +
15793 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
15794 + DMA_FROM_DEVICE);
15795 + if (unlikely(!mapped_dst_nents)) {
15796 + dev_err(dev, "unable to map destination\n");
15797 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
15798 + qi_cache_free(edesc);
15799 + return ERR_PTR(-ENOMEM);
15800 + }
15801 + } else {
15802 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15803 + req->cryptlen +
15804 + (encrypt ? authsize : 0));
15805 + if (unlikely(src_nents < 0)) {
15806 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15807 + req->assoclen + req->cryptlen +
15808 + (encrypt ? authsize : 0));
15809 + qi_cache_free(edesc);
15810 + return ERR_PTR(src_nents);
15811 + }
15812 +
15813 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15814 + DMA_BIDIRECTIONAL);
15815 + if (unlikely(!mapped_src_nents)) {
15816 + dev_err(dev, "unable to map source\n");
15817 + qi_cache_free(edesc);
15818 + return ERR_PTR(-ENOMEM);
15819 + }
15820 + }
15821 +
15822 + if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
15823 + ivsize = crypto_aead_ivsize(aead);
15824 + iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
15825 + if (dma_mapping_error(dev, iv_dma)) {
15826 + dev_err(dev, "unable to map IV\n");
15827 + caam_unmap(dev, req->src, req->dst, src_nents,
15828 + dst_nents, 0, 0, op_type, 0, 0);
15829 + qi_cache_free(edesc);
15830 + return ERR_PTR(-ENOMEM);
15831 + }
15832 + }
15833 +
15834 + /*
15835 + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
15836 + * Input is not contiguous.
15837 + */
15838 + qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
15839 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
15840 + if (unlikely(qm_sg_nents > CAAM_QI_MAX_AEAD_SG)) {
15841 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
15842 + qm_sg_nents, CAAM_QI_MAX_AEAD_SG);
15843 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15844 + iv_dma, ivsize, op_type, 0, 0);
15845 + qi_cache_free(edesc);
15846 + return ERR_PTR(-ENOMEM);
15847 + }
15848 + sg_table = &edesc->sgt[0];
15849 + qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
15850 +
15851 + edesc->src_nents = src_nents;
15852 + edesc->dst_nents = dst_nents;
15853 + edesc->iv_dma = iv_dma;
15854 +
15855 + edesc->assoclen_dma = dma_map_single(dev, &req->assoclen, 4,
15856 + DMA_TO_DEVICE);
15857 + if (dma_mapping_error(dev, edesc->assoclen_dma)) {
15858 + dev_err(dev, "unable to map assoclen\n");
15859 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15860 + iv_dma, ivsize, op_type, 0, 0);
15861 + qi_cache_free(edesc);
15862 + return ERR_PTR(-ENOMEM);
15863 + }
15864 +
15865 + dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
15866 + qm_sg_index++;
15867 + if (ivsize) {
15868 + dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
15869 + qm_sg_index++;
15870 + }
15871 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
15872 + qm_sg_index += mapped_src_nents;
15873 +
15874 + if (mapped_dst_nents > 1)
15875 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
15876 + qm_sg_index, 0);
15877 +
15878 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
15879 + if (dma_mapping_error(dev, qm_sg_dma)) {
15880 + dev_err(dev, "unable to map S/G table\n");
15881 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
15882 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
15883 + iv_dma, ivsize, op_type, 0, 0);
15884 + qi_cache_free(edesc);
15885 + return ERR_PTR(-ENOMEM);
15886 + }
15887 +
15888 + edesc->qm_sg_dma = qm_sg_dma;
15889 + edesc->qm_sg_bytes = qm_sg_bytes;
15890 +
15891 + out_len = req->assoclen + req->cryptlen +
15892 + (encrypt ? ctx->authsize : (-ctx->authsize));
15893 + in_len = 4 + ivsize + req->assoclen + req->cryptlen;
15894 +
15895 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
15896 + dpaa2_fl_set_final(in_fle, true);
15897 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
15898 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
15899 + dpaa2_fl_set_len(in_fle, in_len);
15900 +
15901 + if (req->dst == req->src) {
15902 + if (mapped_src_nents == 1) {
15903 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15904 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
15905 + } else {
15906 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15907 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
15908 + (1 + !!ivsize) * sizeof(*sg_table));
15909 + }
15910 + } else if (mapped_dst_nents == 1) {
15911 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
15912 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
15913 + } else {
15914 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
15915 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
15916 + sizeof(*sg_table));
15917 + }
15918 +
15919 + dpaa2_fl_set_len(out_fle, out_len);
15920 +
15921 + return edesc;
15922 +}
15923 +
15924 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
15925 + bool encrypt)
15926 +{
15927 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
15928 + unsigned int blocksize = crypto_aead_blocksize(tls);
15929 + unsigned int padsize, authsize;
15930 + struct caam_request *req_ctx = aead_request_ctx(req);
15931 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
15932 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
15933 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
15934 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(tls),
15935 + typeof(*alg), aead);
15936 + struct device *dev = ctx->dev;
15937 + gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
15938 + CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
15939 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
15940 + struct tls_edesc *edesc;
15941 + dma_addr_t qm_sg_dma, iv_dma = 0;
15942 + int ivsize = 0;
15943 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
15944 + int in_len, out_len;
15945 + struct dpaa2_sg_entry *sg_table;
15946 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
15947 + struct scatterlist *dst;
15948 +
15949 + if (encrypt) {
15950 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
15951 + blocksize);
15952 + authsize = ctx->authsize + padsize;
15953 + } else {
15954 + authsize = ctx->authsize;
15955 + }
15956 +
15957 + /* allocate space for base edesc and link tables */
15958 + edesc = qi_cache_zalloc(GFP_DMA | flags);
15959 + if (unlikely(!edesc)) {
15960 + dev_err(dev, "could not allocate extended descriptor\n");
15961 + return ERR_PTR(-ENOMEM);
15962 + }
15963 +
15964 + if (likely(req->src == req->dst)) {
15965 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15966 + req->cryptlen +
15967 + (encrypt ? authsize : 0));
15968 + if (unlikely(src_nents < 0)) {
15969 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15970 + req->assoclen + req->cryptlen +
15971 + (encrypt ? authsize : 0));
15972 + qi_cache_free(edesc);
15973 + return ERR_PTR(src_nents);
15974 + }
15975 +
15976 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
15977 + DMA_BIDIRECTIONAL);
15978 + if (unlikely(!mapped_src_nents)) {
15979 + dev_err(dev, "unable to map source\n");
15980 + qi_cache_free(edesc);
15981 + return ERR_PTR(-ENOMEM);
15982 + }
15983 + dst = req->dst;
15984 + } else {
15985 + src_nents = sg_nents_for_len(req->src, req->assoclen +
15986 + req->cryptlen);
15987 + if (unlikely(src_nents < 0)) {
15988 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
15989 + req->assoclen + req->cryptlen);
15990 + qi_cache_free(edesc);
15991 + return ERR_PTR(src_nents);
15992 + }
15993 +
15994 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
15995 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
15996 + (encrypt ? authsize : 0));
15997 + if (unlikely(dst_nents < 0)) {
15998 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
15999 + req->cryptlen +
16000 + (encrypt ? authsize : 0));
16001 + qi_cache_free(edesc);
16002 + return ERR_PTR(dst_nents);
16003 + }
16004 +
16005 + if (src_nents) {
16006 + mapped_src_nents = dma_map_sg(dev, req->src,
16007 + src_nents, DMA_TO_DEVICE);
16008 + if (unlikely(!mapped_src_nents)) {
16009 + dev_err(dev, "unable to map source\n");
16010 + qi_cache_free(edesc);
16011 + return ERR_PTR(-ENOMEM);
16012 + }
16013 + } else {
16014 + mapped_src_nents = 0;
16015 + }
16016 +
16017 + mapped_dst_nents = dma_map_sg(dev, dst, dst_nents,
16018 + DMA_FROM_DEVICE);
16019 + if (unlikely(!mapped_dst_nents)) {
16020 + dev_err(dev, "unable to map destination\n");
16021 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16022 + qi_cache_free(edesc);
16023 + return ERR_PTR(-ENOMEM);
16024 + }
16025 + }
16026 +
16027 + ivsize = crypto_aead_ivsize(tls);
16028 + iv_dma = dma_map_single(dev, req->iv, ivsize, DMA_TO_DEVICE);
16029 + if (dma_mapping_error(dev, iv_dma)) {
16030 + dev_err(dev, "unable to map IV\n");
16031 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, 0, 0,
16032 + op_type, 0, 0);
16033 + qi_cache_free(edesc);
16034 + return ERR_PTR(-ENOMEM);
16035 + }
16036 +
16037 + /*
16038 + * Create S/G table: IV, src, dst.
16039 + * Input is not contiguous.
16040 + */
16041 + qm_sg_ents = 1 + mapped_src_nents +
16042 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
16043 + sg_table = &edesc->sgt[0];
16044 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16045 +
16046 + edesc->src_nents = src_nents;
16047 + edesc->dst_nents = dst_nents;
16048 + edesc->dst = dst;
16049 + edesc->iv_dma = iv_dma;
16050 +
16051 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16052 + qm_sg_index = 1;
16053 +
16054 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
16055 + qm_sg_index += mapped_src_nents;
16056 +
16057 + if (mapped_dst_nents > 1)
16058 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
16059 + qm_sg_index, 0);
16060 +
16061 + qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
16062 + if (dma_mapping_error(dev, qm_sg_dma)) {
16063 + dev_err(dev, "unable to map S/G table\n");
16064 + caam_unmap(dev, req->src, dst, src_nents, dst_nents, iv_dma,
16065 + ivsize, op_type, 0, 0);
16066 + qi_cache_free(edesc);
16067 + return ERR_PTR(-ENOMEM);
16068 + }
16069 +
16070 + edesc->qm_sg_dma = qm_sg_dma;
16071 + edesc->qm_sg_bytes = qm_sg_bytes;
16072 +
16073 + out_len = req->cryptlen + (encrypt ? authsize : 0);
16074 + in_len = ivsize + req->assoclen + req->cryptlen;
16075 +
16076 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16077 + dpaa2_fl_set_final(in_fle, true);
16078 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16079 + dpaa2_fl_set_addr(in_fle, qm_sg_dma);
16080 + dpaa2_fl_set_len(in_fle, in_len);
16081 +
16082 + if (req->dst == req->src) {
16083 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16084 + dpaa2_fl_set_addr(out_fle, qm_sg_dma +
16085 + (sg_nents_for_len(req->src, req->assoclen) +
16086 + 1) * sizeof(*sg_table));
16087 + } else if (mapped_dst_nents == 1) {
16088 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16089 + dpaa2_fl_set_addr(out_fle, sg_dma_address(dst));
16090 + } else {
16091 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16092 + dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
16093 + sizeof(*sg_table));
16094 + }
16095 +
16096 + dpaa2_fl_set_len(out_fle, out_len);
16097 +
16098 + return edesc;
16099 +}
16100 +
16101 +static int tls_set_sh_desc(struct crypto_aead *tls)
16102 +{
16103 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16104 + unsigned int ivsize = crypto_aead_ivsize(tls);
16105 + unsigned int blocksize = crypto_aead_blocksize(tls);
16106 + struct device *dev = ctx->dev;
16107 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
16108 + struct caam_flc *flc;
16109 + u32 *desc;
16110 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
16111 + unsigned int data_len[2];
16112 + u32 inl_mask;
16113 +
16114 + if (!ctx->cdata.keylen || !ctx->authsize)
16115 + return 0;
16116 +
16117 + /*
16118 + * TLS 1.0 encrypt shared descriptor
16119 + * Job Descriptor and Shared Descriptor
16120 + * must fit into the 64-word Descriptor h/w Buffer
16121 + */
16122 + data_len[0] = ctx->adata.keylen_pad;
16123 + data_len[1] = ctx->cdata.keylen;
16124 +
16125 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
16126 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
16127 + return -EINVAL;
16128 +
16129 + if (inl_mask & 1)
16130 + ctx->adata.key_virt = ctx->key;
16131 + else
16132 + ctx->adata.key_dma = ctx->key_dma;
16133 +
16134 + if (inl_mask & 2)
16135 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
16136 + else
16137 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16138 +
16139 + ctx->adata.key_inline = !!(inl_mask & 1);
16140 + ctx->cdata.key_inline = !!(inl_mask & 2);
16141 +
16142 + flc = &ctx->flc[ENCRYPT];
16143 + desc = flc->sh_desc;
16144 + cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
16145 + assoclen, ivsize, ctx->authsize, blocksize,
16146 + priv->sec_attr.era);
16147 + flc->flc[1] = desc_len(desc);
16148 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16149 + sizeof(flc->flc) + desc_bytes(desc),
16150 + DMA_BIDIRECTIONAL);
16151 +
16152 + /*
16153 + * TLS 1.0 decrypt shared descriptor
16154 + * Keys do not fit inline, regardless of algorithms used
16155 + */
16156 + ctx->adata.key_inline = false;
16157 + ctx->adata.key_dma = ctx->key_dma;
16158 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
16159 +
16160 + flc = &ctx->flc[DECRYPT];
16161 + desc = flc->sh_desc;
16162 + cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
16163 + ctx->authsize, blocksize, priv->sec_attr.era);
16164 + flc->flc[1] = desc_len(desc); /* SDL */
16165 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16166 + sizeof(flc->flc) + desc_bytes(desc),
16167 + DMA_BIDIRECTIONAL);
16168 +
16169 + return 0;
16170 +}
16171 +
16172 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
16173 + unsigned int keylen)
16174 +{
16175 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16176 + struct device *dev = ctx->dev;
16177 + struct crypto_authenc_keys keys;
16178 +
16179 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
16180 + goto badkey;
16181 +
16182 +#ifdef DEBUG
16183 + dev_err(dev, "keylen %d enckeylen %d authkeylen %d\n",
16184 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
16185 + keys.authkeylen);
16186 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16187 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16188 +#endif
16189 +
16190 + ctx->adata.keylen = keys.authkeylen;
16191 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
16192 + OP_ALG_ALGSEL_MASK);
16193 +
16194 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
16195 + goto badkey;
16196 +
16197 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
16198 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
16199 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
16200 + keys.enckeylen, DMA_BIDIRECTIONAL);
16201 +#ifdef DEBUG
16202 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
16203 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
16204 + ctx->adata.keylen_pad + keys.enckeylen, 1);
16205 +#endif
16206 +
16207 + ctx->cdata.keylen = keys.enckeylen;
16208 +
16209 + return tls_set_sh_desc(tls);
16210 +badkey:
16211 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
16212 + return -EINVAL;
16213 +}
16214 +
16215 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
16216 +{
16217 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
16218 +
16219 + ctx->authsize = authsize;
16220 + tls_set_sh_desc(tls);
16221 +
16222 + return 0;
16223 +}
16224 +
16225 +static int gcm_set_sh_desc(struct crypto_aead *aead)
16226 +{
16227 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16228 + struct device *dev = ctx->dev;
16229 + unsigned int ivsize = crypto_aead_ivsize(aead);
16230 + struct caam_flc *flc;
16231 + u32 *desc;
16232 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16233 + ctx->cdata.keylen;
16234 +
16235 + if (!ctx->cdata.keylen || !ctx->authsize)
16236 + return 0;
16237 +
16238 + /*
16239 + * AES GCM encrypt shared descriptor
16240 + * Job Descriptor and Shared Descriptor
16241 + * must fit into the 64-word Descriptor h/w Buffer
16242 + */
16243 + if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
16244 + ctx->cdata.key_inline = true;
16245 + ctx->cdata.key_virt = ctx->key;
16246 + } else {
16247 + ctx->cdata.key_inline = false;
16248 + ctx->cdata.key_dma = ctx->key_dma;
16249 + }
16250 +
16251 + flc = &ctx->flc[ENCRYPT];
16252 + desc = flc->sh_desc;
16253 + cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16254 + flc->flc[1] = desc_len(desc); /* SDL */
16255 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16256 + sizeof(flc->flc) + desc_bytes(desc),
16257 + DMA_BIDIRECTIONAL);
16258 +
16259 + /*
16260 + * Job Descriptor and Shared Descriptors
16261 + * must all fit into the 64-word Descriptor h/w Buffer
16262 + */
16263 + if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
16264 + ctx->cdata.key_inline = true;
16265 + ctx->cdata.key_virt = ctx->key;
16266 + } else {
16267 + ctx->cdata.key_inline = false;
16268 + ctx->cdata.key_dma = ctx->key_dma;
16269 + }
16270 +
16271 + flc = &ctx->flc[DECRYPT];
16272 + desc = flc->sh_desc;
16273 + cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
16274 + flc->flc[1] = desc_len(desc); /* SDL */
16275 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16276 + sizeof(flc->flc) + desc_bytes(desc),
16277 + DMA_BIDIRECTIONAL);
16278 +
16279 + return 0;
16280 +}
16281 +
16282 +static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
16283 +{
16284 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16285 +
16286 + ctx->authsize = authsize;
16287 + gcm_set_sh_desc(authenc);
16288 +
16289 + return 0;
16290 +}
16291 +
16292 +static int gcm_setkey(struct crypto_aead *aead,
16293 + const u8 *key, unsigned int keylen)
16294 +{
16295 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16296 + struct device *dev = ctx->dev;
16297 +
16298 +#ifdef DEBUG
16299 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16300 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16301 +#endif
16302 +
16303 + memcpy(ctx->key, key, keylen);
16304 + dma_sync_single_for_device(dev, ctx->key_dma, keylen,
16305 + DMA_BIDIRECTIONAL);
16306 + ctx->cdata.keylen = keylen;
16307 +
16308 + return gcm_set_sh_desc(aead);
16309 +}
16310 +
16311 +static int rfc4106_set_sh_desc(struct crypto_aead *aead)
16312 +{
16313 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16314 + struct device *dev = ctx->dev;
16315 + unsigned int ivsize = crypto_aead_ivsize(aead);
16316 + struct caam_flc *flc;
16317 + u32 *desc;
16318 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16319 + ctx->cdata.keylen;
16320 +
16321 + if (!ctx->cdata.keylen || !ctx->authsize)
16322 + return 0;
16323 +
16324 + ctx->cdata.key_virt = ctx->key;
16325 +
16326 + /*
16327 + * RFC4106 encrypt shared descriptor
16328 + * Job Descriptor and Shared Descriptor
16329 + * must fit into the 64-word Descriptor h/w Buffer
16330 + */
16331 + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
16332 + ctx->cdata.key_inline = true;
16333 + } else {
16334 + ctx->cdata.key_inline = false;
16335 + ctx->cdata.key_dma = ctx->key_dma;
16336 + }
16337 +
16338 + flc = &ctx->flc[ENCRYPT];
16339 + desc = flc->sh_desc;
16340 + cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16341 + true);
16342 + flc->flc[1] = desc_len(desc); /* SDL */
16343 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16344 + sizeof(flc->flc) + desc_bytes(desc),
16345 + DMA_BIDIRECTIONAL);
16346 +
16347 + /*
16348 + * Job Descriptor and Shared Descriptors
16349 + * must all fit into the 64-word Descriptor h/w Buffer
16350 + */
16351 + if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
16352 + ctx->cdata.key_inline = true;
16353 + } else {
16354 + ctx->cdata.key_inline = false;
16355 + ctx->cdata.key_dma = ctx->key_dma;
16356 + }
16357 +
16358 + flc = &ctx->flc[DECRYPT];
16359 + desc = flc->sh_desc;
16360 + cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16361 + true);
16362 + flc->flc[1] = desc_len(desc); /* SDL */
16363 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16364 + sizeof(flc->flc) + desc_bytes(desc),
16365 + DMA_BIDIRECTIONAL);
16366 +
16367 + return 0;
16368 +}
16369 +
16370 +static int rfc4106_setauthsize(struct crypto_aead *authenc,
16371 + unsigned int authsize)
16372 +{
16373 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16374 +
16375 + ctx->authsize = authsize;
16376 + rfc4106_set_sh_desc(authenc);
16377 +
16378 + return 0;
16379 +}
16380 +
16381 +static int rfc4106_setkey(struct crypto_aead *aead,
16382 + const u8 *key, unsigned int keylen)
16383 +{
16384 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16385 + struct device *dev = ctx->dev;
16386 +
16387 + if (keylen < 4)
16388 + return -EINVAL;
16389 +
16390 +#ifdef DEBUG
16391 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16392 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16393 +#endif
16394 +
16395 + memcpy(ctx->key, key, keylen);
16396 + /*
16397 + * The last four bytes of the key material are used as the salt value
16398 + * in the nonce. Update the AES key length.
16399 + */
16400 + ctx->cdata.keylen = keylen - 4;
16401 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
16402 + DMA_BIDIRECTIONAL);
16403 +
16404 + return rfc4106_set_sh_desc(aead);
16405 +}
16406 +
16407 +static int rfc4543_set_sh_desc(struct crypto_aead *aead)
16408 +{
16409 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16410 + struct device *dev = ctx->dev;
16411 + unsigned int ivsize = crypto_aead_ivsize(aead);
16412 + struct caam_flc *flc;
16413 + u32 *desc;
16414 + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
16415 + ctx->cdata.keylen;
16416 +
16417 + if (!ctx->cdata.keylen || !ctx->authsize)
16418 + return 0;
16419 +
16420 + ctx->cdata.key_virt = ctx->key;
16421 +
16422 + /*
16423 + * RFC4543 encrypt shared descriptor
16424 + * Job Descriptor and Shared Descriptor
16425 + * must fit into the 64-word Descriptor h/w Buffer
16426 + */
16427 + if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
16428 + ctx->cdata.key_inline = true;
16429 + } else {
16430 + ctx->cdata.key_inline = false;
16431 + ctx->cdata.key_dma = ctx->key_dma;
16432 + }
16433 +
16434 + flc = &ctx->flc[ENCRYPT];
16435 + desc = flc->sh_desc;
16436 + cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
16437 + true);
16438 + flc->flc[1] = desc_len(desc); /* SDL */
16439 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16440 + sizeof(flc->flc) + desc_bytes(desc),
16441 + DMA_BIDIRECTIONAL);
16442 +
16443 + /*
16444 + * Job Descriptor and Shared Descriptors
16445 + * must all fit into the 64-word Descriptor h/w Buffer
16446 + */
16447 + if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
16448 + ctx->cdata.key_inline = true;
16449 + } else {
16450 + ctx->cdata.key_inline = false;
16451 + ctx->cdata.key_dma = ctx->key_dma;
16452 + }
16453 +
16454 + flc = &ctx->flc[DECRYPT];
16455 + desc = flc->sh_desc;
16456 + cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
16457 + true);
16458 + flc->flc[1] = desc_len(desc); /* SDL */
16459 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16460 + sizeof(flc->flc) + desc_bytes(desc),
16461 + DMA_BIDIRECTIONAL);
16462 +
16463 + return 0;
16464 +}
16465 +
16466 +static int rfc4543_setauthsize(struct crypto_aead *authenc,
16467 + unsigned int authsize)
16468 +{
16469 + struct caam_ctx *ctx = crypto_aead_ctx(authenc);
16470 +
16471 + ctx->authsize = authsize;
16472 + rfc4543_set_sh_desc(authenc);
16473 +
16474 + return 0;
16475 +}
16476 +
16477 +static int rfc4543_setkey(struct crypto_aead *aead,
16478 + const u8 *key, unsigned int keylen)
16479 +{
16480 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16481 + struct device *dev = ctx->dev;
16482 +
16483 + if (keylen < 4)
16484 + return -EINVAL;
16485 +
16486 +#ifdef DEBUG
16487 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16488 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16489 +#endif
16490 +
16491 + memcpy(ctx->key, key, keylen);
16492 + /*
16493 + * The last four bytes of the key material are used as the salt value
16494 + * in the nonce. Update the AES key length.
16495 + */
16496 + ctx->cdata.keylen = keylen - 4;
16497 + dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
16498 + DMA_BIDIRECTIONAL);
16499 +
16500 + return rfc4543_set_sh_desc(aead);
16501 +}
16502 +
16503 +static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16504 + const u8 *key, unsigned int keylen)
16505 +{
16506 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16507 + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
16508 + const char *alg_name = crypto_tfm_alg_name(tfm);
16509 + struct device *dev = ctx->dev;
16510 + struct caam_flc *flc;
16511 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16512 + u32 *desc;
16513 + u32 ctx1_iv_off = 0;
16514 + const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
16515 + OP_ALG_AAI_CTR_MOD128);
16516 + const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
16517 +
16518 +#ifdef DEBUG
16519 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
16520 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
16521 +#endif
16522 + /*
16523 + * AES-CTR needs to load IV in CONTEXT1 reg
16524 + * at an offset of 128 bits (16 bytes)
16525 + * CONTEXT1[255:128] = IV
16526 + */
16527 + if (ctr_mode)
16528 + ctx1_iv_off = 16;
16529 +
16530 + /*
16531 + * RFC3686 specific:
16532 + * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
16533 + * | *key = {KEY, NONCE}
16534 + */
16535 + if (is_rfc3686) {
16536 + ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
16537 + keylen -= CTR_RFC3686_NONCE_SIZE;
16538 + }
16539 +
16540 + ctx->cdata.keylen = keylen;
16541 + ctx->cdata.key_virt = key;
16542 + ctx->cdata.key_inline = true;
16543 +
16544 + /* ablkcipher_encrypt shared descriptor */
16545 + flc = &ctx->flc[ENCRYPT];
16546 + desc = flc->sh_desc;
16547 + cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
16548 + is_rfc3686, ctx1_iv_off);
16549 + flc->flc[1] = desc_len(desc); /* SDL */
16550 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16551 + sizeof(flc->flc) + desc_bytes(desc),
16552 + DMA_BIDIRECTIONAL);
16553 +
16554 + /* ablkcipher_decrypt shared descriptor */
16555 + flc = &ctx->flc[DECRYPT];
16556 + desc = flc->sh_desc;
16557 + cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
16558 + is_rfc3686, ctx1_iv_off);
16559 + flc->flc[1] = desc_len(desc); /* SDL */
16560 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16561 + sizeof(flc->flc) + desc_bytes(desc),
16562 + DMA_BIDIRECTIONAL);
16563 +
16564 + /* ablkcipher_givencrypt shared descriptor */
16565 + flc = &ctx->flc[GIVENCRYPT];
16566 + desc = flc->sh_desc;
16567 + cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
16568 + ivsize, is_rfc3686, ctx1_iv_off);
16569 + flc->flc[1] = desc_len(desc); /* SDL */
16570 + dma_sync_single_for_device(dev, ctx->flc_dma[GIVENCRYPT],
16571 + sizeof(flc->flc) + desc_bytes(desc),
16572 + DMA_BIDIRECTIONAL);
16573 +
16574 + return 0;
16575 +}
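+
+/*
+ * Illustrative sketch (not part of the original driver): for
+ * "rfc3686(ctr(aes))" the key blob handed to setkey is {AES key, 4-byte
+ * nonce}, matching the layout parsed above, while the per-request IV is the
+ * 8-byte explicit IV (the 32-bit block counter is appended by the cipher).
+ * The helper name and variables below are hypothetical and assume
+ * <crypto/skcipher.h>.
+ */
+static int __maybe_unused example_rfc3686_setkey(struct crypto_skcipher *tfm,
+						  const u8 *aes_key,
+						  unsigned int aes_keylen,
+						  const u8 nonce[CTR_RFC3686_NONCE_SIZE])
+{
+	u8 blob[AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE];
+	int err;
+
+	if (aes_keylen > AES_MAX_KEY_SIZE)
+		return -EINVAL;
+
+	memcpy(blob, aes_key, aes_keylen);
+	memcpy(blob + aes_keylen, nonce, CTR_RFC3686_NONCE_SIZE);
+	err = crypto_skcipher_setkey(tfm, blob,
+				     aes_keylen + CTR_RFC3686_NONCE_SIZE);
+	memzero_explicit(blob, sizeof(blob));
+
+	return err;
+}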
16576 +
16577 +static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
16578 + const u8 *key, unsigned int keylen)
16579 +{
16580 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16581 + struct device *dev = ctx->dev;
16582 + struct caam_flc *flc;
16583 + u32 *desc;
16584 +
16585 + if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
16586 + dev_err(dev, "key size mismatch\n");
16587 + crypto_ablkcipher_set_flags(ablkcipher,
16588 + CRYPTO_TFM_RES_BAD_KEY_LEN);
16589 + return -EINVAL;
16590 + }
16591 +
16592 + ctx->cdata.keylen = keylen;
16593 + ctx->cdata.key_virt = key;
16594 + ctx->cdata.key_inline = true;
16595 +
16596 + /* xts_ablkcipher_encrypt shared descriptor */
16597 + flc = &ctx->flc[ENCRYPT];
16598 + desc = flc->sh_desc;
16599 + cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
16600 + flc->flc[1] = desc_len(desc); /* SDL */
16601 + dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
16602 + sizeof(flc->flc) + desc_bytes(desc),
16603 + DMA_BIDIRECTIONAL);
16604 +
16605 + /* xts_ablkcipher_decrypt shared descriptor */
16606 + flc = &ctx->flc[DECRYPT];
16607 + desc = flc->sh_desc;
16608 + cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
16609 + flc->flc[1] = desc_len(desc); /* SDL */
16610 + dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
16611 + sizeof(flc->flc) + desc_bytes(desc),
16612 + DMA_BIDIRECTIONAL);
16613 +
16614 + return 0;
16615 +}
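+
+/*
+ * Illustrative sketch (not part of the original driver): XTS consumes two
+ * AES keys of equal size back to back, which is why only 32-byte
+ * (2 x AES-128) and 64-byte (2 x AES-256) blobs pass the check above. The
+ * hypothetical helper below simply mirrors that rule for a caller using the
+ * generic skcipher API.
+ */
+static int __maybe_unused example_xts_setkey(struct crypto_skcipher *tfm,
+					     const u8 *key, unsigned int keylen)
+{
+	/* key = key1 || key2, each half AES_MIN_KEY_SIZE or AES_MAX_KEY_SIZE */
+	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE)
+		return -EINVAL;
+
+	return crypto_skcipher_setkey(tfm, key, keylen);
+}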
16616 +
16617 +static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
16618 + *req, bool encrypt)
16619 +{
16620 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16621 + struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16622 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16623 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16624 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16625 + struct device *dev = ctx->dev;
16626 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16627 + GFP_KERNEL : GFP_ATOMIC;
16628 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
16629 + struct ablkcipher_edesc *edesc;
16630 + dma_addr_t iv_dma;
16631 + bool in_contig;
16632 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16633 + int dst_sg_idx, qm_sg_ents;
16634 + struct dpaa2_sg_entry *sg_table;
16635 + enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
16636 +
16637 + src_nents = sg_nents_for_len(req->src, req->nbytes);
16638 + if (unlikely(src_nents < 0)) {
16639 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16640 + req->nbytes);
16641 + return ERR_PTR(src_nents);
16642 + }
16643 +
16644 + if (unlikely(req->dst != req->src)) {
16645 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16646 + if (unlikely(dst_nents < 0)) {
16647 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16648 + req->nbytes);
16649 + return ERR_PTR(dst_nents);
16650 + }
16651 +
16652 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16653 + DMA_TO_DEVICE);
16654 + if (unlikely(!mapped_src_nents)) {
16655 + dev_err(dev, "unable to map source\n");
16656 + return ERR_PTR(-ENOMEM);
16657 + }
16658 +
16659 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16660 + DMA_FROM_DEVICE);
16661 + if (unlikely(!mapped_dst_nents)) {
16662 + dev_err(dev, "unable to map destination\n");
16663 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16664 + return ERR_PTR(-ENOMEM);
16665 + }
16666 + } else {
16667 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16668 + DMA_BIDIRECTIONAL);
16669 + if (unlikely(!mapped_src_nents)) {
16670 + dev_err(dev, "unable to map source\n");
16671 + return ERR_PTR(-ENOMEM);
16672 + }
16673 + }
16674 +
16675 + iv_dma = dma_map_single(dev, req->info, ivsize, DMA_TO_DEVICE);
16676 + if (dma_mapping_error(dev, iv_dma)) {
16677 + dev_err(dev, "unable to map IV\n");
16678 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16679 + 0, 0, 0, 0);
16680 + return ERR_PTR(-ENOMEM);
16681 + }
16682 +
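+	/*
+	 * If the DMA-mapped IV ends exactly where the single source segment
+	 * begins, IV and payload already form one contiguous buffer, so no
+	 * input S/G table is needed and the input frame list entry can point
+	 * directly at iv_dma.
+	 */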
16683 + if (mapped_src_nents == 1 &&
16684 + iv_dma + ivsize == sg_dma_address(req->src)) {
16685 + in_contig = true;
16686 + qm_sg_ents = 0;
16687 + } else {
16688 + in_contig = false;
16689 + qm_sg_ents = 1 + mapped_src_nents;
16690 + }
16691 + dst_sg_idx = qm_sg_ents;
16692 +
16693 + qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
16694 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16695 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16696 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16697 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16698 + iv_dma, ivsize, op_type, 0, 0);
16699 + return ERR_PTR(-ENOMEM);
16700 + }
16701 +
16702 + /* allocate space for base edesc and link tables */
16703 + edesc = qi_cache_zalloc(GFP_DMA | flags);
16704 + if (unlikely(!edesc)) {
16705 + dev_err(dev, "could not allocate extended descriptor\n");
16706 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16707 + iv_dma, ivsize, op_type, 0, 0);
16708 + return ERR_PTR(-ENOMEM);
16709 + }
16710 +
16711 + edesc->src_nents = src_nents;
16712 + edesc->dst_nents = dst_nents;
16713 + edesc->iv_dma = iv_dma;
16714 + sg_table = &edesc->sgt[0];
16715 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16716 +
16717 + if (!in_contig) {
16718 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
16719 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
16720 + }
16721 +
16722 + if (mapped_dst_nents > 1)
16723 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16724 + dst_sg_idx, 0);
16725 +
16726 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16727 + DMA_TO_DEVICE);
16728 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16729 + dev_err(dev, "unable to map S/G table\n");
16730 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16731 + iv_dma, ivsize, op_type, 0, 0);
16732 + qi_cache_free(edesc);
16733 + return ERR_PTR(-ENOMEM);
16734 + }
16735 +
16736 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16737 + dpaa2_fl_set_final(in_fle, true);
16738 + dpaa2_fl_set_len(in_fle, req->nbytes + ivsize);
16739 + dpaa2_fl_set_len(out_fle, req->nbytes);
16740 +
16741 + if (!in_contig) {
16742 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16743 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16744 + } else {
16745 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16746 + dpaa2_fl_set_addr(in_fle, iv_dma);
16747 + }
16748 +
16749 + if (req->src == req->dst) {
16750 + if (!in_contig) {
16751 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16752 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
16753 + sizeof(*sg_table));
16754 + } else {
16755 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16756 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
16757 + }
16758 + } else if (mapped_dst_nents > 1) {
16759 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16760 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16761 + sizeof(*sg_table));
16762 + } else {
16763 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16764 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16765 + }
16766 +
16767 + return edesc;
16768 +}
16769 +
16770 +static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
16771 + struct skcipher_givcrypt_request *greq)
16772 +{
16773 + struct ablkcipher_request *req = &greq->creq;
16774 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16775 + struct caam_request *req_ctx = ablkcipher_request_ctx(req);
16776 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
16777 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
16778 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
16779 + struct device *dev = ctx->dev;
16780 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
16781 + GFP_KERNEL : GFP_ATOMIC;
16782 + int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
16783 + struct ablkcipher_edesc *edesc;
16784 + dma_addr_t iv_dma;
16785 + bool out_contig;
16786 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16787 + struct dpaa2_sg_entry *sg_table;
16788 + int dst_sg_idx, qm_sg_ents;
16789 +
16790 + src_nents = sg_nents_for_len(req->src, req->nbytes);
16791 + if (unlikely(src_nents < 0)) {
16792 + dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
16793 + req->nbytes);
16794 + return ERR_PTR(src_nents);
16795 + }
16796 +
16797 + if (unlikely(req->dst != req->src)) {
16798 + dst_nents = sg_nents_for_len(req->dst, req->nbytes);
16799 + if (unlikely(dst_nents < 0)) {
16800 + dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
16801 + req->nbytes);
16802 + return ERR_PTR(dst_nents);
16803 + }
16804 +
16805 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16806 + DMA_TO_DEVICE);
16807 + if (unlikely(!mapped_src_nents)) {
16808 + dev_err(dev, "unable to map source\n");
16809 + return ERR_PTR(-ENOMEM);
16810 + }
16811 +
16812 + mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
16813 + DMA_FROM_DEVICE);
16814 + if (unlikely(!mapped_dst_nents)) {
16815 + dev_err(dev, "unable to map destination\n");
16816 + dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
16817 + return ERR_PTR(-ENOMEM);
16818 + }
16819 + } else {
16820 + mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
16821 + DMA_BIDIRECTIONAL);
16822 + if (unlikely(!mapped_src_nents)) {
16823 + dev_err(dev, "unable to map source\n");
16824 + return ERR_PTR(-ENOMEM);
16825 + }
16826 +
16827 + dst_nents = src_nents;
16828 + mapped_dst_nents = src_nents;
16829 + }
16830 +
16831 + iv_dma = dma_map_single(dev, greq->giv, ivsize, DMA_FROM_DEVICE);
16832 + if (dma_mapping_error(dev, iv_dma)) {
16833 + dev_err(dev, "unable to map IV\n");
16834 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
16835 + 0, 0, 0, 0);
16836 + return ERR_PTR(-ENOMEM);
16837 + }
16838 +
16839 + qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
16840 + dst_sg_idx = qm_sg_ents;
16841 + if (mapped_dst_nents == 1 &&
16842 + iv_dma + ivsize == sg_dma_address(req->dst)) {
16843 + out_contig = true;
16844 + } else {
16845 + out_contig = false;
16846 + qm_sg_ents += 1 + mapped_dst_nents;
16847 + }
16848 +
16849 + if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
16850 + dev_err(dev, "Insufficient S/G entries: %d > %lu\n",
16851 + qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
16852 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16853 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16854 + return ERR_PTR(-ENOMEM);
16855 + }
16856 +
16857 + /* allocate space for base edesc and link tables */
16858 + edesc = qi_cache_zalloc(GFP_DMA | flags);
16859 + if (!edesc) {
16860 + dev_err(dev, "could not allocate extended descriptor\n");
16861 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16862 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16863 + return ERR_PTR(-ENOMEM);
16864 + }
16865 +
16866 + edesc->src_nents = src_nents;
16867 + edesc->dst_nents = dst_nents;
16868 + edesc->iv_dma = iv_dma;
16869 + sg_table = &edesc->sgt[0];
16870 + edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
16871 +
16872 + if (mapped_src_nents > 1)
16873 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
16874 +
16875 + if (!out_contig) {
16876 + dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
16877 + sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
16878 + dst_sg_idx + 1, 0);
16879 + }
16880 +
16881 + edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
16882 + DMA_TO_DEVICE);
16883 + if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
16884 + dev_err(dev, "unable to map S/G table\n");
16885 + caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
16886 + iv_dma, ivsize, GIVENCRYPT, 0, 0);
16887 + qi_cache_free(edesc);
16888 + return ERR_PTR(-ENOMEM);
16889 + }
16890 +
16891 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
16892 + dpaa2_fl_set_final(in_fle, true);
16893 + dpaa2_fl_set_len(in_fle, req->nbytes);
16894 + dpaa2_fl_set_len(out_fle, ivsize + req->nbytes);
16895 +
16896 + if (mapped_src_nents > 1) {
16897 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
16898 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
16899 + } else {
16900 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
16901 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
16902 + }
16903 +
16904 + if (!out_contig) {
16905 + dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
16906 + dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
16907 + sizeof(*sg_table));
16908 + } else {
16909 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
16910 + dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
16911 + }
16912 +
16913 + return edesc;
16914 +}
16915 +
16916 +static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
16917 + struct aead_request *req)
16918 +{
16919 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
16920 + int ivsize = crypto_aead_ivsize(aead);
16921 + struct caam_request *caam_req = aead_request_ctx(req);
16922 +
16923 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
16924 + edesc->iv_dma, ivsize, caam_req->op_type,
16925 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
16926 + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
16927 +}
16928 +
16929 +static void tls_unmap(struct device *dev, struct tls_edesc *edesc,
16930 + struct aead_request *req)
16931 +{
16932 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
16933 + int ivsize = crypto_aead_ivsize(tls);
16934 + struct caam_request *caam_req = aead_request_ctx(req);
16935 +
16936 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
16937 + edesc->dst_nents, edesc->iv_dma, ivsize, caam_req->op_type,
16938 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
16939 +}
16940 +
16941 +static void ablkcipher_unmap(struct device *dev,
16942 + struct ablkcipher_edesc *edesc,
16943 + struct ablkcipher_request *req)
16944 +{
16945 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
16946 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
16947 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
16948 +
16949 + caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
16950 + edesc->iv_dma, ivsize, caam_req->op_type,
16951 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
16952 +}
16953 +
16954 +static void aead_encrypt_done(void *cbk_ctx, u32 status)
16955 +{
16956 + struct crypto_async_request *areq = cbk_ctx;
16957 + struct aead_request *req = container_of(areq, struct aead_request,
16958 + base);
16959 + struct caam_request *req_ctx = to_caam_req(areq);
16960 + struct aead_edesc *edesc = req_ctx->edesc;
16961 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
16962 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16963 + int ecode = 0;
16964 +
16965 +#ifdef DEBUG
16966 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
16967 +#endif
16968 +
16969 + if (unlikely(status)) {
16970 + caam_qi2_strstatus(ctx->dev, status);
16971 + ecode = -EIO;
16972 + }
16973 +
16974 + aead_unmap(ctx->dev, edesc, req);
16975 + qi_cache_free(edesc);
16976 + aead_request_complete(req, ecode);
16977 +}
16978 +
16979 +static void aead_decrypt_done(void *cbk_ctx, u32 status)
16980 +{
16981 + struct crypto_async_request *areq = cbk_ctx;
16982 + struct aead_request *req = container_of(areq, struct aead_request,
16983 + base);
16984 + struct caam_request *req_ctx = to_caam_req(areq);
16985 + struct aead_edesc *edesc = req_ctx->edesc;
16986 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
16987 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
16988 + int ecode = 0;
16989 +
16990 +#ifdef DEBUG
16991 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
16992 +#endif
16993 +
16994 + if (unlikely(status)) {
16995 + caam_qi2_strstatus(ctx->dev, status);
16996 +		/*
16997 +		 * Verify the HW ICV check passed, else return -EBADMSG.
16998 +		 */
16999 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17000 + JRSTA_CCBERR_ERRID_ICVCHK)
17001 + ecode = -EBADMSG;
17002 + else
17003 + ecode = -EIO;
17004 + }
17005 +
17006 + aead_unmap(ctx->dev, edesc, req);
17007 + qi_cache_free(edesc);
17008 + aead_request_complete(req, ecode);
17009 +}
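+
+/*
+ * Illustrative sketch (not part of the original driver): the ICV mismatch
+ * above is reported to the AEAD user as -EBADMSG, so a caller's completion
+ * callback can tell "authentication failed" apart from other errors. The
+ * callback below is hypothetical.
+ */
+static void __maybe_unused example_aead_decrypt_cb(struct crypto_async_request *req,
+						    int err)
+{
+	if (err == -EINPROGRESS)
+		return;		/* backlogged request accepted, final status comes later */
+
+	if (err == -EBADMSG)
+		pr_debug("AEAD decrypt: ICV check failed (data corrupt or tampered)\n");
+	else if (err)
+		pr_debug("AEAD decrypt: error %d\n", err);
+}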
17010 +
17011 +static int aead_encrypt(struct aead_request *req)
17012 +{
17013 + struct aead_edesc *edesc;
17014 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17015 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17016 + struct caam_request *caam_req = aead_request_ctx(req);
17017 + int ret;
17018 +
17019 + /* allocate extended descriptor */
17020 + edesc = aead_edesc_alloc(req, true);
17021 + if (IS_ERR(edesc))
17022 + return PTR_ERR(edesc);
17023 +
17024 + caam_req->flc = &ctx->flc[ENCRYPT];
17025 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
17026 + caam_req->op_type = ENCRYPT;
17027 + caam_req->cbk = aead_encrypt_done;
17028 + caam_req->ctx = &req->base;
17029 + caam_req->edesc = edesc;
17030 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17031 + if (ret != -EINPROGRESS &&
17032 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17033 + aead_unmap(ctx->dev, edesc, req);
17034 + qi_cache_free(edesc);
17035 + }
17036 +
17037 + return ret;
17038 +}
17039 +
17040 +static int aead_decrypt(struct aead_request *req)
17041 +{
17042 + struct aead_edesc *edesc;
17043 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
17044 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
17045 + struct caam_request *caam_req = aead_request_ctx(req);
17046 + int ret;
17047 +
17048 + /* allocate extended descriptor */
17049 + edesc = aead_edesc_alloc(req, false);
17050 + if (IS_ERR(edesc))
17051 + return PTR_ERR(edesc);
17052 +
17053 + caam_req->flc = &ctx->flc[DECRYPT];
17054 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
17055 + caam_req->op_type = DECRYPT;
17056 + caam_req->cbk = aead_decrypt_done;
17057 + caam_req->ctx = &req->base;
17058 + caam_req->edesc = edesc;
17059 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17060 + if (ret != -EINPROGRESS &&
17061 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17062 + aead_unmap(ctx->dev, edesc, req);
17063 + qi_cache_free(edesc);
17064 + }
17065 +
17066 + return ret;
17067 +}
17068 +
17069 +static void tls_encrypt_done(void *cbk_ctx, u32 status)
17070 +{
17071 + struct crypto_async_request *areq = cbk_ctx;
17072 + struct aead_request *req = container_of(areq, struct aead_request,
17073 + base);
17074 + struct caam_request *req_ctx = to_caam_req(areq);
17075 + struct tls_edesc *edesc = req_ctx->edesc;
17076 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17077 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17078 + int ecode = 0;
17079 +
17080 +#ifdef DEBUG
17081 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17082 +#endif
17083 +
17084 + if (unlikely(status)) {
17085 + caam_qi2_strstatus(ctx->dev, status);
17086 + ecode = -EIO;
17087 + }
17088 +
17089 + tls_unmap(ctx->dev, edesc, req);
17090 + qi_cache_free(edesc);
17091 + aead_request_complete(req, ecode);
17092 +}
17093 +
17094 +static void tls_decrypt_done(void *cbk_ctx, u32 status)
17095 +{
17096 + struct crypto_async_request *areq = cbk_ctx;
17097 + struct aead_request *req = container_of(areq, struct aead_request,
17098 + base);
17099 + struct caam_request *req_ctx = to_caam_req(areq);
17100 + struct tls_edesc *edesc = req_ctx->edesc;
17101 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17102 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17103 + int ecode = 0;
17104 +
17105 +#ifdef DEBUG
17106 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17107 +#endif
17108 +
17109 + if (unlikely(status)) {
17110 + caam_qi2_strstatus(ctx->dev, status);
17111 +		/*
17112 +		 * Verify the HW ICV check passed, else return -EBADMSG.
17113 +		 */
17114 + if ((status & JRSTA_CCBERR_ERRID_MASK) ==
17115 + JRSTA_CCBERR_ERRID_ICVCHK)
17116 + ecode = -EBADMSG;
17117 + else
17118 + ecode = -EIO;
17119 + }
17120 +
17121 + tls_unmap(ctx->dev, edesc, req);
17122 + qi_cache_free(edesc);
17123 + aead_request_complete(req, ecode);
17124 +}
17125 +
17126 +static int tls_encrypt(struct aead_request *req)
17127 +{
17128 + struct tls_edesc *edesc;
17129 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17130 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17131 + struct caam_request *caam_req = aead_request_ctx(req);
17132 + int ret;
17133 +
17134 + /* allocate extended descriptor */
17135 + edesc = tls_edesc_alloc(req, true);
17136 + if (IS_ERR(edesc))
17137 + return PTR_ERR(edesc);
17138 +
17139 + caam_req->flc = &ctx->flc[ENCRYPT];
17140 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
17141 + caam_req->op_type = ENCRYPT;
17142 + caam_req->cbk = tls_encrypt_done;
17143 + caam_req->ctx = &req->base;
17144 + caam_req->edesc = edesc;
17145 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17146 + if (ret != -EINPROGRESS &&
17147 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17148 + tls_unmap(ctx->dev, edesc, req);
17149 + qi_cache_free(edesc);
17150 + }
17151 +
17152 + return ret;
17153 +}
17154 +
17155 +static int tls_decrypt(struct aead_request *req)
17156 +{
17157 + struct tls_edesc *edesc;
17158 + struct crypto_aead *tls = crypto_aead_reqtfm(req);
17159 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
17160 + struct caam_request *caam_req = aead_request_ctx(req);
17161 + int ret;
17162 +
17163 + /* allocate extended descriptor */
17164 + edesc = tls_edesc_alloc(req, false);
17165 + if (IS_ERR(edesc))
17166 + return PTR_ERR(edesc);
17167 +
17168 + caam_req->flc = &ctx->flc[DECRYPT];
17169 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
17170 + caam_req->op_type = DECRYPT;
17171 + caam_req->cbk = tls_decrypt_done;
17172 + caam_req->ctx = &req->base;
17173 + caam_req->edesc = edesc;
17174 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17175 + if (ret != -EINPROGRESS &&
17176 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17177 + tls_unmap(ctx->dev, edesc, req);
17178 + qi_cache_free(edesc);
17179 + }
17180 +
17181 + return ret;
17182 +}
17183 +
17184 +static int ipsec_gcm_encrypt(struct aead_request *req)
17185 +{
17186 + if (req->assoclen < 8)
17187 + return -EINVAL;
17188 +
17189 + return aead_encrypt(req);
17190 +}
17191 +
17192 +static int ipsec_gcm_decrypt(struct aead_request *req)
17193 +{
17194 + if (req->assoclen < 8)
17195 + return -EINVAL;
17196 +
17197 + return aead_decrypt(req);
17198 +}
17199 +
17200 +static void ablkcipher_done(void *cbk_ctx, u32 status)
17201 +{
17202 + struct crypto_async_request *areq = cbk_ctx;
17203 + struct ablkcipher_request *req = ablkcipher_request_cast(areq);
17204 + struct caam_request *req_ctx = to_caam_req(areq);
17205 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17206 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17207 + struct ablkcipher_edesc *edesc = req_ctx->edesc;
17208 + int ecode = 0;
17209 + int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
17210 +
17211 +#ifdef DEBUG
17212 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
17213 +#endif
17214 +
17215 + if (unlikely(status)) {
17216 + caam_qi2_strstatus(ctx->dev, status);
17217 + ecode = -EIO;
17218 + }
17219 +
17220 +#ifdef DEBUG
17221 + print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
17222 + DUMP_PREFIX_ADDRESS, 16, 4, req->info,
17223 + edesc->src_nents > 1 ? 100 : ivsize, 1);
17224 + caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
17225 + DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
17226 + edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
17227 +#endif
17228 +
17229 + ablkcipher_unmap(ctx->dev, edesc, req);
17230 + qi_cache_free(edesc);
17231 +
17232 + /*
17233 + * The crypto API expects us to set the IV (req->info) to the last
17234 + * ciphertext block. This is used e.g. by the CTS mode.
17235 + */
17236 + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
17237 + ivsize, 0);
17238 +
17239 + ablkcipher_request_complete(req, ecode);
17240 +}
17241 +
17242 +static int ablkcipher_encrypt(struct ablkcipher_request *req)
17243 +{
17244 + struct ablkcipher_edesc *edesc;
17245 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17246 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17247 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17248 + int ret;
17249 +
17250 + /* allocate extended descriptor */
17251 + edesc = ablkcipher_edesc_alloc(req, true);
17252 + if (IS_ERR(edesc))
17253 + return PTR_ERR(edesc);
17254 +
17255 + caam_req->flc = &ctx->flc[ENCRYPT];
17256 + caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
17257 + caam_req->op_type = ENCRYPT;
17258 + caam_req->cbk = ablkcipher_done;
17259 + caam_req->ctx = &req->base;
17260 + caam_req->edesc = edesc;
17261 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17262 + if (ret != -EINPROGRESS &&
17263 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17264 + ablkcipher_unmap(ctx->dev, edesc, req);
17265 + qi_cache_free(edesc);
17266 + }
17267 +
17268 + return ret;
17269 +}
17270 +
17271 +static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *greq)
17272 +{
17273 + struct ablkcipher_request *req = &greq->creq;
17274 + struct ablkcipher_edesc *edesc;
17275 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17276 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17277 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17278 + int ret;
17279 +
17280 + /* allocate extended descriptor */
17281 + edesc = ablkcipher_giv_edesc_alloc(greq);
17282 + if (IS_ERR(edesc))
17283 + return PTR_ERR(edesc);
17284 +
17285 + caam_req->flc = &ctx->flc[GIVENCRYPT];
17286 + caam_req->flc_dma = ctx->flc_dma[GIVENCRYPT];
17287 + caam_req->op_type = GIVENCRYPT;
17288 + caam_req->cbk = ablkcipher_done;
17289 + caam_req->ctx = &req->base;
17290 + caam_req->edesc = edesc;
17291 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17292 + if (ret != -EINPROGRESS &&
17293 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17294 + ablkcipher_unmap(ctx->dev, edesc, req);
17295 + qi_cache_free(edesc);
17296 + }
17297 +
17298 + return ret;
17299 +}
17300 +
17301 +static int ablkcipher_decrypt(struct ablkcipher_request *req)
17302 +{
17303 + struct ablkcipher_edesc *edesc;
17304 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
17305 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
17306 + struct caam_request *caam_req = ablkcipher_request_ctx(req);
17307 + int ret;
17308 +
17309 + /* allocate extended descriptor */
17310 + edesc = ablkcipher_edesc_alloc(req, false);
17311 + if (IS_ERR(edesc))
17312 + return PTR_ERR(edesc);
17313 +
17314 + caam_req->flc = &ctx->flc[DECRYPT];
17315 + caam_req->flc_dma = ctx->flc_dma[DECRYPT];
17316 + caam_req->op_type = DECRYPT;
17317 + caam_req->cbk = ablkcipher_done;
17318 + caam_req->ctx = &req->base;
17319 + caam_req->edesc = edesc;
17320 + ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
17321 + if (ret != -EINPROGRESS &&
17322 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
17323 + ablkcipher_unmap(ctx->dev, edesc, req);
17324 + qi_cache_free(edesc);
17325 + }
17326 +
17327 + return ret;
17328 +}
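+
+/*
+ * Illustrative sketch (not part of the original driver): how a hypothetical
+ * in-kernel user would drive one of the algorithms registered below (e.g.
+ * "cbc(aes)", serviced here by cbc-aes-caam-qi2) through the generic
+ * skcipher API, including the -EINPROGRESS/-EBUSY convention mirrored by
+ * the enqueue paths above. Assumes <crypto/skcipher.h>, <linux/completion.h>
+ * and <linux/scatterlist.h>; all names below are made up. buf must be a
+ * kmalloc'd (DMA-able) buffer and len a multiple of AES_BLOCK_SIZE.
+ */
+struct example_wait {
+	struct completion done;
+	int err;
+};
+
+static void example_crypt_done(struct crypto_async_request *req, int err)
+{
+	struct example_wait *w = req->data;
+
+	if (err == -EINPROGRESS)
+		return;		/* request left the backlog, final status comes later */
+
+	w->err = err;
+	complete(&w->done);
+}
+
+static int __maybe_unused example_cbc_aes_encrypt(const u8 *key,
+						   unsigned int keylen,
+						   u8 *buf, unsigned int len,
+						   u8 iv[AES_BLOCK_SIZE])
+{
+	struct crypto_skcipher *tfm;
+	struct skcipher_request *req;
+	struct scatterlist sg;
+	struct example_wait wait;
+	int err;
+
+	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
+	if (!req) {
+		err = -ENOMEM;
+		goto out_free_tfm;
+	}
+
+	err = crypto_skcipher_setkey(tfm, key, keylen);
+	if (err)
+		goto out_free_req;
+
+	init_completion(&wait.done);
+	sg_init_one(&sg, buf, len);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+				      CRYPTO_TFM_REQ_MAY_SLEEP,
+				      example_crypt_done, &wait);
+	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
+
+	err = crypto_skcipher_encrypt(req);
+	if (err == -EINPROGRESS || err == -EBUSY) {
+		wait_for_completion(&wait.done);
+		err = wait.err;
+	}
+
+out_free_req:
+	skcipher_request_free(req);
+out_free_tfm:
+	crypto_free_skcipher(tfm);
+	return err;
+}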
17329 +
17330 +struct caam_crypto_alg {
17331 + struct list_head entry;
17332 + struct crypto_alg crypto_alg;
17333 + struct caam_alg_entry caam;
17334 +};
17335 +
17336 +static int caam_cra_init(struct crypto_tfm *tfm)
17337 +{
17338 + struct crypto_alg *alg = tfm->__crt_alg;
17339 + struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
17340 + crypto_alg);
17341 + struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
17342 + dma_addr_t dma_addr;
17343 + int i;
17344 +
17345 + /* copy descriptor header template value */
17346 + ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
17347 + caam_alg->caam.class1_alg_type;
17348 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG |
17349 + caam_alg->caam.class2_alg_type;
17350 +
17351 + ctx->dev = caam_alg->caam.dev;
17352 +
17353 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
17354 + offsetof(struct caam_ctx, flc_dma),
17355 + DMA_BIDIRECTIONAL,
17356 + DMA_ATTR_SKIP_CPU_SYNC);
17357 + if (dma_mapping_error(ctx->dev, dma_addr)) {
17358 + dev_err(ctx->dev, "unable to map key, shared descriptors\n");
17359 + return -ENOMEM;
17360 + }
17361 +
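+	/*
+	 * The mapping above spans ctx->flc[] and the key buffer that follows
+	 * it in struct caam_ctx (everything up to the flc_dma member), so the
+	 * per-operation shared descriptor and key DMA addresses are simple
+	 * offsets into it; the setkey paths keep them coherent with
+	 * dma_sync_single_for_device().
+	 */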
17362 + for (i = 0; i < NUM_OP; i++)
17363 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
17364 + ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
17365 +
17366 + return 0;
17367 +}
17368 +
17369 +static int caam_cra_init_ablkcipher(struct crypto_tfm *tfm)
17370 +{
17371 + struct ablkcipher_tfm *ablkcipher_tfm =
17372 + crypto_ablkcipher_crt(__crypto_ablkcipher_cast(tfm));
17373 +
17374 + ablkcipher_tfm->reqsize = sizeof(struct caam_request);
17375 + return caam_cra_init(tfm);
17376 +}
17377 +
17378 +static int caam_cra_init_aead(struct crypto_aead *tfm)
17379 +{
17380 + crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
17381 + return caam_cra_init(crypto_aead_tfm(tfm));
17382 +}
17383 +
17384 +static void caam_exit_common(struct caam_ctx *ctx)
17385 +{
17386 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
17387 + offsetof(struct caam_ctx, flc_dma),
17388 + DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
17389 +}
17390 +
17391 +static void caam_cra_exit(struct crypto_tfm *tfm)
17392 +{
17393 + caam_exit_common(crypto_tfm_ctx(tfm));
17394 +}
17395 +
17396 +static void caam_cra_exit_aead(struct crypto_aead *tfm)
17397 +{
17398 + caam_exit_common(crypto_aead_ctx(tfm));
17399 +}
17400 +
17401 +#define template_ablkcipher template_u.ablkcipher
17402 +struct caam_alg_template {
17403 + char name[CRYPTO_MAX_ALG_NAME];
17404 + char driver_name[CRYPTO_MAX_ALG_NAME];
17405 + unsigned int blocksize;
17406 + u32 type;
17407 + union {
17408 + struct ablkcipher_alg ablkcipher;
17409 + } template_u;
17410 + u32 class1_alg_type;
17411 + u32 class2_alg_type;
17412 +};
17413 +
17414 +static struct caam_alg_template driver_algs[] = {
17415 + /* ablkcipher descriptor */
17416 + {
17417 + .name = "cbc(aes)",
17418 + .driver_name = "cbc-aes-caam-qi2",
17419 + .blocksize = AES_BLOCK_SIZE,
17420 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17421 + .template_ablkcipher = {
17422 + .setkey = ablkcipher_setkey,
17423 + .encrypt = ablkcipher_encrypt,
17424 + .decrypt = ablkcipher_decrypt,
17425 + .givencrypt = ablkcipher_givencrypt,
17426 + .geniv = "<built-in>",
17427 + .min_keysize = AES_MIN_KEY_SIZE,
17428 + .max_keysize = AES_MAX_KEY_SIZE,
17429 + .ivsize = AES_BLOCK_SIZE,
17430 + },
17431 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17432 + },
17433 + {
17434 + .name = "cbc(des3_ede)",
17435 + .driver_name = "cbc-3des-caam-qi2",
17436 + .blocksize = DES3_EDE_BLOCK_SIZE,
17437 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17438 + .template_ablkcipher = {
17439 + .setkey = ablkcipher_setkey,
17440 + .encrypt = ablkcipher_encrypt,
17441 + .decrypt = ablkcipher_decrypt,
17442 + .givencrypt = ablkcipher_givencrypt,
17443 + .geniv = "<built-in>",
17444 + .min_keysize = DES3_EDE_KEY_SIZE,
17445 + .max_keysize = DES3_EDE_KEY_SIZE,
17446 + .ivsize = DES3_EDE_BLOCK_SIZE,
17447 + },
17448 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17449 + },
17450 + {
17451 + .name = "cbc(des)",
17452 + .driver_name = "cbc-des-caam-qi2",
17453 + .blocksize = DES_BLOCK_SIZE,
17454 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17455 + .template_ablkcipher = {
17456 + .setkey = ablkcipher_setkey,
17457 + .encrypt = ablkcipher_encrypt,
17458 + .decrypt = ablkcipher_decrypt,
17459 + .givencrypt = ablkcipher_givencrypt,
17460 + .geniv = "<built-in>",
17461 + .min_keysize = DES_KEY_SIZE,
17462 + .max_keysize = DES_KEY_SIZE,
17463 + .ivsize = DES_BLOCK_SIZE,
17464 + },
17465 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
17466 + },
17467 + {
17468 + .name = "ctr(aes)",
17469 + .driver_name = "ctr-aes-caam-qi2",
17470 + .blocksize = 1,
17471 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17472 + .template_ablkcipher = {
17473 + .setkey = ablkcipher_setkey,
17474 + .encrypt = ablkcipher_encrypt,
17475 + .decrypt = ablkcipher_decrypt,
17476 + .geniv = "chainiv",
17477 + .min_keysize = AES_MIN_KEY_SIZE,
17478 + .max_keysize = AES_MAX_KEY_SIZE,
17479 + .ivsize = AES_BLOCK_SIZE,
17480 + },
17481 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17482 + },
17483 + {
17484 + .name = "rfc3686(ctr(aes))",
17485 + .driver_name = "rfc3686-ctr-aes-caam-qi2",
17486 + .blocksize = 1,
17487 + .type = CRYPTO_ALG_TYPE_GIVCIPHER,
17488 + .template_ablkcipher = {
17489 + .setkey = ablkcipher_setkey,
17490 + .encrypt = ablkcipher_encrypt,
17491 + .decrypt = ablkcipher_decrypt,
17492 + .givencrypt = ablkcipher_givencrypt,
17493 + .geniv = "<built-in>",
17494 + .min_keysize = AES_MIN_KEY_SIZE +
17495 + CTR_RFC3686_NONCE_SIZE,
17496 + .max_keysize = AES_MAX_KEY_SIZE +
17497 + CTR_RFC3686_NONCE_SIZE,
17498 + .ivsize = CTR_RFC3686_IV_SIZE,
17499 + },
17500 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
17501 + },
17502 + {
17503 + .name = "xts(aes)",
17504 + .driver_name = "xts-aes-caam-qi2",
17505 + .blocksize = AES_BLOCK_SIZE,
17506 + .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
17507 + .template_ablkcipher = {
17508 + .setkey = xts_ablkcipher_setkey,
17509 + .encrypt = ablkcipher_encrypt,
17510 + .decrypt = ablkcipher_decrypt,
17511 + .geniv = "eseqiv",
17512 + .min_keysize = 2 * AES_MIN_KEY_SIZE,
17513 + .max_keysize = 2 * AES_MAX_KEY_SIZE,
17514 + .ivsize = AES_BLOCK_SIZE,
17515 + },
17516 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
17517 + }
17518 +};
17519 +
17520 +static struct caam_aead_alg driver_aeads[] = {
17521 + {
17522 + .aead = {
17523 + .base = {
17524 + .cra_name = "rfc4106(gcm(aes))",
17525 + .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
17526 + .cra_blocksize = 1,
17527 + },
17528 + .setkey = rfc4106_setkey,
17529 + .setauthsize = rfc4106_setauthsize,
17530 + .encrypt = ipsec_gcm_encrypt,
17531 + .decrypt = ipsec_gcm_decrypt,
17532 + .ivsize = 8,
17533 + .maxauthsize = AES_BLOCK_SIZE,
17534 + },
17535 + .caam = {
17536 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17537 + },
17538 + },
17539 + {
17540 + .aead = {
17541 + .base = {
17542 + .cra_name = "rfc4543(gcm(aes))",
17543 + .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
17544 + .cra_blocksize = 1,
17545 + },
17546 + .setkey = rfc4543_setkey,
17547 + .setauthsize = rfc4543_setauthsize,
17548 + .encrypt = ipsec_gcm_encrypt,
17549 + .decrypt = ipsec_gcm_decrypt,
17550 + .ivsize = 8,
17551 + .maxauthsize = AES_BLOCK_SIZE,
17552 + },
17553 + .caam = {
17554 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17555 + },
17556 + },
17557 + /* Galois Counter Mode */
17558 + {
17559 + .aead = {
17560 + .base = {
17561 + .cra_name = "gcm(aes)",
17562 + .cra_driver_name = "gcm-aes-caam-qi2",
17563 + .cra_blocksize = 1,
17564 + },
17565 + .setkey = gcm_setkey,
17566 + .setauthsize = gcm_setauthsize,
17567 + .encrypt = aead_encrypt,
17568 + .decrypt = aead_decrypt,
17569 + .ivsize = 12,
17570 + .maxauthsize = AES_BLOCK_SIZE,
17571 + },
17572 + .caam = {
17573 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
17574 + }
17575 + },
17576 + /* single-pass ipsec_esp descriptor */
17577 + {
17578 + .aead = {
17579 + .base = {
17580 + .cra_name = "authenc(hmac(md5),cbc(aes))",
17581 + .cra_driver_name = "authenc-hmac-md5-"
17582 + "cbc-aes-caam-qi2",
17583 + .cra_blocksize = AES_BLOCK_SIZE,
17584 + },
17585 + .setkey = aead_setkey,
17586 + .setauthsize = aead_setauthsize,
17587 + .encrypt = aead_encrypt,
17588 + .decrypt = aead_decrypt,
17589 + .ivsize = AES_BLOCK_SIZE,
17590 + .maxauthsize = MD5_DIGEST_SIZE,
17591 + },
17592 + .caam = {
17593 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17594 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17595 + OP_ALG_AAI_HMAC_PRECOMP,
17596 + }
17597 + },
17598 + {
17599 + .aead = {
17600 + .base = {
17601 + .cra_name = "echainiv(authenc(hmac(md5),"
17602 + "cbc(aes)))",
17603 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
17604 + "cbc-aes-caam-qi2",
17605 + .cra_blocksize = AES_BLOCK_SIZE,
17606 + },
17607 + .setkey = aead_setkey,
17608 + .setauthsize = aead_setauthsize,
17609 + .encrypt = aead_encrypt,
17610 + .decrypt = aead_decrypt,
17611 + .ivsize = AES_BLOCK_SIZE,
17612 + .maxauthsize = MD5_DIGEST_SIZE,
17613 + },
17614 + .caam = {
17615 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17616 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17617 + OP_ALG_AAI_HMAC_PRECOMP,
17618 + .geniv = true,
17619 + }
17620 + },
17621 + {
17622 + .aead = {
17623 + .base = {
17624 + .cra_name = "authenc(hmac(sha1),cbc(aes))",
17625 + .cra_driver_name = "authenc-hmac-sha1-"
17626 + "cbc-aes-caam-qi2",
17627 + .cra_blocksize = AES_BLOCK_SIZE,
17628 + },
17629 + .setkey = aead_setkey,
17630 + .setauthsize = aead_setauthsize,
17631 + .encrypt = aead_encrypt,
17632 + .decrypt = aead_decrypt,
17633 + .ivsize = AES_BLOCK_SIZE,
17634 + .maxauthsize = SHA1_DIGEST_SIZE,
17635 + },
17636 + .caam = {
17637 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17638 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17639 + OP_ALG_AAI_HMAC_PRECOMP,
17640 + }
17641 + },
17642 + {
17643 + .aead = {
17644 + .base = {
17645 + .cra_name = "echainiv(authenc(hmac(sha1),"
17646 + "cbc(aes)))",
17647 + .cra_driver_name = "echainiv-authenc-"
17648 + "hmac-sha1-cbc-aes-caam-qi2",
17649 + .cra_blocksize = AES_BLOCK_SIZE,
17650 + },
17651 + .setkey = aead_setkey,
17652 + .setauthsize = aead_setauthsize,
17653 + .encrypt = aead_encrypt,
17654 + .decrypt = aead_decrypt,
17655 + .ivsize = AES_BLOCK_SIZE,
17656 + .maxauthsize = SHA1_DIGEST_SIZE,
17657 + },
17658 + .caam = {
17659 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17660 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17661 + OP_ALG_AAI_HMAC_PRECOMP,
17662 + .geniv = true,
17663 + },
17664 + },
17665 + {
17666 + .aead = {
17667 + .base = {
17668 + .cra_name = "authenc(hmac(sha224),cbc(aes))",
17669 + .cra_driver_name = "authenc-hmac-sha224-"
17670 + "cbc-aes-caam-qi2",
17671 + .cra_blocksize = AES_BLOCK_SIZE,
17672 + },
17673 + .setkey = aead_setkey,
17674 + .setauthsize = aead_setauthsize,
17675 + .encrypt = aead_encrypt,
17676 + .decrypt = aead_decrypt,
17677 + .ivsize = AES_BLOCK_SIZE,
17678 + .maxauthsize = SHA224_DIGEST_SIZE,
17679 + },
17680 + .caam = {
17681 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17682 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17683 + OP_ALG_AAI_HMAC_PRECOMP,
17684 + }
17685 + },
17686 + {
17687 + .aead = {
17688 + .base = {
17689 + .cra_name = "echainiv(authenc(hmac(sha224),"
17690 + "cbc(aes)))",
17691 + .cra_driver_name = "echainiv-authenc-"
17692 + "hmac-sha224-cbc-aes-caam-qi2",
17693 + .cra_blocksize = AES_BLOCK_SIZE,
17694 + },
17695 + .setkey = aead_setkey,
17696 + .setauthsize = aead_setauthsize,
17697 + .encrypt = aead_encrypt,
17698 + .decrypt = aead_decrypt,
17699 + .ivsize = AES_BLOCK_SIZE,
17700 + .maxauthsize = SHA224_DIGEST_SIZE,
17701 + },
17702 + .caam = {
17703 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17704 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17705 + OP_ALG_AAI_HMAC_PRECOMP,
17706 + .geniv = true,
17707 + }
17708 + },
17709 + {
17710 + .aead = {
17711 + .base = {
17712 + .cra_name = "authenc(hmac(sha256),cbc(aes))",
17713 + .cra_driver_name = "authenc-hmac-sha256-"
17714 + "cbc-aes-caam-qi2",
17715 + .cra_blocksize = AES_BLOCK_SIZE,
17716 + },
17717 + .setkey = aead_setkey,
17718 + .setauthsize = aead_setauthsize,
17719 + .encrypt = aead_encrypt,
17720 + .decrypt = aead_decrypt,
17721 + .ivsize = AES_BLOCK_SIZE,
17722 + .maxauthsize = SHA256_DIGEST_SIZE,
17723 + },
17724 + .caam = {
17725 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17726 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17727 + OP_ALG_AAI_HMAC_PRECOMP,
17728 + }
17729 + },
17730 + {
17731 + .aead = {
17732 + .base = {
17733 + .cra_name = "echainiv(authenc(hmac(sha256),"
17734 + "cbc(aes)))",
17735 + .cra_driver_name = "echainiv-authenc-"
17736 + "hmac-sha256-cbc-aes-"
17737 + "caam-qi2",
17738 + .cra_blocksize = AES_BLOCK_SIZE,
17739 + },
17740 + .setkey = aead_setkey,
17741 + .setauthsize = aead_setauthsize,
17742 + .encrypt = aead_encrypt,
17743 + .decrypt = aead_decrypt,
17744 + .ivsize = AES_BLOCK_SIZE,
17745 + .maxauthsize = SHA256_DIGEST_SIZE,
17746 + },
17747 + .caam = {
17748 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17749 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17750 + OP_ALG_AAI_HMAC_PRECOMP,
17751 + .geniv = true,
17752 + }
17753 + },
17754 + {
17755 + .aead = {
17756 + .base = {
17757 + .cra_name = "authenc(hmac(sha384),cbc(aes))",
17758 + .cra_driver_name = "authenc-hmac-sha384-"
17759 + "cbc-aes-caam-qi2",
17760 + .cra_blocksize = AES_BLOCK_SIZE,
17761 + },
17762 + .setkey = aead_setkey,
17763 + .setauthsize = aead_setauthsize,
17764 + .encrypt = aead_encrypt,
17765 + .decrypt = aead_decrypt,
17766 + .ivsize = AES_BLOCK_SIZE,
17767 + .maxauthsize = SHA384_DIGEST_SIZE,
17768 + },
17769 + .caam = {
17770 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17771 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17772 + OP_ALG_AAI_HMAC_PRECOMP,
17773 + }
17774 + },
17775 + {
17776 + .aead = {
17777 + .base = {
17778 + .cra_name = "echainiv(authenc(hmac(sha384),"
17779 + "cbc(aes)))",
17780 + .cra_driver_name = "echainiv-authenc-"
17781 + "hmac-sha384-cbc-aes-"
17782 + "caam-qi2",
17783 + .cra_blocksize = AES_BLOCK_SIZE,
17784 + },
17785 + .setkey = aead_setkey,
17786 + .setauthsize = aead_setauthsize,
17787 + .encrypt = aead_encrypt,
17788 + .decrypt = aead_decrypt,
17789 + .ivsize = AES_BLOCK_SIZE,
17790 + .maxauthsize = SHA384_DIGEST_SIZE,
17791 + },
17792 + .caam = {
17793 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17794 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
17795 + OP_ALG_AAI_HMAC_PRECOMP,
17796 + .geniv = true,
17797 + }
17798 + },
17799 + {
17800 + .aead = {
17801 + .base = {
17802 + .cra_name = "authenc(hmac(sha512),cbc(aes))",
17803 + .cra_driver_name = "authenc-hmac-sha512-"
17804 + "cbc-aes-caam-qi2",
17805 + .cra_blocksize = AES_BLOCK_SIZE,
17806 + },
17807 + .setkey = aead_setkey,
17808 + .setauthsize = aead_setauthsize,
17809 + .encrypt = aead_encrypt,
17810 + .decrypt = aead_decrypt,
17811 + .ivsize = AES_BLOCK_SIZE,
17812 + .maxauthsize = SHA512_DIGEST_SIZE,
17813 + },
17814 + .caam = {
17815 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17816 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17817 + OP_ALG_AAI_HMAC_PRECOMP,
17818 + }
17819 + },
17820 + {
17821 + .aead = {
17822 + .base = {
17823 + .cra_name = "echainiv(authenc(hmac(sha512),"
17824 + "cbc(aes)))",
17825 + .cra_driver_name = "echainiv-authenc-"
17826 + "hmac-sha512-cbc-aes-"
17827 + "caam-qi2",
17828 + .cra_blocksize = AES_BLOCK_SIZE,
17829 + },
17830 + .setkey = aead_setkey,
17831 + .setauthsize = aead_setauthsize,
17832 + .encrypt = aead_encrypt,
17833 + .decrypt = aead_decrypt,
17834 + .ivsize = AES_BLOCK_SIZE,
17835 + .maxauthsize = SHA512_DIGEST_SIZE,
17836 + },
17837 + .caam = {
17838 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
17839 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
17840 + OP_ALG_AAI_HMAC_PRECOMP,
17841 + .geniv = true,
17842 + }
17843 + },
17844 + {
17845 + .aead = {
17846 + .base = {
17847 + .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
17848 + .cra_driver_name = "authenc-hmac-md5-"
17849 + "cbc-des3_ede-caam-qi2",
17850 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17851 + },
17852 + .setkey = aead_setkey,
17853 + .setauthsize = aead_setauthsize,
17854 + .encrypt = aead_encrypt,
17855 + .decrypt = aead_decrypt,
17856 + .ivsize = DES3_EDE_BLOCK_SIZE,
17857 + .maxauthsize = MD5_DIGEST_SIZE,
17858 + },
17859 + .caam = {
17860 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17861 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17862 + OP_ALG_AAI_HMAC_PRECOMP,
17863 + }
17864 + },
17865 + {
17866 + .aead = {
17867 + .base = {
17868 + .cra_name = "echainiv(authenc(hmac(md5),"
17869 + "cbc(des3_ede)))",
17870 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
17871 + "cbc-des3_ede-caam-qi2",
17872 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17873 + },
17874 + .setkey = aead_setkey,
17875 + .setauthsize = aead_setauthsize,
17876 + .encrypt = aead_encrypt,
17877 + .decrypt = aead_decrypt,
17878 + .ivsize = DES3_EDE_BLOCK_SIZE,
17879 + .maxauthsize = MD5_DIGEST_SIZE,
17880 + },
17881 + .caam = {
17882 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17883 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
17884 + OP_ALG_AAI_HMAC_PRECOMP,
17885 + .geniv = true,
17886 + }
17887 + },
17888 + {
17889 + .aead = {
17890 + .base = {
17891 + .cra_name = "authenc(hmac(sha1),"
17892 + "cbc(des3_ede))",
17893 + .cra_driver_name = "authenc-hmac-sha1-"
17894 + "cbc-des3_ede-caam-qi2",
17895 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17896 + },
17897 + .setkey = aead_setkey,
17898 + .setauthsize = aead_setauthsize,
17899 + .encrypt = aead_encrypt,
17900 + .decrypt = aead_decrypt,
17901 + .ivsize = DES3_EDE_BLOCK_SIZE,
17902 + .maxauthsize = SHA1_DIGEST_SIZE,
17903 + },
17904 + .caam = {
17905 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17906 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17907 + OP_ALG_AAI_HMAC_PRECOMP,
17908 + },
17909 + },
17910 + {
17911 + .aead = {
17912 + .base = {
17913 + .cra_name = "echainiv(authenc(hmac(sha1),"
17914 + "cbc(des3_ede)))",
17915 + .cra_driver_name = "echainiv-authenc-"
17916 + "hmac-sha1-"
17917 + "cbc-des3_ede-caam-qi2",
17918 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17919 + },
17920 + .setkey = aead_setkey,
17921 + .setauthsize = aead_setauthsize,
17922 + .encrypt = aead_encrypt,
17923 + .decrypt = aead_decrypt,
17924 + .ivsize = DES3_EDE_BLOCK_SIZE,
17925 + .maxauthsize = SHA1_DIGEST_SIZE,
17926 + },
17927 + .caam = {
17928 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17929 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
17930 + OP_ALG_AAI_HMAC_PRECOMP,
17931 + .geniv = true,
17932 + }
17933 + },
17934 + {
17935 + .aead = {
17936 + .base = {
17937 + .cra_name = "authenc(hmac(sha224),"
17938 + "cbc(des3_ede))",
17939 + .cra_driver_name = "authenc-hmac-sha224-"
17940 + "cbc-des3_ede-caam-qi2",
17941 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17942 + },
17943 + .setkey = aead_setkey,
17944 + .setauthsize = aead_setauthsize,
17945 + .encrypt = aead_encrypt,
17946 + .decrypt = aead_decrypt,
17947 + .ivsize = DES3_EDE_BLOCK_SIZE,
17948 + .maxauthsize = SHA224_DIGEST_SIZE,
17949 + },
17950 + .caam = {
17951 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17952 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17953 + OP_ALG_AAI_HMAC_PRECOMP,
17954 + },
17955 + },
17956 + {
17957 + .aead = {
17958 + .base = {
17959 + .cra_name = "echainiv(authenc(hmac(sha224),"
17960 + "cbc(des3_ede)))",
17961 + .cra_driver_name = "echainiv-authenc-"
17962 + "hmac-sha224-"
17963 + "cbc-des3_ede-caam-qi2",
17964 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17965 + },
17966 + .setkey = aead_setkey,
17967 + .setauthsize = aead_setauthsize,
17968 + .encrypt = aead_encrypt,
17969 + .decrypt = aead_decrypt,
17970 + .ivsize = DES3_EDE_BLOCK_SIZE,
17971 + .maxauthsize = SHA224_DIGEST_SIZE,
17972 + },
17973 + .caam = {
17974 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17975 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
17976 + OP_ALG_AAI_HMAC_PRECOMP,
17977 + .geniv = true,
17978 + }
17979 + },
17980 + {
17981 + .aead = {
17982 + .base = {
17983 + .cra_name = "authenc(hmac(sha256),"
17984 + "cbc(des3_ede))",
17985 + .cra_driver_name = "authenc-hmac-sha256-"
17986 + "cbc-des3_ede-caam-qi2",
17987 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
17988 + },
17989 + .setkey = aead_setkey,
17990 + .setauthsize = aead_setauthsize,
17991 + .encrypt = aead_encrypt,
17992 + .decrypt = aead_decrypt,
17993 + .ivsize = DES3_EDE_BLOCK_SIZE,
17994 + .maxauthsize = SHA256_DIGEST_SIZE,
17995 + },
17996 + .caam = {
17997 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
17998 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
17999 + OP_ALG_AAI_HMAC_PRECOMP,
18000 + },
18001 + },
18002 + {
18003 + .aead = {
18004 + .base = {
18005 + .cra_name = "echainiv(authenc(hmac(sha256),"
18006 + "cbc(des3_ede)))",
18007 + .cra_driver_name = "echainiv-authenc-"
18008 + "hmac-sha256-"
18009 + "cbc-des3_ede-caam-qi2",
18010 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18011 + },
18012 + .setkey = aead_setkey,
18013 + .setauthsize = aead_setauthsize,
18014 + .encrypt = aead_encrypt,
18015 + .decrypt = aead_decrypt,
18016 + .ivsize = DES3_EDE_BLOCK_SIZE,
18017 + .maxauthsize = SHA256_DIGEST_SIZE,
18018 + },
18019 + .caam = {
18020 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18021 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18022 + OP_ALG_AAI_HMAC_PRECOMP,
18023 + .geniv = true,
18024 + }
18025 + },
18026 + {
18027 + .aead = {
18028 + .base = {
18029 + .cra_name = "authenc(hmac(sha384),"
18030 + "cbc(des3_ede))",
18031 + .cra_driver_name = "authenc-hmac-sha384-"
18032 + "cbc-des3_ede-caam-qi2",
18033 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18034 + },
18035 + .setkey = aead_setkey,
18036 + .setauthsize = aead_setauthsize,
18037 + .encrypt = aead_encrypt,
18038 + .decrypt = aead_decrypt,
18039 + .ivsize = DES3_EDE_BLOCK_SIZE,
18040 + .maxauthsize = SHA384_DIGEST_SIZE,
18041 + },
18042 + .caam = {
18043 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18044 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18045 + OP_ALG_AAI_HMAC_PRECOMP,
18046 + },
18047 + },
18048 + {
18049 + .aead = {
18050 + .base = {
18051 + .cra_name = "echainiv(authenc(hmac(sha384),"
18052 + "cbc(des3_ede)))",
18053 + .cra_driver_name = "echainiv-authenc-"
18054 + "hmac-sha384-"
18055 + "cbc-des3_ede-caam-qi2",
18056 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18057 + },
18058 + .setkey = aead_setkey,
18059 + .setauthsize = aead_setauthsize,
18060 + .encrypt = aead_encrypt,
18061 + .decrypt = aead_decrypt,
18062 + .ivsize = DES3_EDE_BLOCK_SIZE,
18063 + .maxauthsize = SHA384_DIGEST_SIZE,
18064 + },
18065 + .caam = {
18066 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18067 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18068 + OP_ALG_AAI_HMAC_PRECOMP,
18069 + .geniv = true,
18070 + }
18071 + },
18072 + {
18073 + .aead = {
18074 + .base = {
18075 + .cra_name = "authenc(hmac(sha512),"
18076 + "cbc(des3_ede))",
18077 + .cra_driver_name = "authenc-hmac-sha512-"
18078 + "cbc-des3_ede-caam-qi2",
18079 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18080 + },
18081 + .setkey = aead_setkey,
18082 + .setauthsize = aead_setauthsize,
18083 + .encrypt = aead_encrypt,
18084 + .decrypt = aead_decrypt,
18085 + .ivsize = DES3_EDE_BLOCK_SIZE,
18086 + .maxauthsize = SHA512_DIGEST_SIZE,
18087 + },
18088 + .caam = {
18089 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18090 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18091 + OP_ALG_AAI_HMAC_PRECOMP,
18092 + },
18093 + },
18094 + {
18095 + .aead = {
18096 + .base = {
18097 + .cra_name = "echainiv(authenc(hmac(sha512),"
18098 + "cbc(des3_ede)))",
18099 + .cra_driver_name = "echainiv-authenc-"
18100 + "hmac-sha512-"
18101 + "cbc-des3_ede-caam-qi2",
18102 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
18103 + },
18104 + .setkey = aead_setkey,
18105 + .setauthsize = aead_setauthsize,
18106 + .encrypt = aead_encrypt,
18107 + .decrypt = aead_decrypt,
18108 + .ivsize = DES3_EDE_BLOCK_SIZE,
18109 + .maxauthsize = SHA512_DIGEST_SIZE,
18110 + },
18111 + .caam = {
18112 + .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
18113 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18114 + OP_ALG_AAI_HMAC_PRECOMP,
18115 + .geniv = true,
18116 + }
18117 + },
18118 + {
18119 + .aead = {
18120 + .base = {
18121 + .cra_name = "authenc(hmac(md5),cbc(des))",
18122 + .cra_driver_name = "authenc-hmac-md5-"
18123 + "cbc-des-caam-qi2",
18124 + .cra_blocksize = DES_BLOCK_SIZE,
18125 + },
18126 + .setkey = aead_setkey,
18127 + .setauthsize = aead_setauthsize,
18128 + .encrypt = aead_encrypt,
18129 + .decrypt = aead_decrypt,
18130 + .ivsize = DES_BLOCK_SIZE,
18131 + .maxauthsize = MD5_DIGEST_SIZE,
18132 + },
18133 + .caam = {
18134 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18135 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18136 + OP_ALG_AAI_HMAC_PRECOMP,
18137 + },
18138 + },
18139 + {
18140 + .aead = {
18141 + .base = {
18142 + .cra_name = "echainiv(authenc(hmac(md5),"
18143 + "cbc(des)))",
18144 + .cra_driver_name = "echainiv-authenc-hmac-md5-"
18145 + "cbc-des-caam-qi2",
18146 + .cra_blocksize = DES_BLOCK_SIZE,
18147 + },
18148 + .setkey = aead_setkey,
18149 + .setauthsize = aead_setauthsize,
18150 + .encrypt = aead_encrypt,
18151 + .decrypt = aead_decrypt,
18152 + .ivsize = DES_BLOCK_SIZE,
18153 + .maxauthsize = MD5_DIGEST_SIZE,
18154 + },
18155 + .caam = {
18156 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18157 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18158 + OP_ALG_AAI_HMAC_PRECOMP,
18159 + .geniv = true,
18160 + }
18161 + },
18162 + {
18163 + .aead = {
18164 + .base = {
18165 + .cra_name = "authenc(hmac(sha1),cbc(des))",
18166 + .cra_driver_name = "authenc-hmac-sha1-"
18167 + "cbc-des-caam-qi2",
18168 + .cra_blocksize = DES_BLOCK_SIZE,
18169 + },
18170 + .setkey = aead_setkey,
18171 + .setauthsize = aead_setauthsize,
18172 + .encrypt = aead_encrypt,
18173 + .decrypt = aead_decrypt,
18174 + .ivsize = DES_BLOCK_SIZE,
18175 + .maxauthsize = SHA1_DIGEST_SIZE,
18176 + },
18177 + .caam = {
18178 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18179 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18180 + OP_ALG_AAI_HMAC_PRECOMP,
18181 + },
18182 + },
18183 + {
18184 + .aead = {
18185 + .base = {
18186 + .cra_name = "echainiv(authenc(hmac(sha1),"
18187 + "cbc(des)))",
18188 + .cra_driver_name = "echainiv-authenc-"
18189 + "hmac-sha1-cbc-des-caam-qi2",
18190 + .cra_blocksize = DES_BLOCK_SIZE,
18191 + },
18192 + .setkey = aead_setkey,
18193 + .setauthsize = aead_setauthsize,
18194 + .encrypt = aead_encrypt,
18195 + .decrypt = aead_decrypt,
18196 + .ivsize = DES_BLOCK_SIZE,
18197 + .maxauthsize = SHA1_DIGEST_SIZE,
18198 + },
18199 + .caam = {
18200 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18201 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18202 + OP_ALG_AAI_HMAC_PRECOMP,
18203 + .geniv = true,
18204 + }
18205 + },
18206 + {
18207 + .aead = {
18208 + .base = {
18209 + .cra_name = "authenc(hmac(sha224),cbc(des))",
18210 + .cra_driver_name = "authenc-hmac-sha224-"
18211 + "cbc-des-caam-qi2",
18212 + .cra_blocksize = DES_BLOCK_SIZE,
18213 + },
18214 + .setkey = aead_setkey,
18215 + .setauthsize = aead_setauthsize,
18216 + .encrypt = aead_encrypt,
18217 + .decrypt = aead_decrypt,
18218 + .ivsize = DES_BLOCK_SIZE,
18219 + .maxauthsize = SHA224_DIGEST_SIZE,
18220 + },
18221 + .caam = {
18222 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18223 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18224 + OP_ALG_AAI_HMAC_PRECOMP,
18225 + },
18226 + },
18227 + {
18228 + .aead = {
18229 + .base = {
18230 + .cra_name = "echainiv(authenc(hmac(sha224),"
18231 + "cbc(des)))",
18232 + .cra_driver_name = "echainiv-authenc-"
18233 + "hmac-sha224-cbc-des-"
18234 + "caam-qi2",
18235 + .cra_blocksize = DES_BLOCK_SIZE,
18236 + },
18237 + .setkey = aead_setkey,
18238 + .setauthsize = aead_setauthsize,
18239 + .encrypt = aead_encrypt,
18240 + .decrypt = aead_decrypt,
18241 + .ivsize = DES_BLOCK_SIZE,
18242 + .maxauthsize = SHA224_DIGEST_SIZE,
18243 + },
18244 + .caam = {
18245 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18246 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18247 + OP_ALG_AAI_HMAC_PRECOMP,
18248 + .geniv = true,
18249 + }
18250 + },
18251 + {
18252 + .aead = {
18253 + .base = {
18254 + .cra_name = "authenc(hmac(sha256),cbc(des))",
18255 + .cra_driver_name = "authenc-hmac-sha256-"
18256 + "cbc-des-caam-qi2",
18257 + .cra_blocksize = DES_BLOCK_SIZE,
18258 + },
18259 + .setkey = aead_setkey,
18260 + .setauthsize = aead_setauthsize,
18261 + .encrypt = aead_encrypt,
18262 + .decrypt = aead_decrypt,
18263 + .ivsize = DES_BLOCK_SIZE,
18264 + .maxauthsize = SHA256_DIGEST_SIZE,
18265 + },
18266 + .caam = {
18267 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18268 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18269 + OP_ALG_AAI_HMAC_PRECOMP,
18270 + },
18271 + },
18272 + {
18273 + .aead = {
18274 + .base = {
18275 + .cra_name = "echainiv(authenc(hmac(sha256),"
18276 + "cbc(des)))",
18277 + .cra_driver_name = "echainiv-authenc-"
18278 +						   "hmac-sha256-cbc-des-"
18279 + "caam-qi2",
18280 + .cra_blocksize = DES_BLOCK_SIZE,
18281 + },
18282 + .setkey = aead_setkey,
18283 + .setauthsize = aead_setauthsize,
18284 + .encrypt = aead_encrypt,
18285 + .decrypt = aead_decrypt,
18286 + .ivsize = DES_BLOCK_SIZE,
18287 + .maxauthsize = SHA256_DIGEST_SIZE,
18288 + },
18289 + .caam = {
18290 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18291 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18292 + OP_ALG_AAI_HMAC_PRECOMP,
18293 + .geniv = true,
18294 + },
18295 + },
18296 + {
18297 + .aead = {
18298 + .base = {
18299 + .cra_name = "authenc(hmac(sha384),cbc(des))",
18300 + .cra_driver_name = "authenc-hmac-sha384-"
18301 + "cbc-des-caam-qi2",
18302 + .cra_blocksize = DES_BLOCK_SIZE,
18303 + },
18304 + .setkey = aead_setkey,
18305 + .setauthsize = aead_setauthsize,
18306 + .encrypt = aead_encrypt,
18307 + .decrypt = aead_decrypt,
18308 + .ivsize = DES_BLOCK_SIZE,
18309 + .maxauthsize = SHA384_DIGEST_SIZE,
18310 + },
18311 + .caam = {
18312 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18313 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18314 + OP_ALG_AAI_HMAC_PRECOMP,
18315 + },
18316 + },
18317 + {
18318 + .aead = {
18319 + .base = {
18320 + .cra_name = "echainiv(authenc(hmac(sha384),"
18321 + "cbc(des)))",
18322 + .cra_driver_name = "echainiv-authenc-"
18323 + "hmac-sha384-cbc-des-"
18324 + "caam-qi2",
18325 + .cra_blocksize = DES_BLOCK_SIZE,
18326 + },
18327 + .setkey = aead_setkey,
18328 + .setauthsize = aead_setauthsize,
18329 + .encrypt = aead_encrypt,
18330 + .decrypt = aead_decrypt,
18331 + .ivsize = DES_BLOCK_SIZE,
18332 + .maxauthsize = SHA384_DIGEST_SIZE,
18333 + },
18334 + .caam = {
18335 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18336 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18337 + OP_ALG_AAI_HMAC_PRECOMP,
18338 + .geniv = true,
18339 + }
18340 + },
18341 + {
18342 + .aead = {
18343 + .base = {
18344 + .cra_name = "authenc(hmac(sha512),cbc(des))",
18345 + .cra_driver_name = "authenc-hmac-sha512-"
18346 + "cbc-des-caam-qi2",
18347 + .cra_blocksize = DES_BLOCK_SIZE,
18348 + },
18349 + .setkey = aead_setkey,
18350 + .setauthsize = aead_setauthsize,
18351 + .encrypt = aead_encrypt,
18352 + .decrypt = aead_decrypt,
18353 + .ivsize = DES_BLOCK_SIZE,
18354 + .maxauthsize = SHA512_DIGEST_SIZE,
18355 + },
18356 + .caam = {
18357 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18358 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18359 + OP_ALG_AAI_HMAC_PRECOMP,
18360 + }
18361 + },
18362 + {
18363 + .aead = {
18364 + .base = {
18365 + .cra_name = "echainiv(authenc(hmac(sha512),"
18366 + "cbc(des)))",
18367 + .cra_driver_name = "echainiv-authenc-"
18368 + "hmac-sha512-cbc-des-"
18369 + "caam-qi2",
18370 + .cra_blocksize = DES_BLOCK_SIZE,
18371 + },
18372 + .setkey = aead_setkey,
18373 + .setauthsize = aead_setauthsize,
18374 + .encrypt = aead_encrypt,
18375 + .decrypt = aead_decrypt,
18376 + .ivsize = DES_BLOCK_SIZE,
18377 + .maxauthsize = SHA512_DIGEST_SIZE,
18378 + },
18379 + .caam = {
18380 + .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
18381 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18382 + OP_ALG_AAI_HMAC_PRECOMP,
18383 + .geniv = true,
18384 + }
18385 + },
18386 + {
18387 + .aead = {
18388 + .base = {
18389 + .cra_name = "authenc(hmac(md5),"
18390 + "rfc3686(ctr(aes)))",
18391 + .cra_driver_name = "authenc-hmac-md5-"
18392 + "rfc3686-ctr-aes-caam-qi2",
18393 + .cra_blocksize = 1,
18394 + },
18395 + .setkey = aead_setkey,
18396 + .setauthsize = aead_setauthsize,
18397 + .encrypt = aead_encrypt,
18398 + .decrypt = aead_decrypt,
18399 + .ivsize = CTR_RFC3686_IV_SIZE,
18400 + .maxauthsize = MD5_DIGEST_SIZE,
18401 + },
18402 + .caam = {
18403 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18404 + OP_ALG_AAI_CTR_MOD128,
18405 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18406 + OP_ALG_AAI_HMAC_PRECOMP,
18407 + .rfc3686 = true,
18408 + },
18409 + },
18410 + {
18411 + .aead = {
18412 + .base = {
18413 + .cra_name = "seqiv(authenc("
18414 + "hmac(md5),rfc3686(ctr(aes))))",
18415 + .cra_driver_name = "seqiv-authenc-hmac-md5-"
18416 + "rfc3686-ctr-aes-caam-qi2",
18417 + .cra_blocksize = 1,
18418 + },
18419 + .setkey = aead_setkey,
18420 + .setauthsize = aead_setauthsize,
18421 + .encrypt = aead_encrypt,
18422 + .decrypt = aead_decrypt,
18423 + .ivsize = CTR_RFC3686_IV_SIZE,
18424 + .maxauthsize = MD5_DIGEST_SIZE,
18425 + },
18426 + .caam = {
18427 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18428 + OP_ALG_AAI_CTR_MOD128,
18429 + .class2_alg_type = OP_ALG_ALGSEL_MD5 |
18430 + OP_ALG_AAI_HMAC_PRECOMP,
18431 + .rfc3686 = true,
18432 + .geniv = true,
18433 + },
18434 + },
18435 + {
18436 + .aead = {
18437 + .base = {
18438 + .cra_name = "authenc(hmac(sha1),"
18439 + "rfc3686(ctr(aes)))",
18440 + .cra_driver_name = "authenc-hmac-sha1-"
18441 + "rfc3686-ctr-aes-caam-qi2",
18442 + .cra_blocksize = 1,
18443 + },
18444 + .setkey = aead_setkey,
18445 + .setauthsize = aead_setauthsize,
18446 + .encrypt = aead_encrypt,
18447 + .decrypt = aead_decrypt,
18448 + .ivsize = CTR_RFC3686_IV_SIZE,
18449 + .maxauthsize = SHA1_DIGEST_SIZE,
18450 + },
18451 + .caam = {
18452 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18453 + OP_ALG_AAI_CTR_MOD128,
18454 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18455 + OP_ALG_AAI_HMAC_PRECOMP,
18456 + .rfc3686 = true,
18457 + },
18458 + },
18459 + {
18460 + .aead = {
18461 + .base = {
18462 + .cra_name = "seqiv(authenc("
18463 + "hmac(sha1),rfc3686(ctr(aes))))",
18464 + .cra_driver_name = "seqiv-authenc-hmac-sha1-"
18465 + "rfc3686-ctr-aes-caam-qi2",
18466 + .cra_blocksize = 1,
18467 + },
18468 + .setkey = aead_setkey,
18469 + .setauthsize = aead_setauthsize,
18470 + .encrypt = aead_encrypt,
18471 + .decrypt = aead_decrypt,
18472 + .ivsize = CTR_RFC3686_IV_SIZE,
18473 + .maxauthsize = SHA1_DIGEST_SIZE,
18474 + },
18475 + .caam = {
18476 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18477 + OP_ALG_AAI_CTR_MOD128,
18478 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18479 + OP_ALG_AAI_HMAC_PRECOMP,
18480 + .rfc3686 = true,
18481 + .geniv = true,
18482 + },
18483 + },
18484 + {
18485 + .aead = {
18486 + .base = {
18487 + .cra_name = "authenc(hmac(sha224),"
18488 + "rfc3686(ctr(aes)))",
18489 + .cra_driver_name = "authenc-hmac-sha224-"
18490 + "rfc3686-ctr-aes-caam-qi2",
18491 + .cra_blocksize = 1,
18492 + },
18493 + .setkey = aead_setkey,
18494 + .setauthsize = aead_setauthsize,
18495 + .encrypt = aead_encrypt,
18496 + .decrypt = aead_decrypt,
18497 + .ivsize = CTR_RFC3686_IV_SIZE,
18498 + .maxauthsize = SHA224_DIGEST_SIZE,
18499 + },
18500 + .caam = {
18501 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18502 + OP_ALG_AAI_CTR_MOD128,
18503 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18504 + OP_ALG_AAI_HMAC_PRECOMP,
18505 + .rfc3686 = true,
18506 + },
18507 + },
18508 + {
18509 + .aead = {
18510 + .base = {
18511 + .cra_name = "seqiv(authenc("
18512 + "hmac(sha224),rfc3686(ctr(aes))))",
18513 + .cra_driver_name = "seqiv-authenc-hmac-sha224-"
18514 + "rfc3686-ctr-aes-caam-qi2",
18515 + .cra_blocksize = 1,
18516 + },
18517 + .setkey = aead_setkey,
18518 + .setauthsize = aead_setauthsize,
18519 + .encrypt = aead_encrypt,
18520 + .decrypt = aead_decrypt,
18521 + .ivsize = CTR_RFC3686_IV_SIZE,
18522 + .maxauthsize = SHA224_DIGEST_SIZE,
18523 + },
18524 + .caam = {
18525 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18526 + OP_ALG_AAI_CTR_MOD128,
18527 + .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
18528 + OP_ALG_AAI_HMAC_PRECOMP,
18529 + .rfc3686 = true,
18530 + .geniv = true,
18531 + },
18532 + },
18533 + {
18534 + .aead = {
18535 + .base = {
18536 + .cra_name = "authenc(hmac(sha256),"
18537 + "rfc3686(ctr(aes)))",
18538 + .cra_driver_name = "authenc-hmac-sha256-"
18539 + "rfc3686-ctr-aes-caam-qi2",
18540 + .cra_blocksize = 1,
18541 + },
18542 + .setkey = aead_setkey,
18543 + .setauthsize = aead_setauthsize,
18544 + .encrypt = aead_encrypt,
18545 + .decrypt = aead_decrypt,
18546 + .ivsize = CTR_RFC3686_IV_SIZE,
18547 + .maxauthsize = SHA256_DIGEST_SIZE,
18548 + },
18549 + .caam = {
18550 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18551 + OP_ALG_AAI_CTR_MOD128,
18552 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18553 + OP_ALG_AAI_HMAC_PRECOMP,
18554 + .rfc3686 = true,
18555 + },
18556 + },
18557 + {
18558 + .aead = {
18559 + .base = {
18560 + .cra_name = "seqiv(authenc(hmac(sha256),"
18561 + "rfc3686(ctr(aes))))",
18562 + .cra_driver_name = "seqiv-authenc-hmac-sha256-"
18563 + "rfc3686-ctr-aes-caam-qi2",
18564 + .cra_blocksize = 1,
18565 + },
18566 + .setkey = aead_setkey,
18567 + .setauthsize = aead_setauthsize,
18568 + .encrypt = aead_encrypt,
18569 + .decrypt = aead_decrypt,
18570 + .ivsize = CTR_RFC3686_IV_SIZE,
18571 + .maxauthsize = SHA256_DIGEST_SIZE,
18572 + },
18573 + .caam = {
18574 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18575 + OP_ALG_AAI_CTR_MOD128,
18576 + .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
18577 + OP_ALG_AAI_HMAC_PRECOMP,
18578 + .rfc3686 = true,
18579 + .geniv = true,
18580 + },
18581 + },
18582 + {
18583 + .aead = {
18584 + .base = {
18585 + .cra_name = "authenc(hmac(sha384),"
18586 + "rfc3686(ctr(aes)))",
18587 + .cra_driver_name = "authenc-hmac-sha384-"
18588 + "rfc3686-ctr-aes-caam-qi2",
18589 + .cra_blocksize = 1,
18590 + },
18591 + .setkey = aead_setkey,
18592 + .setauthsize = aead_setauthsize,
18593 + .encrypt = aead_encrypt,
18594 + .decrypt = aead_decrypt,
18595 + .ivsize = CTR_RFC3686_IV_SIZE,
18596 + .maxauthsize = SHA384_DIGEST_SIZE,
18597 + },
18598 + .caam = {
18599 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18600 + OP_ALG_AAI_CTR_MOD128,
18601 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18602 + OP_ALG_AAI_HMAC_PRECOMP,
18603 + .rfc3686 = true,
18604 + },
18605 + },
18606 + {
18607 + .aead = {
18608 + .base = {
18609 + .cra_name = "seqiv(authenc(hmac(sha384),"
18610 + "rfc3686(ctr(aes))))",
18611 + .cra_driver_name = "seqiv-authenc-hmac-sha384-"
18612 + "rfc3686-ctr-aes-caam-qi2",
18613 + .cra_blocksize = 1,
18614 + },
18615 + .setkey = aead_setkey,
18616 + .setauthsize = aead_setauthsize,
18617 + .encrypt = aead_encrypt,
18618 + .decrypt = aead_decrypt,
18619 + .ivsize = CTR_RFC3686_IV_SIZE,
18620 + .maxauthsize = SHA384_DIGEST_SIZE,
18621 + },
18622 + .caam = {
18623 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18624 + OP_ALG_AAI_CTR_MOD128,
18625 + .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
18626 + OP_ALG_AAI_HMAC_PRECOMP,
18627 + .rfc3686 = true,
18628 + .geniv = true,
18629 + },
18630 + },
18631 + {
18632 + .aead = {
18633 + .base = {
18634 + .cra_name = "authenc(hmac(sha512),"
18635 + "rfc3686(ctr(aes)))",
18636 + .cra_driver_name = "authenc-hmac-sha512-"
18637 + "rfc3686-ctr-aes-caam-qi2",
18638 + .cra_blocksize = 1,
18639 + },
18640 + .setkey = aead_setkey,
18641 + .setauthsize = aead_setauthsize,
18642 + .encrypt = aead_encrypt,
18643 + .decrypt = aead_decrypt,
18644 + .ivsize = CTR_RFC3686_IV_SIZE,
18645 + .maxauthsize = SHA512_DIGEST_SIZE,
18646 + },
18647 + .caam = {
18648 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18649 + OP_ALG_AAI_CTR_MOD128,
18650 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18651 + OP_ALG_AAI_HMAC_PRECOMP,
18652 + .rfc3686 = true,
18653 + },
18654 + },
18655 + {
18656 + .aead = {
18657 + .base = {
18658 + .cra_name = "seqiv(authenc(hmac(sha512),"
18659 + "rfc3686(ctr(aes))))",
18660 + .cra_driver_name = "seqiv-authenc-hmac-sha512-"
18661 + "rfc3686-ctr-aes-caam-qi2",
18662 + .cra_blocksize = 1,
18663 + },
18664 + .setkey = aead_setkey,
18665 + .setauthsize = aead_setauthsize,
18666 + .encrypt = aead_encrypt,
18667 + .decrypt = aead_decrypt,
18668 + .ivsize = CTR_RFC3686_IV_SIZE,
18669 + .maxauthsize = SHA512_DIGEST_SIZE,
18670 + },
18671 + .caam = {
18672 + .class1_alg_type = OP_ALG_ALGSEL_AES |
18673 + OP_ALG_AAI_CTR_MOD128,
18674 + .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
18675 + OP_ALG_AAI_HMAC_PRECOMP,
18676 + .rfc3686 = true,
18677 + .geniv = true,
18678 + },
18679 + },
18680 + {
18681 + .aead = {
18682 + .base = {
18683 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
18684 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi2",
18685 + .cra_blocksize = AES_BLOCK_SIZE,
18686 + },
18687 + .setkey = tls_setkey,
18688 + .setauthsize = tls_setauthsize,
18689 + .encrypt = tls_encrypt,
18690 + .decrypt = tls_decrypt,
18691 + .ivsize = AES_BLOCK_SIZE,
18692 + .maxauthsize = SHA1_DIGEST_SIZE,
18693 + },
18694 + .caam = {
18695 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
18696 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
18697 + OP_ALG_AAI_HMAC_PRECOMP,
18698 + },
18699 + },
18700 +};
18701 +
18702 +static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
18703 + *template)
18704 +{
18705 + struct caam_crypto_alg *t_alg;
18706 + struct crypto_alg *alg;
18707 +
18708 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
18709 + if (!t_alg)
18710 + return ERR_PTR(-ENOMEM);
18711 +
18712 + alg = &t_alg->crypto_alg;
18713 +
18714 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
18715 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
18716 + template->driver_name);
18717 + alg->cra_module = THIS_MODULE;
18718 + alg->cra_exit = caam_cra_exit;
18719 + alg->cra_priority = CAAM_CRA_PRIORITY;
18720 + alg->cra_blocksize = template->blocksize;
18721 + alg->cra_alignmask = 0;
18722 + alg->cra_ctxsize = sizeof(struct caam_ctx);
18723 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
18724 + template->type;
18725 + switch (template->type) {
18726 + case CRYPTO_ALG_TYPE_GIVCIPHER:
18727 + alg->cra_init = caam_cra_init_ablkcipher;
18728 + alg->cra_type = &crypto_givcipher_type;
18729 + alg->cra_ablkcipher = template->template_ablkcipher;
18730 + break;
18731 + case CRYPTO_ALG_TYPE_ABLKCIPHER:
18732 + alg->cra_init = caam_cra_init_ablkcipher;
18733 + alg->cra_type = &crypto_ablkcipher_type;
18734 + alg->cra_ablkcipher = template->template_ablkcipher;
18735 + break;
18736 + }
18737 +
18738 + t_alg->caam.class1_alg_type = template->class1_alg_type;
18739 + t_alg->caam.class2_alg_type = template->class2_alg_type;
18740 +
18741 + return t_alg;
18742 +}
18743 +
18744 +static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
18745 +{
18746 + struct aead_alg *alg = &t_alg->aead;
18747 +
18748 + alg->base.cra_module = THIS_MODULE;
18749 + alg->base.cra_priority = CAAM_CRA_PRIORITY;
18750 + alg->base.cra_ctxsize = sizeof(struct caam_ctx);
18751 + alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
18752 +
18753 + alg->init = caam_cra_init_aead;
18754 + alg->exit = caam_cra_exit_aead;
18755 +}
18756 +
18757 +/* max hash key is max split key size */
18758 +#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
18759 +
18760 +#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
18761 +#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
18762 +
18763 +#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
18764 + CAAM_MAX_HASH_KEY_SIZE)
18765 +#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
18766 +
18767 +/* caam context sizes for hashes: running digest + 8 */
18768 +#define HASH_MSG_LEN 8
18769 +#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
18770 +
18771 +enum hash_optype {
18772 + UPDATE = 0,
18773 + UPDATE_FIRST,
18774 + FINALIZE,
18775 + DIGEST,
18776 + HASH_NUM_OP
18777 +};
18778 +
18779 +/**
18780 + * caam_hash_ctx - ahash per-session context
18781 + * @flc: Flow Contexts array
18782 + * @flc_dma: I/O virtual addresses of the Flow Contexts
18783 + * @key: virtual address of the authentication key
18784 + * @dev: dpseci device
18785 + * @ctx_len: size of Context Register
18786 + * @adata: hashing algorithm details
18787 + */
18788 +struct caam_hash_ctx {
18789 + struct caam_flc flc[HASH_NUM_OP];
18790 + dma_addr_t flc_dma[HASH_NUM_OP];
18791 + u8 key[CAAM_MAX_HASH_KEY_SIZE];
18792 + struct device *dev;
18793 + int ctx_len;
18794 + struct alginfo adata;
18795 +};
18796 +
18797 +/* ahash state */
18798 +struct caam_hash_state {
18799 + struct caam_request caam_req;
18800 + dma_addr_t buf_dma;
18801 + dma_addr_t ctx_dma;
18802 + u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
18803 + int buflen_0;
18804 + u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
18805 + int buflen_1;
18806 + u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
18807 + int (*update)(struct ahash_request *req);
18808 + int (*final)(struct ahash_request *req);
18809 + int (*finup)(struct ahash_request *req);
18810 + int current_buf;
18811 +};
18812 +
18813 +struct caam_export_state {
18814 + u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
18815 + u8 caam_ctx[MAX_CTX_LEN];
18816 + int buflen;
18817 + int (*update)(struct ahash_request *req);
18818 + int (*final)(struct ahash_request *req);
18819 + int (*finup)(struct ahash_request *req);
18820 +};
18821 +
18822 +static inline void switch_buf(struct caam_hash_state *state)
18823 +{
18824 + state->current_buf ^= 1;
18825 +}
18826 +
18827 +static inline u8 *current_buf(struct caam_hash_state *state)
18828 +{
18829 + return state->current_buf ? state->buf_1 : state->buf_0;
18830 +}
18831 +
18832 +static inline u8 *alt_buf(struct caam_hash_state *state)
18833 +{
18834 + return state->current_buf ? state->buf_0 : state->buf_1;
18835 +}
18836 +
18837 +static inline int *current_buflen(struct caam_hash_state *state)
18838 +{
18839 + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
18840 +}
18841 +
18842 +static inline int *alt_buflen(struct caam_hash_state *state)
18843 +{
18844 + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
18845 +}
18846 +
18847 +/* Map current buffer in state (if length > 0) and put it in link table */
18848 +static inline int buf_map_to_qm_sg(struct device *dev,
18849 + struct dpaa2_sg_entry *qm_sg,
18850 + struct caam_hash_state *state)
18851 +{
18852 + int buflen = *current_buflen(state);
18853 +
18854 + if (!buflen)
18855 + return 0;
18856 +
18857 + state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
18858 + DMA_TO_DEVICE);
18859 + if (dma_mapping_error(dev, state->buf_dma)) {
18860 + dev_err(dev, "unable to map buf\n");
18861 + state->buf_dma = 0;
18862 + return -ENOMEM;
18863 + }
18864 +
18865 + dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
18866 +
18867 + return 0;
18868 +}
18869 +
18870 +/* Map state->caam_ctx, and add it to link table */
18871 +static inline int ctx_map_to_qm_sg(struct device *dev,
18872 + struct caam_hash_state *state, int ctx_len,
18873 + struct dpaa2_sg_entry *qm_sg, u32 flag)
18874 +{
18875 + state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
18876 + if (dma_mapping_error(dev, state->ctx_dma)) {
18877 + dev_err(dev, "unable to map ctx\n");
18878 + state->ctx_dma = 0;
18879 + return -ENOMEM;
18880 + }
18881 +
18882 + dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
18883 +
18884 + return 0;
18885 +}
18886 +
18887 +static int ahash_set_sh_desc(struct crypto_ahash *ahash)
18888 +{
18889 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
18890 + int digestsize = crypto_ahash_digestsize(ahash);
18891 + struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
18892 + struct caam_flc *flc;
18893 + u32 *desc;
18894 +
18895 + ctx->adata.key_virt = ctx->key;
18896 + ctx->adata.key_inline = true;
18897 +
18898 + /* ahash_update shared descriptor */
18899 + flc = &ctx->flc[UPDATE];
18900 + desc = flc->sh_desc;
18901 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
18902 + ctx->ctx_len, true, priv->sec_attr.era);
18903 + flc->flc[1] = desc_len(desc); /* SDL */
18904 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
18905 + desc_bytes(desc), DMA_BIDIRECTIONAL);
18906 +#ifdef DEBUG
18907 + print_hex_dump(KERN_ERR,
18908 + "ahash update shdesc@" __stringify(__LINE__)": ",
18909 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
18910 +#endif
18911 +
18912 + /* ahash_update_first shared descriptor */
18913 + flc = &ctx->flc[UPDATE_FIRST];
18914 + desc = flc->sh_desc;
18915 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
18916 + ctx->ctx_len, false, priv->sec_attr.era);
18917 + flc->flc[1] = desc_len(desc); /* SDL */
18918 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
18919 + desc_bytes(desc), DMA_BIDIRECTIONAL);
18920 +#ifdef DEBUG
18921 + print_hex_dump(KERN_ERR,
18922 + "ahash update first shdesc@" __stringify(__LINE__)": ",
18923 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
18924 +#endif
18925 +
18926 + /* ahash_final shared descriptor */
18927 + flc = &ctx->flc[FINALIZE];
18928 + desc = flc->sh_desc;
18929 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
18930 + ctx->ctx_len, true, priv->sec_attr.era);
18931 + flc->flc[1] = desc_len(desc); /* SDL */
18932 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
18933 + desc_bytes(desc), DMA_BIDIRECTIONAL);
18934 +#ifdef DEBUG
18935 + print_hex_dump(KERN_ERR,
18936 + "ahash final shdesc@" __stringify(__LINE__)": ",
18937 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
18938 +#endif
18939 +
18940 + /* ahash_digest shared descriptor */
18941 + flc = &ctx->flc[DIGEST];
18942 + desc = flc->sh_desc;
18943 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
18944 + ctx->ctx_len, false, priv->sec_attr.era);
18945 + flc->flc[1] = desc_len(desc); /* SDL */
18946 + dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
18947 + desc_bytes(desc), DMA_BIDIRECTIONAL);
18948 +#ifdef DEBUG
18949 + print_hex_dump(KERN_ERR,
18950 + "ahash digest shdesc@" __stringify(__LINE__)": ",
18951 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
18952 +#endif
18953 +
18954 + return 0;
18955 +}
18956 +
18957 +/* Digest the key to shorten it when it is longer than the block size */
18958 +static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
18959 + u32 *keylen, u8 *key_out, u32 digestsize)
18960 +{
18961 + struct caam_request *req_ctx;
18962 + u32 *desc;
18963 + struct split_key_sh_result result;
18964 + dma_addr_t src_dma, dst_dma;
18965 + struct caam_flc *flc;
18966 + dma_addr_t flc_dma;
18967 + int ret = -ENOMEM;
18968 + struct dpaa2_fl_entry *in_fle, *out_fle;
18969 +
18970 + req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
18971 + if (!req_ctx)
18972 + return -ENOMEM;
18973 +
18974 + in_fle = &req_ctx->fd_flt[1];
18975 + out_fle = &req_ctx->fd_flt[0];
18976 +
18977 + flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
18978 + if (!flc)
18979 + goto err_flc;
18980 +
18981 + src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
18982 + DMA_TO_DEVICE);
18983 + if (dma_mapping_error(ctx->dev, src_dma)) {
18984 + dev_err(ctx->dev, "unable to map key input memory\n");
18985 + goto err_src_dma;
18986 + }
18987 + dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
18988 + DMA_FROM_DEVICE);
18989 + if (dma_mapping_error(ctx->dev, dst_dma)) {
18990 + dev_err(ctx->dev, "unable to map key output memory\n");
18991 + goto err_dst_dma;
18992 + }
18993 +
18994 + desc = flc->sh_desc;
18995 +
18996 + init_sh_desc(desc, 0);
18997 +
18998 + /* descriptor to perform unkeyed hash on key_in */
18999 + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
19000 + OP_ALG_AS_INITFINAL);
19001 + append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
19002 + FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
19003 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
19004 + LDST_SRCDST_BYTE_CONTEXT);
19005 +
19006 + flc->flc[1] = desc_len(desc); /* SDL */
19007 + flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
19008 + desc_bytes(desc), DMA_TO_DEVICE);
19009 + if (dma_mapping_error(ctx->dev, flc_dma)) {
19010 + dev_err(ctx->dev, "unable to map shared descriptor\n");
19011 + goto err_flc_dma;
19012 + }
19013 +
19014 + dpaa2_fl_set_final(in_fle, true);
19015 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
19016 + dpaa2_fl_set_addr(in_fle, src_dma);
19017 + dpaa2_fl_set_len(in_fle, *keylen);
19018 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19019 + dpaa2_fl_set_addr(out_fle, dst_dma);
19020 + dpaa2_fl_set_len(out_fle, digestsize);
19021 +
19022 +#ifdef DEBUG
19023 + print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
19024 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
19025 + print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
19026 + DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
19027 +#endif
19028 +
19029 + result.err = 0;
19030 + init_completion(&result.completion);
19031 + result.dev = ctx->dev;
19032 +
19033 + req_ctx->flc = flc;
19034 + req_ctx->flc_dma = flc_dma;
19035 + req_ctx->cbk = split_key_sh_done;
19036 + req_ctx->ctx = &result;
19037 +
19038 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19039 + if (ret == -EINPROGRESS) {
19040 + /* in progress */
19041 + wait_for_completion(&result.completion);
19042 + ret = result.err;
19043 +#ifdef DEBUG
19044 + print_hex_dump(KERN_ERR,
19045 + "digested key@" __stringify(__LINE__)": ",
19046 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize,
19047 + 1);
19048 +#endif
19049 + }
19050 +
19051 + dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
19052 + DMA_TO_DEVICE);
19053 +err_flc_dma:
19054 + dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
19055 +err_dst_dma:
19056 + dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
19057 +err_src_dma:
19058 + kfree(flc);
19059 +err_flc:
19060 + kfree(req_ctx);
19061 +
19062 + *keylen = digestsize;
19063 +
19064 + return ret;
19065 +}
19066 +
19067 +static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
19068 + unsigned int keylen)
19069 +{
19070 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19071 + unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
19072 + unsigned int digestsize = crypto_ahash_digestsize(ahash);
19073 + int ret;
19074 + u8 *hashed_key = NULL;
19075 +
19076 +#ifdef DEBUG
19077 + dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
19078 +#endif
19079 +
19080 + if (keylen > blocksize) {
19081 + hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
19082 + GFP_KERNEL | GFP_DMA);
19083 + if (!hashed_key)
19084 + return -ENOMEM;
19085 + ret = hash_digest_key(ctx, key, &keylen, hashed_key,
19086 + digestsize);
19087 + if (ret)
19088 + goto bad_free_key;
19089 + key = hashed_key;
19090 + }
19091 +
19092 + ctx->adata.keylen = keylen;
19093 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
19094 + OP_ALG_ALGSEL_MASK);
19095 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
19096 + goto bad_free_key;
19097 +
19098 + memcpy(ctx->key, key, keylen);
19099 +
19100 + kfree(hashed_key);
19101 + return ahash_set_sh_desc(ahash);
19102 +bad_free_key:
19103 + kfree(hashed_key);
19104 + crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
19105 + return -EINVAL;
19106 +}
19107 +
19108 +static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
19109 + struct ahash_request *req, int dst_len)
19110 +{
19111 + struct caam_hash_state *state = ahash_request_ctx(req);
19112 +
19113 + if (edesc->src_nents)
19114 + dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
19115 + if (edesc->dst_dma)
19116 + dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
19117 +
19118 + if (edesc->qm_sg_bytes)
19119 + dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
19120 + DMA_TO_DEVICE);
19121 +
19122 + if (state->buf_dma) {
19123 + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
19124 + DMA_TO_DEVICE);
19125 + state->buf_dma = 0;
19126 + }
19127 +}
19128 +
19129 +static inline void ahash_unmap_ctx(struct device *dev,
19130 + struct ahash_edesc *edesc,
19131 + struct ahash_request *req, int dst_len,
19132 + u32 flag)
19133 +{
19134 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19135 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19136 + struct caam_hash_state *state = ahash_request_ctx(req);
19137 +
19138 + if (state->ctx_dma) {
19139 + dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
19140 + state->ctx_dma = 0;
19141 + }
19142 + ahash_unmap(dev, edesc, req, dst_len);
19143 +}
19144 +
19145 +static void ahash_done(void *cbk_ctx, u32 status)
19146 +{
19147 + struct crypto_async_request *areq = cbk_ctx;
19148 + struct ahash_request *req = ahash_request_cast(areq);
19149 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19150 + struct caam_hash_state *state = ahash_request_ctx(req);
19151 + struct ahash_edesc *edesc = state->caam_req.edesc;
19152 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19153 + int digestsize = crypto_ahash_digestsize(ahash);
19154 + int ecode = 0;
19155 +
19156 +#ifdef DEBUG
19157 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19158 +#endif
19159 +
19160 + if (unlikely(status)) {
19161 + caam_qi2_strstatus(ctx->dev, status);
19162 + ecode = -EIO;
19163 + }
19164 +
19165 + ahash_unmap(ctx->dev, edesc, req, digestsize);
19166 + qi_cache_free(edesc);
19167 +
19168 +#ifdef DEBUG
19169 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19170 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19171 + ctx->ctx_len, 1);
19172 + if (req->result)
19173 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19174 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19175 + digestsize, 1);
19176 +#endif
19177 +
19178 + req->base.complete(&req->base, ecode);
19179 +}
19180 +
19181 +static void ahash_done_bi(void *cbk_ctx, u32 status)
19182 +{
19183 + struct crypto_async_request *areq = cbk_ctx;
19184 + struct ahash_request *req = ahash_request_cast(areq);
19185 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19186 + struct caam_hash_state *state = ahash_request_ctx(req);
19187 + struct ahash_edesc *edesc = state->caam_req.edesc;
19188 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19189 + int ecode = 0;
19190 +#ifdef DEBUG
19191 + int digestsize = crypto_ahash_digestsize(ahash);
19192 +
19193 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19194 +#endif
19195 +
19196 + if (unlikely(status)) {
19197 + caam_qi2_strstatus(ctx->dev, status);
19198 + ecode = -EIO;
19199 + }
19200 +
19201 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
19202 + switch_buf(state);
19203 + qi_cache_free(edesc);
19204 +
19205 +#ifdef DEBUG
19206 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19207 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19208 + ctx->ctx_len, 1);
19209 + if (req->result)
19210 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19211 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19212 + digestsize, 1);
19213 +#endif
19214 +
19215 + req->base.complete(&req->base, ecode);
19216 +}
19217 +
19218 +static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
19219 +{
19220 + struct crypto_async_request *areq = cbk_ctx;
19221 + struct ahash_request *req = ahash_request_cast(areq);
19222 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19223 + struct caam_hash_state *state = ahash_request_ctx(req);
19224 + struct ahash_edesc *edesc = state->caam_req.edesc;
19225 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19226 + int digestsize = crypto_ahash_digestsize(ahash);
19227 + int ecode = 0;
19228 +
19229 +#ifdef DEBUG
19230 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19231 +#endif
19232 +
19233 + if (unlikely(status)) {
19234 + caam_qi2_strstatus(ctx->dev, status);
19235 + ecode = -EIO;
19236 + }
19237 +
19238 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
19239 + qi_cache_free(edesc);
19240 +
19241 +#ifdef DEBUG
19242 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19243 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19244 + ctx->ctx_len, 1);
19245 + if (req->result)
19246 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19247 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19248 + digestsize, 1);
19249 +#endif
19250 +
19251 + req->base.complete(&req->base, ecode);
19252 +}
19253 +
19254 +static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
19255 +{
19256 + struct crypto_async_request *areq = cbk_ctx;
19257 + struct ahash_request *req = ahash_request_cast(areq);
19258 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19259 + struct caam_hash_state *state = ahash_request_ctx(req);
19260 + struct ahash_edesc *edesc = state->caam_req.edesc;
19261 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19262 + int ecode = 0;
19263 +#ifdef DEBUG
19264 + int digestsize = crypto_ahash_digestsize(ahash);
19265 +
19266 + dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
19267 +#endif
19268 +
19269 + if (unlikely(status)) {
19270 + caam_qi2_strstatus(ctx->dev, status);
19271 + ecode = -EIO;
19272 + }
19273 +
19274 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
19275 + switch_buf(state);
19276 + qi_cache_free(edesc);
19277 +
19278 +#ifdef DEBUG
19279 + print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
19280 + DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
19281 + ctx->ctx_len, 1);
19282 + if (req->result)
19283 + print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
19284 + DUMP_PREFIX_ADDRESS, 16, 4, req->result,
19285 + digestsize, 1);
19286 +#endif
19287 +
19288 + req->base.complete(&req->base, ecode);
19289 +}
19290 +
19291 +static int ahash_update_ctx(struct ahash_request *req)
19292 +{
19293 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19294 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19295 + struct caam_hash_state *state = ahash_request_ctx(req);
19296 + struct caam_request *req_ctx = &state->caam_req;
19297 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19298 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19299 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19300 + GFP_KERNEL : GFP_ATOMIC;
19301 + u8 *buf = current_buf(state);
19302 + int *buflen = current_buflen(state);
19303 + u8 *next_buf = alt_buf(state);
19304 + int *next_buflen = alt_buflen(state), last_buflen;
19305 + int in_len = *buflen + req->nbytes, to_hash;
19306 + int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
19307 + struct ahash_edesc *edesc;
19308 + int ret = 0;
19309 +
19310 + last_buflen = *next_buflen;
19311 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
19312 + to_hash = in_len - *next_buflen;
19313 +
19314 + if (to_hash) {
19315 + struct dpaa2_sg_entry *sg_table;
19316 +
19317 + src_nents = sg_nents_for_len(req->src,
19318 + req->nbytes - (*next_buflen));
19319 + if (src_nents < 0) {
19320 + dev_err(ctx->dev, "Invalid number of src SG.\n");
19321 + return src_nents;
19322 + }
19323 +
19324 + if (src_nents) {
19325 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19326 + DMA_TO_DEVICE);
19327 + if (!mapped_nents) {
19328 + dev_err(ctx->dev, "unable to DMA map source\n");
19329 + return -ENOMEM;
19330 + }
19331 + } else {
19332 + mapped_nents = 0;
19333 + }
19334 +
19335 + /* allocate space for base edesc and link tables */
19336 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19337 + if (!edesc) {
19338 + dma_unmap_sg(ctx->dev, req->src, src_nents,
19339 + DMA_TO_DEVICE);
19340 + return -ENOMEM;
19341 + }
19342 +
19343 + edesc->src_nents = src_nents;
19344 + qm_sg_src_index = 1 + (*buflen ? 1 : 0);
19345 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
19346 + sizeof(*sg_table);
19347 + sg_table = &edesc->sgt[0];
19348 +
19349 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
19350 + DMA_BIDIRECTIONAL);
19351 + if (ret)
19352 + goto unmap_ctx;
19353 +
19354 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
19355 + if (ret)
19356 + goto unmap_ctx;
19357 +
19358 + if (mapped_nents) {
19359 + sg_to_qm_sg_last(req->src, mapped_nents,
19360 + sg_table + qm_sg_src_index, 0);
19361 + if (*next_buflen)
19362 + scatterwalk_map_and_copy(next_buf, req->src,
19363 + to_hash - *buflen,
19364 + *next_buflen, 0);
19365 + } else {
19366 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
19367 + true);
19368 + }
19369 +
19370 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
19371 + qm_sg_bytes, DMA_TO_DEVICE);
19372 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19373 + dev_err(ctx->dev, "unable to map S/G table\n");
19374 + ret = -ENOMEM;
19375 + goto unmap_ctx;
19376 + }
19377 + edesc->qm_sg_bytes = qm_sg_bytes;
19378 +
19379 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19380 + dpaa2_fl_set_final(in_fle, true);
19381 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19382 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19383 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
19384 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19385 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
19386 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
19387 +
19388 + req_ctx->flc = &ctx->flc[UPDATE];
19389 + req_ctx->flc_dma = ctx->flc_dma[UPDATE];
19390 + req_ctx->cbk = ahash_done_bi;
19391 + req_ctx->ctx = &req->base;
19392 + req_ctx->edesc = edesc;
19393 +
19394 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19395 + if (ret != -EINPROGRESS &&
19396 + !(ret == -EBUSY &&
19397 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19398 + goto unmap_ctx;
19399 + } else if (*next_buflen) {
19400 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
19401 + req->nbytes, 0);
19402 + *buflen = *next_buflen;
19403 + *next_buflen = last_buflen;
19404 + }
19405 +#ifdef DEBUG
19406 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
19407 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
19408 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
19409 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
19410 + *next_buflen, 1);
19411 +#endif
19412 +
19413 + return ret;
19414 +unmap_ctx:
19415 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
19416 + qi_cache_free(edesc);
19417 + return ret;
19418 +}
19419 +
19420 +static int ahash_final_ctx(struct ahash_request *req)
19421 +{
19422 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19423 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19424 + struct caam_hash_state *state = ahash_request_ctx(req);
19425 + struct caam_request *req_ctx = &state->caam_req;
19426 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19427 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19428 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19429 + GFP_KERNEL : GFP_ATOMIC;
19430 + int buflen = *current_buflen(state);
19431 + int qm_sg_bytes, qm_sg_src_index;
19432 + int digestsize = crypto_ahash_digestsize(ahash);
19433 + struct ahash_edesc *edesc;
19434 + struct dpaa2_sg_entry *sg_table;
19435 + int ret;
19436 +
19437 + /* allocate space for base edesc and link tables */
19438 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19439 + if (!edesc)
19440 + return -ENOMEM;
19441 +
19442 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
19443 + qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
19444 + sg_table = &edesc->sgt[0];
19445 +
19446 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
19447 + DMA_TO_DEVICE);
19448 + if (ret)
19449 + goto unmap_ctx;
19450 +
19451 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
19452 + if (ret)
19453 + goto unmap_ctx;
19454 +
19455 + dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
19456 +
19457 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
19458 + DMA_TO_DEVICE);
19459 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19460 + dev_err(ctx->dev, "unable to map S/G table\n");
19461 + ret = -ENOMEM;
19462 + goto unmap_ctx;
19463 + }
19464 + edesc->qm_sg_bytes = qm_sg_bytes;
19465 +
19466 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19467 + DMA_FROM_DEVICE);
19468 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19469 + dev_err(ctx->dev, "unable to map dst\n");
19470 + edesc->dst_dma = 0;
19471 + ret = -ENOMEM;
19472 + goto unmap_ctx;
19473 + }
19474 +
19475 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19476 + dpaa2_fl_set_final(in_fle, true);
19477 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19478 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19479 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
19480 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19481 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19482 + dpaa2_fl_set_len(out_fle, digestsize);
19483 +
19484 + req_ctx->flc = &ctx->flc[FINALIZE];
19485 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
19486 + req_ctx->cbk = ahash_done_ctx_src;
19487 + req_ctx->ctx = &req->base;
19488 + req_ctx->edesc = edesc;
19489 +
19490 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19491 + if (ret == -EINPROGRESS ||
19492 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19493 + return ret;
19494 +
19495 +unmap_ctx:
19496 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
19497 + qi_cache_free(edesc);
19498 + return ret;
19499 +}
19500 +
19501 +static int ahash_finup_ctx(struct ahash_request *req)
19502 +{
19503 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19504 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19505 + struct caam_hash_state *state = ahash_request_ctx(req);
19506 + struct caam_request *req_ctx = &state->caam_req;
19507 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19508 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19509 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19510 + GFP_KERNEL : GFP_ATOMIC;
19511 + int buflen = *current_buflen(state);
19512 + int qm_sg_bytes, qm_sg_src_index;
19513 + int src_nents, mapped_nents;
19514 + int digestsize = crypto_ahash_digestsize(ahash);
19515 + struct ahash_edesc *edesc;
19516 + struct dpaa2_sg_entry *sg_table;
19517 + int ret;
19518 +
19519 + src_nents = sg_nents_for_len(req->src, req->nbytes);
19520 + if (src_nents < 0) {
19521 + dev_err(ctx->dev, "Invalid number of src SG.\n");
19522 + return src_nents;
19523 + }
19524 +
19525 + if (src_nents) {
19526 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19527 + DMA_TO_DEVICE);
19528 + if (!mapped_nents) {
19529 + dev_err(ctx->dev, "unable to DMA map source\n");
19530 + return -ENOMEM;
19531 + }
19532 + } else {
19533 + mapped_nents = 0;
19534 + }
19535 +
19536 + /* allocate space for base edesc and link tables */
19537 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19538 + if (!edesc) {
19539 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
19540 + return -ENOMEM;
19541 + }
19542 +
19543 + edesc->src_nents = src_nents;
19544 + qm_sg_src_index = 1 + (buflen ? 1 : 0);
19545 + qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
19546 + sg_table = &edesc->sgt[0];
19547 +
19548 + ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
19549 + DMA_TO_DEVICE);
19550 + if (ret)
19551 + goto unmap_ctx;
19552 +
19553 + ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
19554 + if (ret)
19555 + goto unmap_ctx;
19556 +
19557 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
19558 +
19559 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
19560 + DMA_TO_DEVICE);
19561 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19562 + dev_err(ctx->dev, "unable to map S/G table\n");
19563 + ret = -ENOMEM;
19564 + goto unmap_ctx;
19565 + }
19566 + edesc->qm_sg_bytes = qm_sg_bytes;
19567 +
19568 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19569 + DMA_FROM_DEVICE);
19570 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19571 + dev_err(ctx->dev, "unable to map dst\n");
19572 + edesc->dst_dma = 0;
19573 + ret = -ENOMEM;
19574 + goto unmap_ctx;
19575 + }
19576 +
19577 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19578 + dpaa2_fl_set_final(in_fle, true);
19579 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19580 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19581 + dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
19582 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19583 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19584 + dpaa2_fl_set_len(out_fle, digestsize);
19585 +
19586 + req_ctx->flc = &ctx->flc[FINALIZE];
19587 + req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
19588 + req_ctx->cbk = ahash_done_ctx_src;
19589 + req_ctx->ctx = &req->base;
19590 + req_ctx->edesc = edesc;
19591 +
19592 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19593 + if (ret == -EINPROGRESS ||
19594 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19595 + return ret;
19596 +
19597 +unmap_ctx:
19598 + ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
19599 + qi_cache_free(edesc);
19600 + return ret;
19601 +}
19602 +
19603 +static int ahash_digest(struct ahash_request *req)
19604 +{
19605 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19606 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19607 + struct caam_hash_state *state = ahash_request_ctx(req);
19608 + struct caam_request *req_ctx = &state->caam_req;
19609 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19610 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19611 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19612 + GFP_KERNEL : GFP_ATOMIC;
19613 + int digestsize = crypto_ahash_digestsize(ahash);
19614 + int src_nents, mapped_nents;
19615 + struct ahash_edesc *edesc;
19616 + int ret = -ENOMEM;
19617 +
19618 + state->buf_dma = 0;
19619 +
19620 + src_nents = sg_nents_for_len(req->src, req->nbytes);
19621 + if (src_nents < 0) {
19622 + dev_err(ctx->dev, "Invalid number of src SG.\n");
19623 + return src_nents;
19624 + }
19625 +
19626 + if (src_nents) {
19627 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19628 + DMA_TO_DEVICE);
19629 + if (!mapped_nents) {
19630 + dev_err(ctx->dev, "unable to map source for DMA\n");
19631 + return ret;
19632 + }
19633 + } else {
19634 + mapped_nents = 0;
19635 + }
19636 +
19637 + /* allocate space for base edesc and link tables */
19638 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19639 + if (!edesc) {
19640 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
19641 + return ret;
19642 + }
19643 +
19644 + edesc->src_nents = src_nents;
19645 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19646 +
19647 + if (mapped_nents > 1) {
19648 + int qm_sg_bytes;
19649 + struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
19650 +
19651 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
19652 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
19653 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
19654 + qm_sg_bytes, DMA_TO_DEVICE);
19655 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19656 + dev_err(ctx->dev, "unable to map S/G table\n");
19657 + goto unmap;
19658 + }
19659 + edesc->qm_sg_bytes = qm_sg_bytes;
19660 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19661 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19662 + } else {
19663 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
19664 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
19665 + }
19666 +
19667 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19668 + DMA_FROM_DEVICE);
19669 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19670 + dev_err(ctx->dev, "unable to map dst\n");
19671 + edesc->dst_dma = 0;
19672 + goto unmap;
19673 + }
19674 +
19675 + dpaa2_fl_set_final(in_fle, true);
19676 + dpaa2_fl_set_len(in_fle, req->nbytes);
19677 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19678 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19679 + dpaa2_fl_set_len(out_fle, digestsize);
19680 +
19681 + req_ctx->flc = &ctx->flc[DIGEST];
19682 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
19683 + req_ctx->cbk = ahash_done;
19684 + req_ctx->ctx = &req->base;
19685 + req_ctx->edesc = edesc;
19686 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19687 + if (ret == -EINPROGRESS ||
19688 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19689 + return ret;
19690 +
19691 +unmap:
19692 + ahash_unmap(ctx->dev, edesc, req, digestsize);
19693 + qi_cache_free(edesc);
19694 + return ret;
19695 +}
19696 +
19697 +static int ahash_final_no_ctx(struct ahash_request *req)
19698 +{
19699 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19700 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19701 + struct caam_hash_state *state = ahash_request_ctx(req);
19702 + struct caam_request *req_ctx = &state->caam_req;
19703 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19704 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19705 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19706 + GFP_KERNEL : GFP_ATOMIC;
19707 + u8 *buf = current_buf(state);
19708 + int buflen = *current_buflen(state);
19709 + int digestsize = crypto_ahash_digestsize(ahash);
19710 + struct ahash_edesc *edesc;
19711 + int ret = -ENOMEM;
19712 +
19713 + /* allocate space for base edesc and link tables */
19714 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19715 + if (!edesc)
19716 + return ret;
19717 +
19718 + state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
19719 + if (dma_mapping_error(ctx->dev, state->buf_dma)) {
19720 + dev_err(ctx->dev, "unable to map src\n");
19721 + goto unmap;
19722 + }
19723 +
19724 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19725 + DMA_FROM_DEVICE);
19726 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19727 + dev_err(ctx->dev, "unable to map dst\n");
19728 + edesc->dst_dma = 0;
19729 + goto unmap;
19730 + }
19731 +
19732 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19733 + dpaa2_fl_set_final(in_fle, true);
19734 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
19735 + dpaa2_fl_set_addr(in_fle, state->buf_dma);
19736 + dpaa2_fl_set_len(in_fle, buflen);
19737 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19738 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19739 + dpaa2_fl_set_len(out_fle, digestsize);
19740 +
19741 + req_ctx->flc = &ctx->flc[DIGEST];
19742 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
19743 + req_ctx->cbk = ahash_done;
19744 + req_ctx->ctx = &req->base;
19745 + req_ctx->edesc = edesc;
19746 +
19747 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19748 + if (ret == -EINPROGRESS ||
19749 + (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19750 + return ret;
19751 +
19752 +unmap:
19753 + ahash_unmap(ctx->dev, edesc, req, digestsize);
19754 + qi_cache_free(edesc);
19755 + return ret;
19756 +}
19757 +
19758 +static int ahash_update_no_ctx(struct ahash_request *req)
19759 +{
19760 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19761 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19762 + struct caam_hash_state *state = ahash_request_ctx(req);
19763 + struct caam_request *req_ctx = &state->caam_req;
19764 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19765 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19766 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19767 + GFP_KERNEL : GFP_ATOMIC;
19768 + u8 *buf = current_buf(state);
19769 + int *buflen = current_buflen(state);
19770 + u8 *next_buf = alt_buf(state);
19771 + int *next_buflen = alt_buflen(state);
19772 + int in_len = *buflen + req->nbytes, to_hash;
19773 + int qm_sg_bytes, src_nents, mapped_nents;
19774 + struct ahash_edesc *edesc;
19775 + int ret = 0;
19776 +
19777 + *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
19778 + to_hash = in_len - *next_buflen;
19779 +
19780 + if (to_hash) {
19781 + struct dpaa2_sg_entry *sg_table;
19782 +
19783 + src_nents = sg_nents_for_len(req->src,
19784 + req->nbytes - *next_buflen);
19785 + if (src_nents < 0) {
19786 + dev_err(ctx->dev, "Invalid number of src SG.\n");
19787 + return src_nents;
19788 + }
19789 +
19790 + if (src_nents) {
19791 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19792 + DMA_TO_DEVICE);
19793 + if (!mapped_nents) {
19794 + dev_err(ctx->dev, "unable to DMA map source\n");
19795 + return -ENOMEM;
19796 + }
19797 + } else {
19798 + mapped_nents = 0;
19799 + }
19800 +
19801 + /* allocate space for base edesc and link tables */
19802 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19803 + if (!edesc) {
19804 + dma_unmap_sg(ctx->dev, req->src, src_nents,
19805 + DMA_TO_DEVICE);
19806 + return -ENOMEM;
19807 + }
19808 +
19809 + edesc->src_nents = src_nents;
19810 + qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
19811 + sg_table = &edesc->sgt[0];
19812 +
19813 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
19814 + if (ret)
19815 + goto unmap_ctx;
19816 +
19817 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
19818 +
19819 + if (*next_buflen)
19820 + scatterwalk_map_and_copy(next_buf, req->src,
19821 + to_hash - *buflen,
19822 + *next_buflen, 0);
19823 +
19824 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
19825 + qm_sg_bytes, DMA_TO_DEVICE);
19826 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19827 + dev_err(ctx->dev, "unable to map S/G table\n");
19828 + ret = -ENOMEM;
19829 + goto unmap_ctx;
19830 + }
19831 + edesc->qm_sg_bytes = qm_sg_bytes;
19832 +
19833 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
19834 + ctx->ctx_len, DMA_FROM_DEVICE);
19835 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
19836 + dev_err(ctx->dev, "unable to map ctx\n");
19837 + state->ctx_dma = 0;
19838 + ret = -ENOMEM;
19839 + goto unmap_ctx;
19840 + }
19841 +
19842 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19843 + dpaa2_fl_set_final(in_fle, true);
19844 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19845 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19846 + dpaa2_fl_set_len(in_fle, to_hash);
19847 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19848 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
19849 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
19850 +
19851 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
19852 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
19853 + req_ctx->cbk = ahash_done_ctx_dst;
19854 + req_ctx->ctx = &req->base;
19855 + req_ctx->edesc = edesc;
19856 +
19857 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19858 + if (ret != -EINPROGRESS &&
19859 + !(ret == -EBUSY &&
19860 + req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19861 + goto unmap_ctx;
19862 +
19863 + state->update = ahash_update_ctx;
19864 + state->finup = ahash_finup_ctx;
19865 + state->final = ahash_final_ctx;
19866 + } else if (*next_buflen) {
19867 + scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
19868 + req->nbytes, 0);
19869 + *buflen = *next_buflen;
19870 + *next_buflen = 0;
19871 + }
19872 +#ifdef DEBUG
19873 + print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
19874 + DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
19875 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
19876 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
19877 + *next_buflen, 1);
19878 +#endif
19879 +
19880 + return ret;
19881 +unmap_ctx:
19882 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
19883 + qi_cache_free(edesc);
19884 + return ret;
19885 +}
19886 +
19887 +static int ahash_finup_no_ctx(struct ahash_request *req)
19888 +{
19889 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19890 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19891 + struct caam_hash_state *state = ahash_request_ctx(req);
19892 + struct caam_request *req_ctx = &state->caam_req;
19893 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19894 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19895 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19896 + GFP_KERNEL : GFP_ATOMIC;
19897 + int buflen = *current_buflen(state);
19898 + int qm_sg_bytes, src_nents, mapped_nents;
19899 + int digestsize = crypto_ahash_digestsize(ahash);
19900 + struct ahash_edesc *edesc;
19901 + struct dpaa2_sg_entry *sg_table;
19902 + int ret;
19903 +
19904 + src_nents = sg_nents_for_len(req->src, req->nbytes);
19905 + if (src_nents < 0) {
19906 + dev_err(ctx->dev, "Invalid number of src SG.\n");
19907 + return src_nents;
19908 + }
19909 +
19910 + if (src_nents) {
19911 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
19912 + DMA_TO_DEVICE);
19913 + if (!mapped_nents) {
19914 + dev_err(ctx->dev, "unable to DMA map source\n");
19915 + return -ENOMEM;
19916 + }
19917 + } else {
19918 + mapped_nents = 0;
19919 + }
19920 +
19921 + /* allocate space for base edesc and link tables */
19922 + edesc = qi_cache_zalloc(GFP_DMA | flags);
19923 + if (!edesc) {
19924 + dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
19925 + return -ENOMEM;
19926 + }
19927 +
19928 + edesc->src_nents = src_nents;
19929 + qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
19930 + sg_table = &edesc->sgt[0];
19931 +
19932 + ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
19933 + if (ret)
19934 + goto unmap;
19935 +
19936 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
19937 +
19938 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
19939 + DMA_TO_DEVICE);
19940 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
19941 + dev_err(ctx->dev, "unable to map S/G table\n");
19942 + ret = -ENOMEM;
19943 + goto unmap;
19944 + }
19945 + edesc->qm_sg_bytes = qm_sg_bytes;
19946 +
19947 + edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
19948 + DMA_FROM_DEVICE);
19949 + if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
19950 + dev_err(ctx->dev, "unable to map dst\n");
19951 + edesc->dst_dma = 0;
19952 + ret = -ENOMEM;
19953 + goto unmap;
19954 + }
19955 +
19956 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
19957 + dpaa2_fl_set_final(in_fle, true);
19958 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
19959 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
19960 + dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
19961 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
19962 + dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
19963 + dpaa2_fl_set_len(out_fle, digestsize);
19964 +
19965 + req_ctx->flc = &ctx->flc[DIGEST];
19966 + req_ctx->flc_dma = ctx->flc_dma[DIGEST];
19967 + req_ctx->cbk = ahash_done;
19968 + req_ctx->ctx = &req->base;
19969 + req_ctx->edesc = edesc;
19970 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
19971 + if (ret != -EINPROGRESS &&
19972 + !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
19973 + goto unmap;
19974 +
19975 + return ret;
19976 +unmap:
19977 + ahash_unmap(ctx->dev, edesc, req, digestsize);
19978 + qi_cache_free(edesc);
19979 + return -ENOMEM;
19980 +}
19981 +
19982 +static int ahash_update_first(struct ahash_request *req)
19983 +{
19984 + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
19985 + struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
19986 + struct caam_hash_state *state = ahash_request_ctx(req);
19987 + struct caam_request *req_ctx = &state->caam_req;
19988 + struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
19989 + struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
19990 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
19991 + GFP_KERNEL : GFP_ATOMIC;
19992 + u8 *next_buf = alt_buf(state);
19993 + int *next_buflen = alt_buflen(state);
19994 + int to_hash;
19995 + int src_nents, mapped_nents;
19996 + struct ahash_edesc *edesc;
19997 + int ret = 0;
19998 +
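+	/* Hash only full blocks; buffer the trailing partial block for the next update */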
19999 + *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
20000 + 1);
20001 + to_hash = req->nbytes - *next_buflen;
20002 +
20003 + if (to_hash) {
20004 + struct dpaa2_sg_entry *sg_table;
20005 +
20006 + src_nents = sg_nents_for_len(req->src,
20007 + req->nbytes - (*next_buflen));
20008 + if (src_nents < 0) {
20009 + dev_err(ctx->dev, "Invalid number of src SG.\n");
20010 + return src_nents;
20011 + }
20012 +
20013 + if (src_nents) {
20014 + mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
20015 + DMA_TO_DEVICE);
20016 + if (!mapped_nents) {
20017 + dev_err(ctx->dev, "unable to map source for DMA\n");
20018 + return -ENOMEM;
20019 + }
20020 + } else {
20021 + mapped_nents = 0;
20022 + }
20023 +
20024 + /* allocate space for base edesc and link tables */
20025 + edesc = qi_cache_zalloc(GFP_DMA | flags);
20026 + if (!edesc) {
20027 + dma_unmap_sg(ctx->dev, req->src, src_nents,
20028 + DMA_TO_DEVICE);
20029 + return -ENOMEM;
20030 + }
20031 +
20032 + edesc->src_nents = src_nents;
20033 + sg_table = &edesc->sgt[0];
20034 +
20035 + memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
20036 + dpaa2_fl_set_final(in_fle, true);
20037 + dpaa2_fl_set_len(in_fle, to_hash);
20038 +
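+		/* Use an S/G table for multiple mapped segments, else point the FLE at the single segment */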
20039 + if (mapped_nents > 1) {
20040 + int qm_sg_bytes;
20041 +
20042 + sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
20043 + qm_sg_bytes = mapped_nents * sizeof(*sg_table);
20044 + edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
20045 + qm_sg_bytes,
20046 + DMA_TO_DEVICE);
20047 + if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
20048 + dev_err(ctx->dev, "unable to map S/G table\n");
20049 + ret = -ENOMEM;
20050 + goto unmap_ctx;
20051 + }
20052 + edesc->qm_sg_bytes = qm_sg_bytes;
20053 + dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
20054 + dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
20055 + } else {
20056 + dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
20057 + dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
20058 + }
20059 +
20060 + if (*next_buflen)
20061 + scatterwalk_map_and_copy(next_buf, req->src, to_hash,
20062 + *next_buflen, 0);
20063 +
20064 + state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
20065 + ctx->ctx_len, DMA_FROM_DEVICE);
20066 + if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
20067 + dev_err(ctx->dev, "unable to map ctx\n");
20068 + state->ctx_dma = 0;
20069 + ret = -ENOMEM;
20070 + goto unmap_ctx;
20071 + }
20072 +
20073 + dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
20074 + dpaa2_fl_set_addr(out_fle, state->ctx_dma);
20075 + dpaa2_fl_set_len(out_fle, ctx->ctx_len);
20076 +
20077 + req_ctx->flc = &ctx->flc[UPDATE_FIRST];
20078 + req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
20079 + req_ctx->cbk = ahash_done_ctx_dst;
20080 + req_ctx->ctx = &req->base;
20081 + req_ctx->edesc = edesc;
20082 +
20083 + ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
20084 + if (ret != -EINPROGRESS &&
20085 + !(ret == -EBUSY && req->base.flags &
20086 + CRYPTO_TFM_REQ_MAY_BACKLOG))
20087 + goto unmap_ctx;
20088 +
20089 + state->update = ahash_update_ctx;
20090 + state->finup = ahash_finup_ctx;
20091 + state->final = ahash_final_ctx;
20092 + } else if (*next_buflen) {
20093 + state->update = ahash_update_no_ctx;
20094 + state->finup = ahash_finup_no_ctx;
20095 + state->final = ahash_final_no_ctx;
20096 + scatterwalk_map_and_copy(next_buf, req->src, 0,
20097 + req->nbytes, 0);
20098 + switch_buf(state);
20099 + }
20100 +#ifdef DEBUG
20101 + print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
20102 + DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
20103 +#endif
20104 +
20105 + return ret;
20106 +unmap_ctx:
20107 + ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
20108 + qi_cache_free(edesc);
20109 + return ret;
20110 +}
20111 +
20112 +static int ahash_finup_first(struct ahash_request *req)
20113 +{
20114 + return ahash_digest(req);
20115 +}
20116 +
20117 +static int ahash_init(struct ahash_request *req)
20118 +{
20119 + struct caam_hash_state *state = ahash_request_ctx(req);
20120 +
20121 + state->update = ahash_update_first;
20122 + state->finup = ahash_finup_first;
20123 + state->final = ahash_final_no_ctx;
20124 +
20125 + state->ctx_dma = 0;
20126 + state->current_buf = 0;
20127 + state->buf_dma = 0;
20128 + state->buflen_0 = 0;
20129 + state->buflen_1 = 0;
20130 +
20131 + return 0;
20132 +}
20133 +
20134 +static int ahash_update(struct ahash_request *req)
20135 +{
20136 + struct caam_hash_state *state = ahash_request_ctx(req);
20137 +
20138 + return state->update(req);
20139 +}
20140 +
20141 +static int ahash_finup(struct ahash_request *req)
20142 +{
20143 + struct caam_hash_state *state = ahash_request_ctx(req);
20144 +
20145 + return state->finup(req);
20146 +}
20147 +
20148 +static int ahash_final(struct ahash_request *req)
20149 +{
20150 + struct caam_hash_state *state = ahash_request_ctx(req);
20151 +
20152 + return state->final(req);
20153 +}
20154 +
20155 +static int ahash_export(struct ahash_request *req, void *out)
20156 +{
20157 + struct caam_hash_state *state = ahash_request_ctx(req);
20158 + struct caam_export_state *export = out;
20159 + int len;
20160 + u8 *buf;
20161 +
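+	/* Export the currently active ping-pong buffer together with the running context */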
20162 + if (state->current_buf) {
20163 + buf = state->buf_1;
20164 + len = state->buflen_1;
20165 + } else {
20166 + buf = state->buf_0;
20167 + len = state->buflen_0;
20168 + }
20169 +
20170 + memcpy(export->buf, buf, len);
20171 + memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
20172 + export->buflen = len;
20173 + export->update = state->update;
20174 + export->final = state->final;
20175 + export->finup = state->finup;
20176 +
20177 + return 0;
20178 +}
20179 +
20180 +static int ahash_import(struct ahash_request *req, const void *in)
20181 +{
20182 + struct caam_hash_state *state = ahash_request_ctx(req);
20183 + const struct caam_export_state *export = in;
20184 +
20185 + memset(state, 0, sizeof(*state));
20186 + memcpy(state->buf_0, export->buf, export->buflen);
20187 + memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
20188 + state->buflen_0 = export->buflen;
20189 + state->update = export->update;
20190 + state->final = export->final;
20191 + state->finup = export->finup;
20192 +
20193 + return 0;
20194 +}
20195 +
20196 +struct caam_hash_template {
20197 + char name[CRYPTO_MAX_ALG_NAME];
20198 + char driver_name[CRYPTO_MAX_ALG_NAME];
20199 + char hmac_name[CRYPTO_MAX_ALG_NAME];
20200 + char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
20201 + unsigned int blocksize;
20202 + struct ahash_alg template_ahash;
20203 + u32 alg_type;
20204 +};
20205 +
20206 +/* ahash descriptors */
20207 +static struct caam_hash_template driver_hash[] = {
20208 + {
20209 + .name = "sha1",
20210 + .driver_name = "sha1-caam-qi2",
20211 + .hmac_name = "hmac(sha1)",
20212 + .hmac_driver_name = "hmac-sha1-caam-qi2",
20213 + .blocksize = SHA1_BLOCK_SIZE,
20214 + .template_ahash = {
20215 + .init = ahash_init,
20216 + .update = ahash_update,
20217 + .final = ahash_final,
20218 + .finup = ahash_finup,
20219 + .digest = ahash_digest,
20220 + .export = ahash_export,
20221 + .import = ahash_import,
20222 + .setkey = ahash_setkey,
20223 + .halg = {
20224 + .digestsize = SHA1_DIGEST_SIZE,
20225 + .statesize = sizeof(struct caam_export_state),
20226 + },
20227 + },
20228 + .alg_type = OP_ALG_ALGSEL_SHA1,
20229 + }, {
20230 + .name = "sha224",
20231 + .driver_name = "sha224-caam-qi2",
20232 + .hmac_name = "hmac(sha224)",
20233 + .hmac_driver_name = "hmac-sha224-caam-qi2",
20234 + .blocksize = SHA224_BLOCK_SIZE,
20235 + .template_ahash = {
20236 + .init = ahash_init,
20237 + .update = ahash_update,
20238 + .final = ahash_final,
20239 + .finup = ahash_finup,
20240 + .digest = ahash_digest,
20241 + .export = ahash_export,
20242 + .import = ahash_import,
20243 + .setkey = ahash_setkey,
20244 + .halg = {
20245 + .digestsize = SHA224_DIGEST_SIZE,
20246 + .statesize = sizeof(struct caam_export_state),
20247 + },
20248 + },
20249 + .alg_type = OP_ALG_ALGSEL_SHA224,
20250 + }, {
20251 + .name = "sha256",
20252 + .driver_name = "sha256-caam-qi2",
20253 + .hmac_name = "hmac(sha256)",
20254 + .hmac_driver_name = "hmac-sha256-caam-qi2",
20255 + .blocksize = SHA256_BLOCK_SIZE,
20256 + .template_ahash = {
20257 + .init = ahash_init,
20258 + .update = ahash_update,
20259 + .final = ahash_final,
20260 + .finup = ahash_finup,
20261 + .digest = ahash_digest,
20262 + .export = ahash_export,
20263 + .import = ahash_import,
20264 + .setkey = ahash_setkey,
20265 + .halg = {
20266 + .digestsize = SHA256_DIGEST_SIZE,
20267 + .statesize = sizeof(struct caam_export_state),
20268 + },
20269 + },
20270 + .alg_type = OP_ALG_ALGSEL_SHA256,
20271 + }, {
20272 + .name = "sha384",
20273 + .driver_name = "sha384-caam-qi2",
20274 + .hmac_name = "hmac(sha384)",
20275 + .hmac_driver_name = "hmac-sha384-caam-qi2",
20276 + .blocksize = SHA384_BLOCK_SIZE,
20277 + .template_ahash = {
20278 + .init = ahash_init,
20279 + .update = ahash_update,
20280 + .final = ahash_final,
20281 + .finup = ahash_finup,
20282 + .digest = ahash_digest,
20283 + .export = ahash_export,
20284 + .import = ahash_import,
20285 + .setkey = ahash_setkey,
20286 + .halg = {
20287 + .digestsize = SHA384_DIGEST_SIZE,
20288 + .statesize = sizeof(struct caam_export_state),
20289 + },
20290 + },
20291 + .alg_type = OP_ALG_ALGSEL_SHA384,
20292 + }, {
20293 + .name = "sha512",
20294 + .driver_name = "sha512-caam-qi2",
20295 + .hmac_name = "hmac(sha512)",
20296 + .hmac_driver_name = "hmac-sha512-caam-qi2",
20297 + .blocksize = SHA512_BLOCK_SIZE,
20298 + .template_ahash = {
20299 + .init = ahash_init,
20300 + .update = ahash_update,
20301 + .final = ahash_final,
20302 + .finup = ahash_finup,
20303 + .digest = ahash_digest,
20304 + .export = ahash_export,
20305 + .import = ahash_import,
20306 + .setkey = ahash_setkey,
20307 + .halg = {
20308 + .digestsize = SHA512_DIGEST_SIZE,
20309 + .statesize = sizeof(struct caam_export_state),
20310 + },
20311 + },
20312 + .alg_type = OP_ALG_ALGSEL_SHA512,
20313 + }, {
20314 + .name = "md5",
20315 + .driver_name = "md5-caam-qi2",
20316 + .hmac_name = "hmac(md5)",
20317 + .hmac_driver_name = "hmac-md5-caam-qi2",
20318 + .blocksize = MD5_BLOCK_WORDS * 4,
20319 + .template_ahash = {
20320 + .init = ahash_init,
20321 + .update = ahash_update,
20322 + .final = ahash_final,
20323 + .finup = ahash_finup,
20324 + .digest = ahash_digest,
20325 + .export = ahash_export,
20326 + .import = ahash_import,
20327 + .setkey = ahash_setkey,
20328 + .halg = {
20329 + .digestsize = MD5_DIGEST_SIZE,
20330 + .statesize = sizeof(struct caam_export_state),
20331 + },
20332 + },
20333 + .alg_type = OP_ALG_ALGSEL_MD5,
20334 + }
20335 +};
20336 +
20337 +struct caam_hash_alg {
20338 + struct list_head entry;
20339 + struct device *dev;
20340 + int alg_type;
20341 + struct ahash_alg ahash_alg;
20342 +};
20343 +
20344 +static int caam_hash_cra_init(struct crypto_tfm *tfm)
20345 +{
20346 + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
20347 + struct crypto_alg *base = tfm->__crt_alg;
20348 + struct hash_alg_common *halg =
20349 + container_of(base, struct hash_alg_common, base);
20350 + struct ahash_alg *alg =
20351 + container_of(halg, struct ahash_alg, halg);
20352 + struct caam_hash_alg *caam_hash =
20353 + container_of(alg, struct caam_hash_alg, ahash_alg);
20354 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
20355 + /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
20356 + static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
20357 + HASH_MSG_LEN + SHA1_DIGEST_SIZE,
20358 + HASH_MSG_LEN + 32,
20359 + HASH_MSG_LEN + SHA256_DIGEST_SIZE,
20360 + HASH_MSG_LEN + 64,
20361 + HASH_MSG_LEN + SHA512_DIGEST_SIZE };
20362 + dma_addr_t dma_addr;
20363 + int i;
20364 +
20365 + ctx->dev = caam_hash->dev;
20366 +
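+	/* Map the whole flc[] array once; per-operation DMA addresses are offsets into it */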
20367 + dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
20368 + DMA_BIDIRECTIONAL,
20369 + DMA_ATTR_SKIP_CPU_SYNC);
20370 + if (dma_mapping_error(ctx->dev, dma_addr)) {
20371 + dev_err(ctx->dev, "unable to map shared descriptors\n");
20372 + return -ENOMEM;
20373 + }
20374 +
20375 + for (i = 0; i < HASH_NUM_OP; i++)
20376 + ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
20377 +
20378 + /* copy descriptor header template value */
20379 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
20380 +
20381 + ctx->ctx_len = runninglen[(ctx->adata.algtype &
20382 + OP_ALG_ALGSEL_SUBMASK) >>
20383 + OP_ALG_ALGSEL_SHIFT];
20384 +
20385 + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
20386 + sizeof(struct caam_hash_state));
20387 +
20388 + return ahash_set_sh_desc(ahash);
20389 +}
20390 +
20391 +static void caam_hash_cra_exit(struct crypto_tfm *tfm)
20392 +{
20393 + struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
20394 +
20395 + dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
20396 + DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
20397 +}
20398 +
20399 +static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
20400 + struct caam_hash_template *template, bool keyed)
20401 +{
20402 + struct caam_hash_alg *t_alg;
20403 + struct ahash_alg *halg;
20404 + struct crypto_alg *alg;
20405 +
20406 + t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
20407 + if (!t_alg)
20408 + return ERR_PTR(-ENOMEM);
20409 +
20410 + t_alg->ahash_alg = template->template_ahash;
20411 + halg = &t_alg->ahash_alg;
20412 + alg = &halg->halg.base;
20413 +
20414 + if (keyed) {
20415 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
20416 + template->hmac_name);
20417 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
20418 + template->hmac_driver_name);
20419 + } else {
20420 + snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
20421 + template->name);
20422 + snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
20423 + template->driver_name);
20424 + }
20425 + alg->cra_module = THIS_MODULE;
20426 + alg->cra_init = caam_hash_cra_init;
20427 + alg->cra_exit = caam_hash_cra_exit;
20428 + alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
20429 + alg->cra_priority = CAAM_CRA_PRIORITY;
20430 + alg->cra_blocksize = template->blocksize;
20431 + alg->cra_alignmask = 0;
20432 + alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
20433 + alg->cra_type = &crypto_ahash_type;
20434 +
20435 + t_alg->alg_type = template->alg_type;
20436 + t_alg->dev = dev;
20437 +
20438 + return t_alg;
20439 +}
20440 +
20441 +static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
20442 +{
20443 + struct dpaa2_caam_priv_per_cpu *ppriv;
20444 +
20445 + ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
20446 + napi_schedule_irqoff(&ppriv->napi);
20447 +}
20448 +
20449 +static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
20450 +{
20451 + struct device *dev = priv->dev;
20452 + struct dpaa2_io_notification_ctx *nctx;
20453 + struct dpaa2_caam_priv_per_cpu *ppriv;
20454 + int err, i = 0, cpu;
20455 +
20456 + for_each_online_cpu(cpu) {
20457 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20458 + ppriv->priv = priv;
20459 + nctx = &ppriv->nctx;
20460 + nctx->is_cdan = 0;
20461 + nctx->id = ppriv->rsp_fqid;
20462 + nctx->desired_cpu = cpu;
20463 + nctx->cb = dpaa2_caam_fqdan_cb;
20464 +
20465 + /* Register notification callbacks */
20466 + err = dpaa2_io_service_register(NULL, nctx);
20467 + if (unlikely(err)) {
20468 + dev_err(dev, "notification register failed\n");
20469 + nctx->cb = NULL;
20470 + goto err;
20471 + }
20472 +
20473 + ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
20474 + dev);
20475 + if (unlikely(!ppriv->store)) {
20476 + dev_err(dev, "dpaa2_io_store_create() failed\n");
20477 + goto err;
20478 + }
20479 +
20480 + if (++i == priv->num_pairs)
20481 + break;
20482 + }
20483 +
20484 + return 0;
20485 +
20486 +err:
20487 + for_each_online_cpu(cpu) {
20488 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20489 + if (!ppriv->nctx.cb)
20490 + break;
20491 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
20492 + }
20493 +
20494 + for_each_online_cpu(cpu) {
20495 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20496 + if (!ppriv->store)
20497 + break;
20498 + dpaa2_io_store_destroy(ppriv->store);
20499 + }
20500 +
20501 + return err;
20502 +}
20503 +
20504 +static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
20505 +{
20506 + struct dpaa2_caam_priv_per_cpu *ppriv;
20507 + int i = 0, cpu;
20508 +
20509 + for_each_online_cpu(cpu) {
20510 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20511 + dpaa2_io_service_deregister(NULL, &ppriv->nctx);
20512 + dpaa2_io_store_destroy(ppriv->store);
20513 +
20514 + if (++i == priv->num_pairs)
20515 + return;
20516 + }
20517 +}
20518 +
20519 +static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
20520 +{
20521 + struct dpseci_rx_queue_cfg rx_queue_cfg;
20522 + struct device *dev = priv->dev;
20523 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20524 + struct dpaa2_caam_priv_per_cpu *ppriv;
20525 + int err = 0, i = 0, cpu;
20526 +
20527 + /* Configure Rx queues */
20528 + for_each_online_cpu(cpu) {
20529 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20530 +
20531 + rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
20532 + DPSECI_QUEUE_OPT_USER_CTX;
20533 + rx_queue_cfg.order_preservation_en = 0;
20534 + rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
20535 + rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
20536 + /*
20537 + * Rx priority (WQ) doesn't really matter, since we use
20538 + * pull mode, i.e. volatile dequeues from specific FQs
20539 + */
20540 + rx_queue_cfg.dest_cfg.priority = 0;
20541 + rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
20542 +
20543 + err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
20544 + &rx_queue_cfg);
20545 + if (err) {
20546 + dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
20547 + err);
20548 + return err;
20549 + }
20550 +
20551 + if (++i == priv->num_pairs)
20552 + break;
20553 + }
20554 +
20555 + return err;
20556 +}
20557 +
20558 +static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
20559 +{
20560 + struct device *dev = priv->dev;
20561 +
20562 + if (!priv->cscn_mem)
20563 + return;
20564 +
20565 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
20566 + kfree(priv->cscn_mem);
20567 +}
20568 +
20569 +static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
20570 +{
20571 + struct device *dev = priv->dev;
20572 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20573 +
20574 + dpaa2_dpseci_congestion_free(priv);
20575 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
20576 +}
20577 +
20578 +static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
20579 + const struct dpaa2_fd *fd)
20580 +{
20581 + struct caam_request *req;
20582 + u32 fd_err;
20583 +
20584 + if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
20585 + dev_err(priv->dev, "Only Frame List FD format is supported!\n");
20586 + return;
20587 + }
20588 +
20589 + fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
20590 + if (unlikely(fd_err))
20591 + dev_err(priv->dev, "FD error: %08x\n", fd_err);
20592 +
20593 + /*
20594 + * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
20595 + * in FD[ERR] or FD[FRC].
20596 + */
20597 + req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
20598 + dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
20599 + DMA_BIDIRECTIONAL);
20600 + req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
20601 +}
20602 +
20603 +static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
20604 +{
20605 + int err;
20606 +
20607 + /* Retry while portal is busy */
20608 + do {
20609 + err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
20610 + ppriv->store);
20611 + } while (err == -EBUSY);
20612 +
20613 + if (unlikely(err))
20614 + dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
20615 +
20616 + return err;
20617 +}
20618 +
20619 +static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
20620 +{
20621 + struct dpaa2_dq *dq;
20622 + int cleaned = 0, is_last;
20623 +
20624 + do {
20625 + dq = dpaa2_io_store_next(ppriv->store, &is_last);
20626 + if (unlikely(!dq)) {
20627 + if (unlikely(!is_last)) {
20628 + dev_dbg(ppriv->priv->dev,
20629 + "FQ %d returned no valid frames\n",
20630 + ppriv->rsp_fqid);
20631 + /*
20632 + * MUST retry until we get some sort of
20633 + * valid response token (be it "empty dequeue"
20634 + * or a valid frame).
20635 + */
20636 + continue;
20637 + }
20638 + break;
20639 + }
20640 +
20641 + /* Process FD */
20642 + dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
20643 + cleaned++;
20644 + } while (!is_last);
20645 +
20646 + return cleaned;
20647 +}
20648 +
20649 +static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
20650 +{
20651 + struct dpaa2_caam_priv_per_cpu *ppriv;
20652 + struct dpaa2_caam_priv *priv;
20653 + int err, cleaned = 0, store_cleaned;
20654 +
20655 + ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
20656 + priv = ppriv->priv;
20657 +
20658 + if (unlikely(dpaa2_caam_pull_fq(ppriv)))
20659 + return 0;
20660 +
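+	/* Consume the store; stop once the remaining budget cannot cover another full pull */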
20661 + do {
20662 + store_cleaned = dpaa2_caam_store_consume(ppriv);
20663 + cleaned += store_cleaned;
20664 +
20665 + if (store_cleaned == 0 ||
20666 + cleaned > budget - DPAA2_CAAM_STORE_SIZE)
20667 + break;
20668 +
20669 + /* Try to dequeue some more */
20670 + err = dpaa2_caam_pull_fq(ppriv);
20671 + if (unlikely(err))
20672 + break;
20673 + } while (1);
20674 +
20675 + if (cleaned < budget) {
20676 + napi_complete_done(napi, cleaned);
20677 + err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
20678 + if (unlikely(err))
20679 + dev_err(priv->dev, "Notification rearm failed: %d\n",
20680 + err);
20681 + }
20682 +
20683 + return cleaned;
20684 +}
20685 +
20686 +static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
20687 + u16 token)
20688 +{
20689 + struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
20690 + struct device *dev = priv->dev;
20691 + int err;
20692 +
20693 + /*
20694 + * Congestion group feature supported starting with DPSECI API v5.1
20695 + * and only when object has been created with this capability.
20696 + */
20697 + if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
20698 + !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
20699 + return 0;
20700 +
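+	/* Over-allocate so the CSCN write area can be aligned to DPAA2_CSCN_ALIGN */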
20701 + priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
20702 + GFP_KERNEL | GFP_DMA);
20703 + if (!priv->cscn_mem)
20704 + return -ENOMEM;
20705 +
20706 + priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
20707 + priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
20708 + DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
20709 + if (dma_mapping_error(dev, priv->cscn_dma)) {
20710 + dev_err(dev, "Error mapping CSCN memory area\n");
20711 + err = -ENOMEM;
20712 + goto err_dma_map;
20713 + }
20714 +
20715 + cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
20716 + cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
20717 + cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
20718 + cong_notif_cfg.message_ctx = (u64)priv;
20719 + cong_notif_cfg.message_iova = priv->cscn_dma;
20720 + cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
20721 + DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
20722 + DPSECI_CGN_MODE_COHERENT_WRITE;
20723 +
20724 + err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
20725 + &cong_notif_cfg);
20726 + if (err) {
20727 + dev_err(dev, "dpseci_set_congestion_notification failed\n");
20728 + goto err_set_cong;
20729 + }
20730 +
20731 + return 0;
20732 +
20733 +err_set_cong:
20734 + dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
20735 +err_dma_map:
20736 + kfree(priv->cscn_mem);
20737 +
20738 + return err;
20739 +}
20740 +
20741 +static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
20742 +{
20743 + struct device *dev = &ls_dev->dev;
20744 + struct dpaa2_caam_priv *priv;
20745 + struct dpaa2_caam_priv_per_cpu *ppriv;
20746 + int err, cpu;
20747 + u8 i;
20748 +
20749 + priv = dev_get_drvdata(dev);
20750 +
20751 + priv->dev = dev;
20752 + priv->dpsec_id = ls_dev->obj_desc.id;
20753 +
20754 + /* Get a handle for the DPSECI this interface is associated with */
20755 + err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
20756 + if (err) {
20757 + dev_err(dev, "dpsec_open() failed: %d\n", err);
20758 + goto err_open;
20759 + }
20760 +
20761 + dev_info(dev, "Opened dpseci object successfully\n");
20762 +
20763 + err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
20764 + &priv->minor_ver);
20765 + if (err) {
20766 + dev_err(dev, "dpseci_get_api_version() failed\n");
20767 + goto err_get_vers;
20768 + }
20769 +
20770 + err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
20771 + &priv->dpseci_attr);
20772 + if (err) {
20773 + dev_err(dev, "dpseci_get_attributes() failed\n");
20774 + goto err_get_vers;
20775 + }
20776 +
20777 + err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
20778 + &priv->sec_attr);
20779 + if (err) {
20780 + dev_err(dev, "dpseci_get_sec_attr() failed\n");
20781 + goto err_get_vers;
20782 + }
20783 +
20784 + err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
20785 + if (err) {
20786 + dev_err(dev, "setup_congestion() failed\n");
20787 + goto err_get_vers;
20788 + }
20789 +
20790 + priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
20791 + priv->dpseci_attr.num_tx_queues);
20792 + if (priv->num_pairs > num_online_cpus()) {
20793 + dev_warn(dev, "%d queues won't be used\n",
20794 + priv->num_pairs - num_online_cpus());
20795 + priv->num_pairs = num_online_cpus();
20796 + }
20797 +
20798 + for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
20799 + err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
20800 + &priv->rx_queue_attr[i]);
20801 + if (err) {
20802 + dev_err(dev, "dpseci_get_rx_queue() failed\n");
20803 + goto err_get_rx_queue;
20804 + }
20805 + }
20806 +
20807 + for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
20808 + err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
20809 + &priv->tx_queue_attr[i]);
20810 + if (err) {
20811 + dev_err(dev, "dpseci_get_tx_queue() failed\n");
20812 + goto err_get_rx_queue;
20813 + }
20814 + }
20815 +
20816 + i = 0;
20817 + for_each_online_cpu(cpu) {
20818 + dev_info(dev, "prio %d: rx queue %d, tx queue %d\n", i,
20819 + priv->rx_queue_attr[i].fqid,
20820 + priv->tx_queue_attr[i].fqid);
20821 +
20822 + ppriv = per_cpu_ptr(priv->ppriv, cpu);
20823 + ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
20824 + ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
20825 + ppriv->prio = i;
20826 +
20827 + ppriv->net_dev.dev = *dev;
20828 + INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
20829 + netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
20830 + DPAA2_CAAM_NAPI_WEIGHT);
20831 + if (++i == priv->num_pairs)
20832 + break;
20833 + }
20834 +
20835 + return 0;
20836 +
20837 +err_get_rx_queue:
20838 + dpaa2_dpseci_congestion_free(priv);
20839 +err_get_vers:
20840 + dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
20841 +err_open:
20842 + return err;
20843 +}
20844 +
20845 +static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
20846 +{
20847 + struct device *dev = priv->dev;
20848 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20849 + struct dpaa2_caam_priv_per_cpu *ppriv;
20850 + int err, i;
20851 +
20852 + for (i = 0; i < priv->num_pairs; i++) {
20853 + ppriv = per_cpu_ptr(priv->ppriv, i);
20854 + napi_enable(&ppriv->napi);
20855 + }
20856 +
20857 + err = dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
20858 + if (err) {
20859 + dev_err(dev, "dpseci_enable() failed\n");
20860 + return err;
20861 + }
20862 +
20863 + dev_info(dev, "DPSECI version %d.%d\n",
20864 + priv->major_ver,
20865 + priv->minor_ver);
20866 +
20867 + return 0;
20868 +}
20869 +
20870 +static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
20871 +{
20872 + struct device *dev = priv->dev;
20873 + struct dpaa2_caam_priv_per_cpu *ppriv;
20874 + struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
20875 + int i, err = 0, enabled;
20876 +
20877 + err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
20878 + if (err) {
20879 + dev_err(dev, "dpseci_disable() failed\n");
20880 + return err;
20881 + }
20882 +
20883 + err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
20884 + if (err) {
20885 + dev_err(dev, "dpseci_is_enabled() failed\n");
20886 + return err;
20887 + }
20888 +
20889 + dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
20890 +
20891 + for (i = 0; i < priv->num_pairs; i++) {
20892 + ppriv = per_cpu_ptr(priv->ppriv, i);
20893 + napi_disable(&ppriv->napi);
20894 + netif_napi_del(&ppriv->napi);
20895 + }
20896 +
20897 + return 0;
20898 +}
20899 +
20900 +static struct list_head alg_list;
20901 +static struct list_head hash_list;
20902 +
20903 +static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
20904 +{
20905 + struct device *dev;
20906 + struct dpaa2_caam_priv *priv;
20907 + int i, err = 0;
20908 + bool registered = false;
20909 +
20910 + /*
20911 + * There is no way to get CAAM endianness - there is no direct register
20912 + * space access and MC f/w does not provide this attribute.
20913 + * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
20914 + * property.
20915 + */
20916 + caam_little_end = true;
20917 +
20918 + caam_imx = false;
20919 +
20920 + dev = &dpseci_dev->dev;
20921 +
20922 + priv = kzalloc(sizeof(*priv), GFP_KERNEL);
20923 + if (!priv)
20924 + return -ENOMEM;
20925 +
20926 + dev_set_drvdata(dev, priv);
20927 +
20928 + priv->domain = iommu_get_domain_for_dev(dev);
20929 +
20930 + qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
20931 + 0, SLAB_CACHE_DMA, NULL);
20932 + if (!qi_cache) {
20933 + dev_err(dev, "Can't allocate SEC cache\n");
20934 + err = -ENOMEM;
20935 + goto err_qicache;
20936 + }
20937 +
20938 + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
20939 + if (err) {
20940 + dev_err(dev, "dma_set_mask_and_coherent() failed\n");
20941 + goto err_dma_mask;
20942 + }
20943 +
20944 + /* Obtain a MC portal */
20945 + err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
20946 + if (err) {
20947 + dev_err(dev, "MC portal allocation failed\n");
20948 + goto err_dma_mask;
20949 + }
20950 +
20951 + priv->ppriv = alloc_percpu(*priv->ppriv);
20952 + if (!priv->ppriv) {
20953 + dev_err(dev, "alloc_percpu() failed\n");
20954 + goto err_alloc_ppriv;
20955 + }
20956 +
20957 + /* DPSECI initialization */
20958 + err = dpaa2_dpseci_setup(dpseci_dev);
20959 + if (err < 0) {
20960 + dev_err(dev, "dpaa2_dpseci_setup() failed\n");
20961 + goto err_dpseci_setup;
20962 + }
20963 +
20964 + /* DPIO */
20965 + err = dpaa2_dpseci_dpio_setup(priv);
20966 + if (err) {
20967 + dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
20968 + goto err_dpio_setup;
20969 + }
20970 +
20971 + /* DPSECI binding to DPIO */
20972 + err = dpaa2_dpseci_bind(priv);
20973 + if (err) {
20974 + dev_err(dev, "dpaa2_dpseci_bind() failed\n");
20975 + goto err_bind;
20976 + }
20977 +
20978 + /* DPSECI enable */
20979 + err = dpaa2_dpseci_enable(priv);
20980 + if (err) {
20981 + dev_err(dev, "dpaa2_dpseci_enable() failed\n");
20982 + goto err_bind;
20983 + }
20984 +
20985 + /* register crypto algorithms the device supports */
20986 + INIT_LIST_HEAD(&alg_list);
20987 + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
20988 + struct caam_crypto_alg *t_alg;
20989 + struct caam_alg_template *alg = driver_algs + i;
20990 + u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
20991 +
20992 + /* Skip DES algorithms if not supported by device */
20993 + if (!priv->sec_attr.des_acc_num &&
20994 + ((alg_sel == OP_ALG_ALGSEL_3DES) ||
20995 + (alg_sel == OP_ALG_ALGSEL_DES)))
20996 + continue;
20997 +
20998 + /* Skip AES algorithms if not supported by device */
20999 + if (!priv->sec_attr.aes_acc_num &&
21000 + (alg_sel == OP_ALG_ALGSEL_AES))
21001 + continue;
21002 +
21003 + t_alg = caam_alg_alloc(alg);
21004 + if (IS_ERR(t_alg)) {
21005 + err = PTR_ERR(t_alg);
21006 + dev_warn(dev, "%s alg allocation failed: %d\n",
21007 + alg->driver_name, err);
21008 + continue;
21009 + }
21010 + t_alg->caam.dev = dev;
21011 +
21012 + err = crypto_register_alg(&t_alg->crypto_alg);
21013 + if (err) {
21014 + dev_warn(dev, "%s alg registration failed: %d\n",
21015 + t_alg->crypto_alg.cra_driver_name, err);
21016 + kfree(t_alg);
21017 + continue;
21018 + }
21019 +
21020 + list_add_tail(&t_alg->entry, &alg_list);
21021 + registered = true;
21022 + }
21023 +
21024 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
21025 + struct caam_aead_alg *t_alg = driver_aeads + i;
21026 + u32 c1_alg_sel = t_alg->caam.class1_alg_type &
21027 + OP_ALG_ALGSEL_MASK;
21028 + u32 c2_alg_sel = t_alg->caam.class2_alg_type &
21029 + OP_ALG_ALGSEL_MASK;
21030 +
21031 + /* Skip DES algorithms if not supported by device */
21032 + if (!priv->sec_attr.des_acc_num &&
21033 + ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
21034 + (c1_alg_sel == OP_ALG_ALGSEL_DES)))
21035 + continue;
21036 +
21037 + /* Skip AES algorithms if not supported by device */
21038 + if (!priv->sec_attr.aes_acc_num &&
21039 + (c1_alg_sel == OP_ALG_ALGSEL_AES))
21040 + continue;
21041 +
21042 + /*
21043 + * Skip algorithms requiring message digests
21044 + * if MD not supported by device.
21045 + */
21046 + if (!priv->sec_attr.md_acc_num && c2_alg_sel)
21047 + continue;
21048 +
21049 + t_alg->caam.dev = dev;
21050 + caam_aead_alg_init(t_alg);
21051 +
21052 + err = crypto_register_aead(&t_alg->aead);
21053 + if (err) {
21054 + dev_warn(dev, "%s alg registration failed: %d\n",
21055 + t_alg->aead.base.cra_driver_name, err);
21056 + continue;
21057 + }
21058 +
21059 + t_alg->registered = true;
21060 + registered = true;
21061 + }
21062 + if (registered)
21063 + dev_info(dev, "algorithms registered in /proc/crypto\n");
21064 +
21065 + /* register hash algorithms the device supports */
21066 + INIT_LIST_HEAD(&hash_list);
21067 +
21068 + /*
21069 + * Skip registration of any hashing algorithms if MD block
21070 + * is not present.
21071 + */
21072 + if (!priv->sec_attr.md_acc_num)
21073 + return 0;
21074 +
21075 + for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
21076 + struct caam_hash_alg *t_alg;
21077 + struct caam_hash_template *alg = driver_hash + i;
21078 +
21079 + /* register hmac version */
21080 + t_alg = caam_hash_alloc(dev, alg, true);
21081 + if (IS_ERR(t_alg)) {
21082 + err = PTR_ERR(t_alg);
21083 + dev_warn(dev, "%s hash alg allocation failed: %d\n",
21084 + alg->driver_name, err);
21085 + continue;
21086 + }
21087 +
21088 + err = crypto_register_ahash(&t_alg->ahash_alg);
21089 + if (err) {
21090 + dev_warn(dev, "%s alg registration failed: %d\n",
21091 + t_alg->ahash_alg.halg.base.cra_driver_name,
21092 + err);
21093 + kfree(t_alg);
21094 + } else {
21095 + list_add_tail(&t_alg->entry, &hash_list);
21096 + }
21097 +
21098 + /* register unkeyed version */
21099 + t_alg = caam_hash_alloc(dev, alg, false);
21100 + if (IS_ERR(t_alg)) {
21101 + err = PTR_ERR(t_alg);
21102 + dev_warn(dev, "%s alg allocation failed: %d\n",
21103 + alg->driver_name, err);
21104 + continue;
21105 + }
21106 +
21107 + err = crypto_register_ahash(&t_alg->ahash_alg);
21108 + if (err) {
21109 + dev_warn(dev, "%s alg registration failed: %d\n",
21110 + t_alg->ahash_alg.halg.base.cra_driver_name,
21111 + err);
21112 + kfree(t_alg);
21113 + } else {
21114 + list_add_tail(&t_alg->entry, &hash_list);
21115 + }
21116 + }
21117 + if (!list_empty(&hash_list))
21118 + dev_info(dev, "hash algorithms registered in /proc/crypto\n");
21119 +
21120 + return err;
21121 +
21122 +err_bind:
21123 + dpaa2_dpseci_dpio_free(priv);
21124 +err_dpio_setup:
21125 + dpaa2_dpseci_free(priv);
21126 +err_dpseci_setup:
21127 + free_percpu(priv->ppriv);
21128 +err_alloc_ppriv:
21129 + fsl_mc_portal_free(priv->mc_io);
21130 +err_dma_mask:
21131 + kmem_cache_destroy(qi_cache);
21132 +err_qicache:
21133 + dev_set_drvdata(dev, NULL);
21134 +
21135 + return err;
21136 +}
21137 +
21138 +static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
21139 +{
21140 + struct device *dev;
21141 + struct dpaa2_caam_priv *priv;
21142 + int i;
21143 +
21144 + dev = &ls_dev->dev;
21145 + priv = dev_get_drvdata(dev);
21146 +
21147 + for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
21148 + struct caam_aead_alg *t_alg = driver_aeads + i;
21149 +
21150 + if (t_alg->registered)
21151 + crypto_unregister_aead(&t_alg->aead);
21152 + }
21153 +
21154 + if (alg_list.next) {
21155 + struct caam_crypto_alg *t_alg, *n;
21156 +
21157 + list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
21158 + crypto_unregister_alg(&t_alg->crypto_alg);
21159 + list_del(&t_alg->entry);
21160 + kfree(t_alg);
21161 + }
21162 + }
21163 +
21164 + if (hash_list.next) {
21165 + struct caam_hash_alg *t_hash_alg, *p;
21166 +
21167 + list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
21168 + crypto_unregister_ahash(&t_hash_alg->ahash_alg);
21169 + list_del(&t_hash_alg->entry);
21170 + kfree(t_hash_alg);
21171 + }
21172 + }
21173 +
21174 + dpaa2_dpseci_disable(priv);
21175 + dpaa2_dpseci_dpio_free(priv);
21176 + dpaa2_dpseci_free(priv);
21177 + free_percpu(priv->ppriv);
21178 + fsl_mc_portal_free(priv->mc_io);
21179 + dev_set_drvdata(dev, NULL);
21180 + kmem_cache_destroy(qi_cache);
21181 +
21182 + return 0;
21183 +}
21184 +
21185 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
21186 +{
21187 + struct dpaa2_fd fd;
21188 + struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
21189 + int err = 0, i, id;
21190 +
21191 + if (IS_ERR(req))
21192 + return PTR_ERR(req);
21193 +
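+ /* Drop the request early if the DPSECI congestion group is congested */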
21194 + if (priv->cscn_mem) {
21195 + dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
21196 + DPAA2_CSCN_SIZE,
21197 + DMA_FROM_DEVICE);
21198 + if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
21199 + dev_dbg_ratelimited(dev, "Dropping request\n");
21200 + return -EBUSY;
21201 + }
21202 + }
21203 +
21204 + dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
21205 +
21206 + req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
21207 + DMA_BIDIRECTIONAL);
21208 + if (dma_mapping_error(dev, req->fd_flt_dma)) {
21209 + dev_err(dev, "DMA mapping error for QI enqueue request\n");
21210 + goto err_out;
21211 + }
21212 +
21213 + memset(&fd, 0, sizeof(fd));
21214 + dpaa2_fd_set_format(&fd, dpaa2_fd_list);
21215 + dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
21216 + dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
21217 + dpaa2_fd_set_flc(&fd, req->flc_dma);
21218 +
21219 + /*
21220 + * There is no guarantee that preemption is disabled here,
21221 + * thus take action.
21222 + */
21223 + preempt_disable();
21224 + id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
21225 + for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
21226 + err = dpaa2_io_service_enqueue_fq(NULL,
21227 + priv->tx_queue_attr[id].fqid,
21228 + &fd);
21229 + if (err != -EBUSY)
21230 + break;
21231 + }
21232 + preempt_enable();
21233 +
21234 + if (unlikely(err < 0)) {
21235 + dev_err(dev, "Error enqueuing frame: %d\n", err);
21236 + goto err_out;
21237 + }
21238 +
21239 + return -EINPROGRESS;
21240 +
21241 +err_out:
21242 + dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
21243 + DMA_BIDIRECTIONAL);
21244 + return -EIO;
21245 +}
21246 +EXPORT_SYMBOL(dpaa2_caam_enqueue);
21247 +
21248 +const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
21249 + {
21250 + .vendor = FSL_MC_VENDOR_FREESCALE,
21251 + .obj_type = "dpseci",
21252 + },
21253 + { .vendor = 0x0 }
21254 +};
21255 +
21256 +static struct fsl_mc_driver dpaa2_caam_driver = {
21257 + .driver = {
21258 + .name = KBUILD_MODNAME,
21259 + .owner = THIS_MODULE,
21260 + },
21261 + .probe = dpaa2_caam_probe,
21262 + .remove = dpaa2_caam_remove,
21263 + .match_id_table = dpaa2_caam_match_id_table
21264 +};
21265 +
21266 +MODULE_LICENSE("Dual BSD/GPL");
21267 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
21268 +MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
21269 +
21270 +module_fsl_mc_driver(dpaa2_caam_driver);
21271 --- /dev/null
21272 +++ b/drivers/crypto/caam/caamalg_qi2.h
21273 @@ -0,0 +1,281 @@
21274 +/*
21275 + * Copyright 2015-2016 Freescale Semiconductor Inc.
21276 + * Copyright 2017 NXP
21277 + *
21278 + * Redistribution and use in source and binary forms, with or without
21279 + * modification, are permitted provided that the following conditions are met:
21280 + * * Redistributions of source code must retain the above copyright
21281 + * notice, this list of conditions and the following disclaimer.
21282 + * * Redistributions in binary form must reproduce the above copyright
21283 + * notice, this list of conditions and the following disclaimer in the
21284 + * documentation and/or other materials provided with the distribution.
21285 + * * Neither the names of the above-listed copyright holders nor the
21286 + * names of any contributors may be used to endorse or promote products
21287 + * derived from this software without specific prior written permission.
21288 + *
21289 + *
21290 + * ALTERNATIVELY, this software may be distributed under the terms of the
21291 + * GNU General Public License ("GPL") as published by the Free Software
21292 + * Foundation, either version 2 of that License or (at your option) any
21293 + * later version.
21294 + *
21295 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21296 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21297 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21298 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
21299 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21300 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21301 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
21302 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
21303 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
21304 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
21305 + * POSSIBILITY OF SUCH DAMAGE.
21306 + */
21307 +
21308 +#ifndef _CAAMALG_QI2_H_
21309 +#define _CAAMALG_QI2_H_
21310 +
21311 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
21312 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
21313 +#include <linux/threads.h>
21314 +#include "dpseci.h"
21315 +#include "desc_constr.h"
21316 +
21317 +#define DPAA2_CAAM_STORE_SIZE 16
21318 +/* NAPI weight *must* be a multiple of the store size. */
21319 +#define DPAA2_CAAM_NAPI_WEIGHT 64
21320 +
21321 +/* The congestion entrance threshold was chosen so that on LS2088
21322 + * we support the maximum throughput for the available memory
21323 + */
21324 +#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
21325 +#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
21326 +
21327 +/**
21328 + * dpaa2_caam_priv - driver private data
21329 + * @dpsec_id: DPSECI object unique ID
21330 + * @major_ver: DPSECI major version
21331 + * @minor_ver: DPSECI minor version
21332 + * @dpseci_attr: DPSECI attributes
21333 + * @sec_attr: SEC engine attributes
21334 + * @rx_queue_attr: array of Rx queue attributes
21335 + * @tx_queue_attr: array of Tx queue attributes
21336 + * @cscn_mem: pointer to memory region containing the
21337 + * dpaa2_cscn struct; its size is larger than
21338 + * sizeof(struct dpaa2_cscn) to accommodate alignment
21339 + * @cscn_mem_aligned: pointer to struct dpaa2_cscn; it is computed
21340 + * as PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
21341 + * @cscn_dma: dma address used by the QMAN to write CSCN messages
21342 + * @dev: device associated with the DPSECI object
21343 + * @mc_io: pointer to MC portal's I/O object
21344 + * @domain: IOMMU domain
21345 + * @ppriv: per CPU pointers to private data
21346 + */
21347 +struct dpaa2_caam_priv {
21348 + int dpsec_id;
21349 +
21350 + u16 major_ver;
21351 + u16 minor_ver;
21352 +
21353 + struct dpseci_attr dpseci_attr;
21354 + struct dpseci_sec_attr sec_attr;
21355 + struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_PRIO_NUM];
21356 + struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_PRIO_NUM];
21357 + int num_pairs;
21358 +
21359 + /* congestion */
21360 + void *cscn_mem;
21361 + void *cscn_mem_aligned;
21362 + dma_addr_t cscn_dma;
21363 +
21364 + struct device *dev;
21365 + struct fsl_mc_io *mc_io;
21366 + struct iommu_domain *domain;
21367 +
21368 + struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
21369 +};
21370 +
21371 +/**
21372 + * dpaa2_caam_priv_per_cpu - per CPU private data
21373 + * @napi: napi structure
21374 + * @net_dev: netdev used by napi
21375 + * @req_fqid: (virtual) request (Tx / enqueue) FQID
21376 + * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
21377 + * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
21378 + * @nctx: notification context of response FQ
21379 + * @store: where dequeued frames are stored
21380 + * @priv: backpointer to dpaa2_caam_priv
21381 + */
21382 +struct dpaa2_caam_priv_per_cpu {
21383 + struct napi_struct napi;
21384 + struct net_device net_dev;
21385 + int req_fqid;
21386 + int rsp_fqid;
21387 + int prio;
21388 + struct dpaa2_io_notification_ctx nctx;
21389 + struct dpaa2_io_store *store;
21390 + struct dpaa2_caam_priv *priv;
21391 +};
21392 +
21393 +/*
21394 + * The CAAM QI hardware constructs a job descriptor which points
21395 + * to shared descriptor (as pointed by context_a of FQ to CAAM).
21396 + * When the job descriptor is executed by deco, the whole job
21397 + * descriptor together with shared descriptor gets loaded in
21398 + * deco buffer which is 64 words long (each 32-bit).
21399 + *
21400 + * The job descriptor constructed by QI hardware has layout:
21401 + *
21402 + * HEADER (1 word)
21403 + * Shdesc ptr (1 or 2 words)
21404 + * SEQ_OUT_PTR (1 word)
21405 + * Out ptr (1 or 2 words)
21406 + * Out length (1 word)
21407 + * SEQ_IN_PTR (1 word)
21408 + * In ptr (1 or 2 words)
21409 + * In length (1 word)
21410 + *
21411 + * The shdesc ptr is used to fetch shared descriptor contents
21412 + * into deco buffer.
21413 + *
21414 + * Apart from the shdesc contents, the total number of words that
21415 + * get loaded into the deco buffer is 8 or 11. The remaining words
21416 + * in deco buffer can be used for storing shared descriptor.
21417 + */
21418 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
21419 +
21420 +/* Length of a single buffer in the QI driver memory cache */
21421 +#define CAAM_QI_MEMCACHE_SIZE 512
21422 +
21423 +/*
21424 + * aead_edesc - s/w-extended aead descriptor
21425 + * @src_nents: number of segments in input scatterlist
21426 + * @dst_nents: number of segments in output scatterlist
21427 + * @iv_dma: dma address of iv for checking continuity and link table
21428 + * @qm_sg_bytes: length of dma mapped h/w link table
21429 + * @qm_sg_dma: bus physical mapped address of h/w link table
21430 + * @assoclen_dma: bus physical mapped address of req->assoclen
21431 + * @sgt: the h/w link table
21432 + */
21433 +struct aead_edesc {
21434 + int src_nents;
21435 + int dst_nents;
21436 + dma_addr_t iv_dma;
21437 + int qm_sg_bytes;
21438 + dma_addr_t qm_sg_dma;
21439 + dma_addr_t assoclen_dma;
21440 +#define CAAM_QI_MAX_AEAD_SG \
21441 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) / \
21442 + sizeof(struct dpaa2_sg_entry))
21443 + struct dpaa2_sg_entry sgt[0];
21444 +};
21445 +
21446 +/*
21447 + * tls_edesc - s/w-extended tls descriptor
21448 + * @src_nents: number of segments in input scatterlist
21449 + * @dst_nents: number of segments in output scatterlist
21450 + * @iv_dma: dma address of iv for checking continuity and link table
21451 + * @qm_sg_bytes: length of dma mapped h/w link table
21452 + * @qm_sg_dma: bus physical mapped address of h/w link table
21453 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
21454 + * @dst: pointer to output scatterlist, useful for unmapping
21455 + * @sgt: the h/w link table
21456 + */
21457 +struct tls_edesc {
21458 + int src_nents;
21459 + int dst_nents;
21460 + dma_addr_t iv_dma;
21461 + int qm_sg_bytes;
21462 + dma_addr_t qm_sg_dma;
21463 + struct scatterlist tmp[2];
21464 + struct scatterlist *dst;
21465 + struct dpaa2_sg_entry sgt[0];
21466 +};
21467 +
21468 +/*
21469 + * ablkcipher_edesc - s/w-extended ablkcipher descriptor
21470 + * @src_nents: number of segments in input scatterlist
21471 + * @dst_nents: number of segments in output scatterlist
21472 + * @iv_dma: dma address of iv for checking continuity and link table
21473 + * @qm_sg_bytes: length of dma mapped qm_sg space
21474 + * @qm_sg_dma: I/O virtual address of h/w link table
21475 + * @sgt: the h/w link table
21476 + */
21477 +struct ablkcipher_edesc {
21478 + int src_nents;
21479 + int dst_nents;
21480 + dma_addr_t iv_dma;
21481 + int qm_sg_bytes;
21482 + dma_addr_t qm_sg_dma;
21483 +#define CAAM_QI_MAX_ABLKCIPHER_SG \
21484 + ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
21485 + sizeof(struct dpaa2_sg_entry))
21486 + struct dpaa2_sg_entry sgt[0];
21487 +};
21488 +
21489 +/*
21490 + * ahash_edesc - s/w-extended ahash descriptor
21491 + * @dst_dma: I/O virtual address of req->result
21492 + * @qm_sg_dma: I/O virtual address of h/w link table
21493 + * @src_nents: number of segments in input scatterlist
21494 + * @qm_sg_bytes: length of dma mapped qm_sg space
21495 + * @sgt: pointer to h/w link table
21496 + */
21497 +struct ahash_edesc {
21498 + dma_addr_t dst_dma;
21499 + dma_addr_t qm_sg_dma;
21500 + int src_nents;
21501 + int qm_sg_bytes;
21502 + struct dpaa2_sg_entry sgt[0];
21503 +};
21504 +
21505 +/**
21506 + * caam_flc - Flow Context (FLC)
21507 + * @flc: Flow Context options
21508 + * @sh_desc: Shared Descriptor
21509 + */
21510 +struct caam_flc {
21511 + u32 flc[16];
21512 + u32 sh_desc[MAX_SDLEN];
21513 +} ____cacheline_aligned;
21514 +
21515 +enum optype {
21516 + ENCRYPT = 0,
21517 + DECRYPT,
21518 + GIVENCRYPT,
21519 + NUM_OP
21520 +};
21521 +
21522 +/**
21523 + * caam_request - the request structure the driver application should fill in
21524 + * when submitting a job to the driver.
21525 + * @fd_flt: Frame list table defining input and output
21526 + * fd_flt[0] - FLE pointing to output buffer
21527 + * fd_flt[1] - FLE pointing to input buffer
21528 + * @fd_flt_dma: DMA address for the frame list table
21529 + * @flc: Flow Context
21530 + * @flc_dma: I/O virtual address of Flow Context
21531 + * @op_type: operation type
21532 + * @cbk: Callback function to invoke when job is completed
21533 + * @ctx: arbitrary context attached to the request by the application
21534 + * @edesc: extended descriptor; points to one of {ablkcipher,aead}_edesc
21535 + */
21536 +struct caam_request {
21537 + struct dpaa2_fl_entry fd_flt[2];
21538 + dma_addr_t fd_flt_dma;
21539 + struct caam_flc *flc;
21540 + dma_addr_t flc_dma;
21541 + enum optype op_type;
21542 + void (*cbk)(void *ctx, u32 err);
21543 + void *ctx;
21544 + void *edesc;
21545 +};
21546 +
21547 +/**
21548 + * dpaa2_caam_enqueue() - enqueue a crypto request
21549 + * @dev: device associated with the DPSECI object
21550 + * @req: pointer to caam_request
21551 + */
21552 +int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
21553 +
21554 +#endif /* _CAAMALG_QI2_H_ */
21555 --- a/drivers/crypto/caam/caamhash.c
21556 +++ b/drivers/crypto/caam/caamhash.c
21557 @@ -62,6 +62,7 @@
21558 #include "error.h"
21559 #include "sg_sw_sec4.h"
21560 #include "key_gen.h"
21561 +#include "caamhash_desc.h"
21562
21563 #define CAAM_CRA_PRIORITY 3000
21564
21565 @@ -71,14 +72,6 @@
21566 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
21567 #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
21568
21569 -/* length of descriptors text */
21570 -#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
21571 -#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
21572 -#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
21573 -#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
21574 -#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
21575 -#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
21576 -
21577 #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
21578 CAAM_MAX_HASH_KEY_SIZE)
21579 #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
21580 @@ -103,20 +96,14 @@ struct caam_hash_ctx {
21581 u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
21582 u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
21583 u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
21584 - u32 sh_desc_finup[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
21585 dma_addr_t sh_desc_update_dma ____cacheline_aligned;
21586 dma_addr_t sh_desc_update_first_dma;
21587 dma_addr_t sh_desc_fin_dma;
21588 dma_addr_t sh_desc_digest_dma;
21589 - dma_addr_t sh_desc_finup_dma;
21590 struct device *jrdev;
21591 - u32 alg_type;
21592 - u32 alg_op;
21593 u8 key[CAAM_MAX_HASH_KEY_SIZE];
21594 - dma_addr_t key_dma;
21595 int ctx_len;
21596 - unsigned int split_key_len;
21597 - unsigned int split_key_pad_len;
21598 + struct alginfo adata;
21599 };
21600
21601 /* ahash state */
21602 @@ -143,6 +130,31 @@ struct caam_export_state {
21603 int (*finup)(struct ahash_request *req);
21604 };
21605
21606 +static inline void switch_buf(struct caam_hash_state *state)
21607 +{
21608 + state->current_buf ^= 1;
21609 +}
21610 +
21611 +static inline u8 *current_buf(struct caam_hash_state *state)
21612 +{
21613 + return state->current_buf ? state->buf_1 : state->buf_0;
21614 +}
21615 +
21616 +static inline u8 *alt_buf(struct caam_hash_state *state)
21617 +{
21618 + return state->current_buf ? state->buf_0 : state->buf_1;
21619 +}
21620 +
21621 +static inline int *current_buflen(struct caam_hash_state *state)
21622 +{
21623 + return state->current_buf ? &state->buflen_1 : &state->buflen_0;
21624 +}
21625 +
21626 +static inline int *alt_buflen(struct caam_hash_state *state)
21627 +{
21628 + return state->current_buf ? &state->buflen_0 : &state->buflen_1;
21629 +}
21630 +
21631 /* Common job descriptor seq in/out ptr routines */
21632
21633 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
21634 @@ -175,40 +187,31 @@ static inline dma_addr_t map_seq_out_ptr
21635 return dst_dma;
21636 }
21637
21638 -/* Map current buffer in state and put it in link table */
21639 -static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
21640 - struct sec4_sg_entry *sec4_sg,
21641 - u8 *buf, int buflen)
21642 -{
21643 - dma_addr_t buf_dma;
21644 +/* Map current buffer in state (if length > 0) and put it in link table */
21645 +static inline int buf_map_to_sec4_sg(struct device *jrdev,
21646 + struct sec4_sg_entry *sec4_sg,
21647 + struct caam_hash_state *state)
21648 +{
21649 + int buflen = *current_buflen(state);
21650 +
21651 + if (!buflen)
21652 + return 0;
21653 +
21654 + state->buf_dma = dma_map_single(jrdev, current_buf(state), buflen,
21655 + DMA_TO_DEVICE);
21656 + if (dma_mapping_error(jrdev, state->buf_dma)) {
21657 + dev_err(jrdev, "unable to map buf\n");
21658 + state->buf_dma = 0;
21659 + return -ENOMEM;
21660 + }
21661
21662 - buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
21663 - dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
21664 + dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0);
21665
21666 - return buf_dma;
21667 -}
21668 -
21669 -/*
21670 - * Only put buffer in link table if it contains data, which is possible,
21671 - * since a buffer has previously been used, and needs to be unmapped,
21672 - */
21673 -static inline dma_addr_t
21674 -try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
21675 - u8 *buf, dma_addr_t buf_dma, int buflen,
21676 - int last_buflen)
21677 -{
21678 - if (buf_dma && !dma_mapping_error(jrdev, buf_dma))
21679 - dma_unmap_single(jrdev, buf_dma, last_buflen, DMA_TO_DEVICE);
21680 - if (buflen)
21681 - buf_dma = buf_map_to_sec4_sg(jrdev, sec4_sg, buf, buflen);
21682 - else
21683 - buf_dma = 0;
21684 -
21685 - return buf_dma;
21686 + return 0;
21687 }
21688
21689 /* Map state->caam_ctx, and add it to link table */
21690 -static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
21691 +static inline int ctx_map_to_sec4_sg(struct device *jrdev,
21692 struct caam_hash_state *state, int ctx_len,
21693 struct sec4_sg_entry *sec4_sg, u32 flag)
21694 {
21695 @@ -224,124 +227,22 @@ static inline int ctx_map_to_sec4_sg(u32
21696 return 0;
21697 }
21698
21699 -/* Common shared descriptor commands */
21700 -static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
21701 -{
21702 - append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
21703 - ctx->split_key_len, CLASS_2 |
21704 - KEY_DEST_MDHA_SPLIT | KEY_ENC);
21705 -}
21706 -
21707 -/* Append key if it has been set */
21708 -static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
21709 -{
21710 - u32 *key_jump_cmd;
21711 -
21712 - init_sh_desc(desc, HDR_SHARE_SERIAL);
21713 -
21714 - if (ctx->split_key_len) {
21715 - /* Skip if already shared */
21716 - key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
21717 - JUMP_COND_SHRD);
21718 -
21719 - append_key_ahash(desc, ctx);
21720 -
21721 - set_jump_tgt_here(desc, key_jump_cmd);
21722 - }
21723 -
21724 - /* Propagate errors from shared to job descriptor */
21725 - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
21726 -}
21727 -
21728 -/*
21729 - * For ahash read data from seqin following state->caam_ctx,
21730 - * and write resulting class2 context to seqout, which may be state->caam_ctx
21731 - * or req->result
21732 - */
21733 -static inline void ahash_append_load_str(u32 *desc, int digestsize)
21734 -{
21735 - /* Calculate remaining bytes to read */
21736 - append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
21737 -
21738 - /* Read remaining bytes */
21739 - append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
21740 - FIFOLD_TYPE_MSG | KEY_VLF);
21741 -
21742 - /* Store class2 context bytes */
21743 - append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
21744 - LDST_SRCDST_BYTE_CONTEXT);
21745 -}
21746 -
21747 -/*
21748 - * For ahash update, final and finup, import context, read and write to seqout
21749 - */
21750 -static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state,
21751 - int digestsize,
21752 - struct caam_hash_ctx *ctx)
21753 -{
21754 - init_sh_desc_key_ahash(desc, ctx);
21755 -
21756 - /* Import context from software */
21757 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
21758 - LDST_CLASS_2_CCB | ctx->ctx_len);
21759 -
21760 - /* Class 2 operation */
21761 - append_operation(desc, op | state | OP_ALG_ENCRYPT);
21762 -
21763 - /*
21764 - * Load from buf and/or src and write to req->result or state->context
21765 - */
21766 - ahash_append_load_str(desc, digestsize);
21767 -}
21768 -
21769 -/* For ahash firsts and digest, read and write to seqout */
21770 -static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
21771 - int digestsize, struct caam_hash_ctx *ctx)
21772 -{
21773 - init_sh_desc_key_ahash(desc, ctx);
21774 -
21775 - /* Class 2 operation */
21776 - append_operation(desc, op | state | OP_ALG_ENCRYPT);
21777 -
21778 - /*
21779 - * Load from buf and/or src and write to req->result or state->context
21780 - */
21781 - ahash_append_load_str(desc, digestsize);
21782 -}
21783 -
21784 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
21785 {
21786 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
21787 int digestsize = crypto_ahash_digestsize(ahash);
21788 struct device *jrdev = ctx->jrdev;
21789 - u32 have_key = 0;
21790 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
21791 u32 *desc;
21792
21793 - if (ctx->split_key_len)
21794 - have_key = OP_ALG_AAI_HMAC_PRECOMP;
21795 + ctx->adata.key_virt = ctx->key;
21796
21797 /* ahash_update shared descriptor */
21798 desc = ctx->sh_desc_update;
21799 -
21800 - init_sh_desc(desc, HDR_SHARE_SERIAL);
21801 -
21802 - /* Import context from software */
21803 - append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
21804 - LDST_CLASS_2_CCB | ctx->ctx_len);
21805 -
21806 - /* Class 2 operation */
21807 - append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
21808 - OP_ALG_ENCRYPT);
21809 -
21810 - /* Load data and write to result or context */
21811 - ahash_append_load_str(desc, ctx->ctx_len);
21812 -
21813 - ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
21814 - DMA_TO_DEVICE);
21815 - if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
21816 - dev_err(jrdev, "unable to map shared descriptor\n");
21817 - return -ENOMEM;
21818 - }
21819 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
21820 + ctx->ctx_len, true, ctrlpriv->era);
21821 + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
21822 + desc_bytes(desc), DMA_TO_DEVICE);
21823 #ifdef DEBUG
21824 print_hex_dump(KERN_ERR,
21825 "ahash update shdesc@"__stringify(__LINE__)": ",
21826 @@ -350,17 +251,10 @@ static int ahash_set_sh_desc(struct cryp
21827
21828 /* ahash_update_first shared descriptor */
21829 desc = ctx->sh_desc_update_first;
21830 -
21831 - ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
21832 - ctx->ctx_len, ctx);
21833 -
21834 - ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
21835 - desc_bytes(desc),
21836 - DMA_TO_DEVICE);
21837 - if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
21838 - dev_err(jrdev, "unable to map shared descriptor\n");
21839 - return -ENOMEM;
21840 - }
21841 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
21842 + ctx->ctx_len, false, ctrlpriv->era);
21843 + dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
21844 + desc_bytes(desc), DMA_TO_DEVICE);
21845 #ifdef DEBUG
21846 print_hex_dump(KERN_ERR,
21847 "ahash update first shdesc@"__stringify(__LINE__)": ",
21848 @@ -369,53 +263,22 @@ static int ahash_set_sh_desc(struct cryp
21849
21850 /* ahash_final shared descriptor */
21851 desc = ctx->sh_desc_fin;
21852 -
21853 - ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
21854 - OP_ALG_AS_FINALIZE, digestsize, ctx);
21855 -
21856 - ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
21857 - DMA_TO_DEVICE);
21858 - if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
21859 - dev_err(jrdev, "unable to map shared descriptor\n");
21860 - return -ENOMEM;
21861 - }
21862 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
21863 + ctx->ctx_len, true, ctrlpriv->era);
21864 + dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
21865 + desc_bytes(desc), DMA_TO_DEVICE);
21866 #ifdef DEBUG
21867 print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
21868 DUMP_PREFIX_ADDRESS, 16, 4, desc,
21869 desc_bytes(desc), 1);
21870 #endif
21871
21872 - /* ahash_finup shared descriptor */
21873 - desc = ctx->sh_desc_finup;
21874 -
21875 - ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
21876 - OP_ALG_AS_FINALIZE, digestsize, ctx);
21877 -
21878 - ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
21879 - DMA_TO_DEVICE);
21880 - if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
21881 - dev_err(jrdev, "unable to map shared descriptor\n");
21882 - return -ENOMEM;
21883 - }
21884 -#ifdef DEBUG
21885 - print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
21886 - DUMP_PREFIX_ADDRESS, 16, 4, desc,
21887 - desc_bytes(desc), 1);
21888 -#endif
21889 -
21890 /* ahash_digest shared descriptor */
21891 desc = ctx->sh_desc_digest;
21892 -
21893 - ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
21894 - digestsize, ctx);
21895 -
21896 - ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
21897 - desc_bytes(desc),
21898 - DMA_TO_DEVICE);
21899 - if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
21900 - dev_err(jrdev, "unable to map shared descriptor\n");
21901 - return -ENOMEM;
21902 - }
21903 + cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
21904 + ctx->ctx_len, false, ctrlpriv->era);
21905 + dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
21906 + desc_bytes(desc), DMA_TO_DEVICE);
21907 #ifdef DEBUG
21908 print_hex_dump(KERN_ERR,
21909 "ahash digest shdesc@"__stringify(__LINE__)": ",
21910 @@ -426,14 +289,6 @@ static int ahash_set_sh_desc(struct cryp
21911 return 0;
21912 }
21913
21914 -static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
21915 - u32 keylen)
21916 -{
21917 - return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
21918 - ctx->split_key_pad_len, key_in, keylen,
21919 - ctx->alg_op);
21920 -}
21921 -
21922 /* Digest hash size if it is too large */
21923 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
21924 u32 *keylen, u8 *key_out, u32 digestsize)
21925 @@ -469,7 +324,7 @@ static int hash_digest_key(struct caam_h
21926 }
21927
21928 /* Job descriptor to perform unkeyed hash on key_in */
21929 - append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
21930 + append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
21931 OP_ALG_AS_INITFINAL);
21932 append_seq_in_ptr(desc, src_dma, *keylen, 0);
21933 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
21934 @@ -513,12 +368,10 @@ static int hash_digest_key(struct caam_h
21935 static int ahash_setkey(struct crypto_ahash *ahash,
21936 const u8 *key, unsigned int keylen)
21937 {
21938 - /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
21939 - static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
21940 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
21941 - struct device *jrdev = ctx->jrdev;
21942 int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
21943 int digestsize = crypto_ahash_digestsize(ahash);
21944 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
21945 int ret;
21946 u8 *hashed_key = NULL;
21947
21948 @@ -539,43 +392,29 @@ static int ahash_setkey(struct crypto_ah
21949 key = hashed_key;
21950 }
21951
21952 - /* Pick class 2 key length from algorithm submask */
21953 - ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
21954 - OP_ALG_ALGSEL_SHIFT] * 2;
21955 - ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
21956 -
21957 -#ifdef DEBUG
21958 - printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
21959 - ctx->split_key_len, ctx->split_key_pad_len);
21960 - print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
21961 - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
21962 -#endif
21963 + /*
21964 + * If DKP is supported, use it in the shared descriptor to generate
21965 + * the split key.
21966 + */
21967 + if (ctrlpriv->era >= 6) {
21968 + ctx->adata.key_inline = true;
21969 + ctx->adata.keylen = keylen;
21970 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
21971 + OP_ALG_ALGSEL_MASK);
21972
21973 - ret = gen_split_hash_key(ctx, key, keylen);
21974 - if (ret)
21975 - goto bad_free_key;
21976 + if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
21977 + goto bad_free_key;
21978
21979 - ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
21980 - DMA_TO_DEVICE);
21981 - if (dma_mapping_error(jrdev, ctx->key_dma)) {
21982 - dev_err(jrdev, "unable to map key i/o memory\n");
21983 - ret = -ENOMEM;
21984 - goto error_free_key;
21985 + memcpy(ctx->key, key, keylen);
21986 + } else {
21987 + ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
21988 + keylen, CAAM_MAX_HASH_KEY_SIZE);
21989 + if (ret)
21990 + goto bad_free_key;
21991 }
21992 -#ifdef DEBUG
21993 - print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
21994 - DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
21995 - ctx->split_key_pad_len, 1);
21996 -#endif
21997
21998 - ret = ahash_set_sh_desc(ahash);
21999 - if (ret) {
22000 - dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
22001 - DMA_TO_DEVICE);
22002 - }
22003 - error_free_key:
22004 kfree(hashed_key);
22005 - return ret;
22006 + return ahash_set_sh_desc(ahash);
22007 bad_free_key:
22008 kfree(hashed_key);
22009 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
22010 @@ -604,6 +443,8 @@ static inline void ahash_unmap(struct de
22011 struct ahash_edesc *edesc,
22012 struct ahash_request *req, int dst_len)
22013 {
22014 + struct caam_hash_state *state = ahash_request_ctx(req);
22015 +
22016 if (edesc->src_nents)
22017 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
22018 if (edesc->dst_dma)
22019 @@ -612,6 +453,12 @@ static inline void ahash_unmap(struct de
22020 if (edesc->sec4_sg_bytes)
22021 dma_unmap_single(dev, edesc->sec4_sg_dma,
22022 edesc->sec4_sg_bytes, DMA_TO_DEVICE);
22023 +
22024 + if (state->buf_dma) {
22025 + dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
22026 + DMA_TO_DEVICE);
22027 + state->buf_dma = 0;
22028 + }
22029 }
22030
22031 static inline void ahash_unmap_ctx(struct device *dev,
22032 @@ -643,8 +490,7 @@ static void ahash_done(struct device *jr
22033 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22034 #endif
22035
22036 - edesc = (struct ahash_edesc *)((char *)desc -
22037 - offsetof(struct ahash_edesc, hw_desc));
22038 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22039 if (err)
22040 caam_jr_strstatus(jrdev, err);
22041
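
The completion callbacks in the hunks above switch from open-coded pointer arithmetic to container_of() when recovering the extended descriptor from the hardware descriptor pointer handed back by the job ring. A minimal sketch of the two equivalent forms (taken from the hunks; hw_desc is the in-line hardware descriptor array of struct ahash_edesc):

	/* old form: step back from hw_desc to the enclosing struct by hand */
	edesc = (struct ahash_edesc *)((char *)desc -
				       offsetof(struct ahash_edesc, hw_desc));

	/* new form: same address, intent stated explicitly */
	edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
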
22042 @@ -671,19 +517,19 @@ static void ahash_done_bi(struct device
22043 struct ahash_edesc *edesc;
22044 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
22045 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22046 -#ifdef DEBUG
22047 struct caam_hash_state *state = ahash_request_ctx(req);
22048 +#ifdef DEBUG
22049 int digestsize = crypto_ahash_digestsize(ahash);
22050
22051 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22052 #endif
22053
22054 - edesc = (struct ahash_edesc *)((char *)desc -
22055 - offsetof(struct ahash_edesc, hw_desc));
22056 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22057 if (err)
22058 caam_jr_strstatus(jrdev, err);
22059
22060 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
22061 + switch_buf(state);
22062 kfree(edesc);
22063
22064 #ifdef DEBUG
22065 @@ -713,8 +559,7 @@ static void ahash_done_ctx_src(struct de
22066 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22067 #endif
22068
22069 - edesc = (struct ahash_edesc *)((char *)desc -
22070 - offsetof(struct ahash_edesc, hw_desc));
22071 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22072 if (err)
22073 caam_jr_strstatus(jrdev, err);
22074
22075 @@ -741,19 +586,19 @@ static void ahash_done_ctx_dst(struct de
22076 struct ahash_edesc *edesc;
22077 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
22078 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22079 -#ifdef DEBUG
22080 struct caam_hash_state *state = ahash_request_ctx(req);
22081 +#ifdef DEBUG
22082 int digestsize = crypto_ahash_digestsize(ahash);
22083
22084 dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
22085 #endif
22086
22087 - edesc = (struct ahash_edesc *)((char *)desc -
22088 - offsetof(struct ahash_edesc, hw_desc));
22089 + edesc = container_of(desc, struct ahash_edesc, hw_desc[0]);
22090 if (err)
22091 caam_jr_strstatus(jrdev, err);
22092
22093 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
22094 + switch_buf(state);
22095 kfree(edesc);
22096
22097 #ifdef DEBUG
22098 @@ -835,13 +680,12 @@ static int ahash_update_ctx(struct ahash
22099 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22100 struct caam_hash_state *state = ahash_request_ctx(req);
22101 struct device *jrdev = ctx->jrdev;
22102 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22103 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22104 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22105 - int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
22106 - u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
22107 - int *next_buflen = state->current_buf ? &state->buflen_0 :
22108 - &state->buflen_1, last_buflen;
22109 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22110 + GFP_KERNEL : GFP_ATOMIC;
22111 + u8 *buf = current_buf(state);
22112 + int *buflen = current_buflen(state);
22113 + u8 *next_buf = alt_buf(state);
22114 + int *next_buflen = alt_buflen(state), last_buflen;
22115 int in_len = *buflen + req->nbytes, to_hash;
22116 u32 *desc;
22117 int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
22118 @@ -890,15 +734,14 @@ static int ahash_update_ctx(struct ahash
22119 edesc->src_nents = src_nents;
22120 edesc->sec4_sg_bytes = sec4_sg_bytes;
22121
22122 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
22123 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
22124 edesc->sec4_sg, DMA_BIDIRECTIONAL);
22125 if (ret)
22126 goto unmap_ctx;
22127
22128 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
22129 - edesc->sec4_sg + 1,
22130 - buf, state->buf_dma,
22131 - *buflen, last_buflen);
22132 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
22133 + if (ret)
22134 + goto unmap_ctx;
22135
22136 if (mapped_nents) {
22137 sg_to_sec4_sg_last(req->src, mapped_nents,
22138 @@ -909,12 +752,10 @@ static int ahash_update_ctx(struct ahash
22139 to_hash - *buflen,
22140 *next_buflen, 0);
22141 } else {
22142 - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
22143 - cpu_to_caam32(SEC4_SG_LEN_FIN);
22144 + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index -
22145 + 1);
22146 }
22147
22148 - state->current_buf = !state->current_buf;
22149 -
22150 desc = edesc->hw_desc;
22151
22152 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
22153 @@ -969,12 +810,9 @@ static int ahash_final_ctx(struct ahash_
22154 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22155 struct caam_hash_state *state = ahash_request_ctx(req);
22156 struct device *jrdev = ctx->jrdev;
22157 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22158 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22159 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22160 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22161 - int last_buflen = state->current_buf ? state->buflen_0 :
22162 - state->buflen_1;
22163 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22164 + GFP_KERNEL : GFP_ATOMIC;
22165 + int buflen = *current_buflen(state);
22166 u32 *desc;
22167 int sec4_sg_bytes, sec4_sg_src_index;
22168 int digestsize = crypto_ahash_digestsize(ahash);
22169 @@ -994,18 +832,17 @@ static int ahash_final_ctx(struct ahash_
22170 desc = edesc->hw_desc;
22171
22172 edesc->sec4_sg_bytes = sec4_sg_bytes;
22173 - edesc->src_nents = 0;
22174
22175 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
22176 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
22177 edesc->sec4_sg, DMA_TO_DEVICE);
22178 if (ret)
22179 goto unmap_ctx;
22180
22181 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
22182 - buf, state->buf_dma, buflen,
22183 - last_buflen);
22184 - (edesc->sec4_sg + sec4_sg_src_index - 1)->len |=
22185 - cpu_to_caam32(SEC4_SG_LEN_FIN);
22186 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
22187 + if (ret)
22188 + goto unmap_ctx;
22189 +
22190 + sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1);
22191
22192 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
22193 sec4_sg_bytes, DMA_TO_DEVICE);
22194 @@ -1048,12 +885,9 @@ static int ahash_finup_ctx(struct ahash_
22195 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22196 struct caam_hash_state *state = ahash_request_ctx(req);
22197 struct device *jrdev = ctx->jrdev;
22198 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22199 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22200 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22201 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22202 - int last_buflen = state->current_buf ? state->buflen_0 :
22203 - state->buflen_1;
22204 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22205 + GFP_KERNEL : GFP_ATOMIC;
22206 + int buflen = *current_buflen(state);
22207 u32 *desc;
22208 int sec4_sg_src_index;
22209 int src_nents, mapped_nents;
22210 @@ -1082,7 +916,7 @@ static int ahash_finup_ctx(struct ahash_
22211
22212 /* allocate space for base edesc and hw desc commands, link tables */
22213 edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
22214 - ctx->sh_desc_finup, ctx->sh_desc_finup_dma,
22215 + ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
22216 flags);
22217 if (!edesc) {
22218 dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
22219 @@ -1093,14 +927,14 @@ static int ahash_finup_ctx(struct ahash_
22220
22221 edesc->src_nents = src_nents;
22222
22223 - ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
22224 + ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
22225 edesc->sec4_sg, DMA_TO_DEVICE);
22226 if (ret)
22227 goto unmap_ctx;
22228
22229 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
22230 - buf, state->buf_dma, buflen,
22231 - last_buflen);
22232 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state);
22233 + if (ret)
22234 + goto unmap_ctx;
22235
22236 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
22237 sec4_sg_src_index, ctx->ctx_len + buflen,
22238 @@ -1136,15 +970,18 @@ static int ahash_digest(struct ahash_req
22239 {
22240 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
22241 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22242 + struct caam_hash_state *state = ahash_request_ctx(req);
22243 struct device *jrdev = ctx->jrdev;
22244 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22245 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22246 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22247 + GFP_KERNEL : GFP_ATOMIC;
22248 u32 *desc;
22249 int digestsize = crypto_ahash_digestsize(ahash);
22250 int src_nents, mapped_nents;
22251 struct ahash_edesc *edesc;
22252 int ret;
22253
22254 + state->buf_dma = 0;
22255 +
22256 src_nents = sg_nents_for_len(req->src, req->nbytes);
22257 if (src_nents < 0) {
22258 dev_err(jrdev, "Invalid number of src SG.\n");
22259 @@ -1215,10 +1052,10 @@ static int ahash_final_no_ctx(struct aha
22260 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22261 struct caam_hash_state *state = ahash_request_ctx(req);
22262 struct device *jrdev = ctx->jrdev;
22263 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22264 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22265 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22266 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22267 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22268 + GFP_KERNEL : GFP_ATOMIC;
22269 + u8 *buf = current_buf(state);
22270 + int buflen = *current_buflen(state);
22271 u32 *desc;
22272 int digestsize = crypto_ahash_digestsize(ahash);
22273 struct ahash_edesc *edesc;
22274 @@ -1249,7 +1086,6 @@ static int ahash_final_no_ctx(struct aha
22275 dev_err(jrdev, "unable to map dst\n");
22276 goto unmap;
22277 }
22278 - edesc->src_nents = 0;
22279
22280 #ifdef DEBUG
22281 print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
22282 @@ -1279,13 +1115,12 @@ static int ahash_update_no_ctx(struct ah
22283 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22284 struct caam_hash_state *state = ahash_request_ctx(req);
22285 struct device *jrdev = ctx->jrdev;
22286 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22287 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22288 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22289 - int *buflen = state->current_buf ? &state->buflen_1 : &state->buflen_0;
22290 - u8 *next_buf = state->current_buf ? state->buf_0 : state->buf_1;
22291 - int *next_buflen = state->current_buf ? &state->buflen_0 :
22292 - &state->buflen_1;
22293 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22294 + GFP_KERNEL : GFP_ATOMIC;
22295 + u8 *buf = current_buf(state);
22296 + int *buflen = current_buflen(state);
22297 + u8 *next_buf = alt_buf(state);
22298 + int *next_buflen = alt_buflen(state);
22299 int in_len = *buflen + req->nbytes, to_hash;
22300 int sec4_sg_bytes, src_nents, mapped_nents;
22301 struct ahash_edesc *edesc;
22302 @@ -1332,10 +1167,11 @@ static int ahash_update_no_ctx(struct ah
22303
22304 edesc->src_nents = src_nents;
22305 edesc->sec4_sg_bytes = sec4_sg_bytes;
22306 - edesc->dst_dma = 0;
22307
22308 - state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
22309 - buf, *buflen);
22310 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
22311 + if (ret)
22312 + goto unmap_ctx;
22313 +
22314 sg_to_sec4_sg_last(req->src, mapped_nents,
22315 edesc->sec4_sg + 1, 0);
22316
22317 @@ -1345,8 +1181,6 @@ static int ahash_update_no_ctx(struct ah
22318 *next_buflen, 0);
22319 }
22320
22321 - state->current_buf = !state->current_buf;
22322 -
22323 desc = edesc->hw_desc;
22324
22325 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
22326 @@ -1406,12 +1240,9 @@ static int ahash_finup_no_ctx(struct aha
22327 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22328 struct caam_hash_state *state = ahash_request_ctx(req);
22329 struct device *jrdev = ctx->jrdev;
22330 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22331 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22332 - u8 *buf = state->current_buf ? state->buf_1 : state->buf_0;
22333 - int buflen = state->current_buf ? state->buflen_1 : state->buflen_0;
22334 - int last_buflen = state->current_buf ? state->buflen_0 :
22335 - state->buflen_1;
22336 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22337 + GFP_KERNEL : GFP_ATOMIC;
22338 + int buflen = *current_buflen(state);
22339 u32 *desc;
22340 int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
22341 int digestsize = crypto_ahash_digestsize(ahash);
22342 @@ -1453,9 +1284,9 @@ static int ahash_finup_no_ctx(struct aha
22343 edesc->src_nents = src_nents;
22344 edesc->sec4_sg_bytes = sec4_sg_bytes;
22345
22346 - state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
22347 - state->buf_dma, buflen,
22348 - last_buflen);
22349 + ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
22350 + if (ret)
22351 + goto unmap;
22352
22353 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
22354 req->nbytes);
22355 @@ -1499,11 +1330,10 @@ static int ahash_update_first(struct aha
22356 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
22357 struct caam_hash_state *state = ahash_request_ctx(req);
22358 struct device *jrdev = ctx->jrdev;
22359 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22360 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22361 - u8 *next_buf = state->current_buf ? state->buf_1 : state->buf_0;
22362 - int *next_buflen = state->current_buf ?
22363 - &state->buflen_1 : &state->buflen_0;
22364 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22365 + GFP_KERNEL : GFP_ATOMIC;
22366 + u8 *next_buf = alt_buf(state);
22367 + int *next_buflen = alt_buflen(state);
22368 int to_hash;
22369 u32 *desc;
22370 int src_nents, mapped_nents;
22371 @@ -1548,7 +1378,6 @@ static int ahash_update_first(struct aha
22372 }
22373
22374 edesc->src_nents = src_nents;
22375 - edesc->dst_dma = 0;
22376
22377 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
22378 to_hash);
22379 @@ -1585,6 +1414,7 @@ static int ahash_update_first(struct aha
22380 state->final = ahash_final_no_ctx;
22381 scatterwalk_map_and_copy(next_buf, req->src, 0,
22382 req->nbytes, 0);
22383 + switch_buf(state);
22384 }
22385 #ifdef DEBUG
22386 print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
22387 @@ -1691,7 +1521,6 @@ struct caam_hash_template {
22388 unsigned int blocksize;
22389 struct ahash_alg template_ahash;
22390 u32 alg_type;
22391 - u32 alg_op;
22392 };
22393
22394 /* ahash descriptors */
22395 @@ -1717,7 +1546,6 @@ static struct caam_hash_template driver_
22396 },
22397 },
22398 .alg_type = OP_ALG_ALGSEL_SHA1,
22399 - .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
22400 }, {
22401 .name = "sha224",
22402 .driver_name = "sha224-caam",
22403 @@ -1739,7 +1567,6 @@ static struct caam_hash_template driver_
22404 },
22405 },
22406 .alg_type = OP_ALG_ALGSEL_SHA224,
22407 - .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
22408 }, {
22409 .name = "sha256",
22410 .driver_name = "sha256-caam",
22411 @@ -1761,7 +1588,6 @@ static struct caam_hash_template driver_
22412 },
22413 },
22414 .alg_type = OP_ALG_ALGSEL_SHA256,
22415 - .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
22416 }, {
22417 .name = "sha384",
22418 .driver_name = "sha384-caam",
22419 @@ -1783,7 +1609,6 @@ static struct caam_hash_template driver_
22420 },
22421 },
22422 .alg_type = OP_ALG_ALGSEL_SHA384,
22423 - .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
22424 }, {
22425 .name = "sha512",
22426 .driver_name = "sha512-caam",
22427 @@ -1805,7 +1630,6 @@ static struct caam_hash_template driver_
22428 },
22429 },
22430 .alg_type = OP_ALG_ALGSEL_SHA512,
22431 - .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
22432 }, {
22433 .name = "md5",
22434 .driver_name = "md5-caam",
22435 @@ -1827,14 +1651,12 @@ static struct caam_hash_template driver_
22436 },
22437 },
22438 .alg_type = OP_ALG_ALGSEL_MD5,
22439 - .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
22440 },
22441 };
22442
22443 struct caam_hash_alg {
22444 struct list_head entry;
22445 int alg_type;
22446 - int alg_op;
22447 struct ahash_alg ahash_alg;
22448 };
22449
22450 @@ -1856,6 +1678,7 @@ static int caam_hash_cra_init(struct cry
22451 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
22452 HASH_MSG_LEN + 64,
22453 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
22454 + dma_addr_t dma_addr;
22455
22456 /*
22457 * Get a Job ring from Job Ring driver to ensure in-order
22458 @@ -1866,11 +1689,31 @@ static int caam_hash_cra_init(struct cry
22459 pr_err("Job Ring Device allocation for transform failed\n");
22460 return PTR_ERR(ctx->jrdev);
22461 }
22462 +
22463 + dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
22464 + offsetof(struct caam_hash_ctx,
22465 + sh_desc_update_dma),
22466 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
22467 + if (dma_mapping_error(ctx->jrdev, dma_addr)) {
22468 + dev_err(ctx->jrdev, "unable to map shared descriptors\n");
22469 + caam_jr_free(ctx->jrdev);
22470 + return -ENOMEM;
22471 + }
22472 +
22473 + ctx->sh_desc_update_dma = dma_addr;
22474 + ctx->sh_desc_update_first_dma = dma_addr +
22475 + offsetof(struct caam_hash_ctx,
22476 + sh_desc_update_first);
22477 + ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
22478 + sh_desc_fin);
22479 + ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
22480 + sh_desc_digest);
22481 +
22482 /* copy descriptor header template value */
22483 - ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
22484 - ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
22485 + ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
22486
22487 - ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
22488 + ctx->ctx_len = runninglen[(ctx->adata.algtype &
22489 + OP_ALG_ALGSEL_SUBMASK) >>
22490 OP_ALG_ALGSEL_SHIFT];
22491
22492 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
22493 @@ -1882,30 +1725,10 @@ static void caam_hash_cra_exit(struct cr
22494 {
22495 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
22496
22497 - if (ctx->sh_desc_update_dma &&
22498 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
22499 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
22500 - desc_bytes(ctx->sh_desc_update),
22501 - DMA_TO_DEVICE);
22502 - if (ctx->sh_desc_update_first_dma &&
22503 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
22504 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
22505 - desc_bytes(ctx->sh_desc_update_first),
22506 - DMA_TO_DEVICE);
22507 - if (ctx->sh_desc_fin_dma &&
22508 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
22509 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
22510 - desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
22511 - if (ctx->sh_desc_digest_dma &&
22512 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
22513 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
22514 - desc_bytes(ctx->sh_desc_digest),
22515 - DMA_TO_DEVICE);
22516 - if (ctx->sh_desc_finup_dma &&
22517 - !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
22518 - dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
22519 - desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
22520 -
22521 + dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
22522 + offsetof(struct caam_hash_ctx,
22523 + sh_desc_update_dma),
22524 + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
22525 caam_jr_free(ctx->jrdev);
22526 }
22527
22528 @@ -1964,7 +1787,6 @@ caam_hash_alloc(struct caam_hash_templat
22529 alg->cra_type = &crypto_ahash_type;
22530
22531 t_alg->alg_type = template->alg_type;
22532 - t_alg->alg_op = template->alg_op;
22533
22534 return t_alg;
22535 }
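
In the hunks above, caam_hash_cra_init() now maps all remaining shared descriptors with one dma_map_single_attrs() call and derives each descriptor's DMA address with offsetof(), so ahash_set_sh_desc() only needs dma_sync_single_for_device() after rewriting a descriptor and caam_hash_cra_exit() undoes everything with a single unmap. A minimal sketch of the idiom, using a hypothetical two-descriptor context (my_ctx, desc_a, desc_b, MAX_LEN and my_ctx_map are illustrative names, not driver code):

	struct my_ctx {
		u32 desc_a[MAX_LEN] ____cacheline_aligned;
		u32 desc_b[MAX_LEN] ____cacheline_aligned;
		dma_addr_t desc_a_dma ____cacheline_aligned;
		dma_addr_t desc_b_dma;
	};

	static int my_ctx_map(struct device *dev, struct my_ctx *ctx)
	{
		/* one mapping covers every descriptor up to the first DMA handle */
		dma_addr_t base = dma_map_single_attrs(dev, ctx->desc_a,
						       offsetof(struct my_ctx,
								desc_a_dma),
						       DMA_TO_DEVICE,
						       DMA_ATTR_SKIP_CPU_SYNC);

		if (dma_mapping_error(dev, base))
			return -ENOMEM;

		ctx->desc_a_dma = base;
		ctx->desc_b_dma = base + offsetof(struct my_ctx, desc_b);
		return 0;
	}
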
22536 --- /dev/null
22537 +++ b/drivers/crypto/caam/caamhash_desc.c
22538 @@ -0,0 +1,108 @@
22539 +/*
22540 + * Shared descriptors for ahash algorithms
22541 + *
22542 + * Copyright 2017 NXP
22543 + *
22544 + * Redistribution and use in source and binary forms, with or without
22545 + * modification, are permitted provided that the following conditions are met:
22546 + * * Redistributions of source code must retain the above copyright
22547 + * notice, this list of conditions and the following disclaimer.
22548 + * * Redistributions in binary form must reproduce the above copyright
22549 + * notice, this list of conditions and the following disclaimer in the
22550 + * documentation and/or other materials provided with the distribution.
22551 + * * Neither the names of the above-listed copyright holders nor the
22552 + * names of any contributors may be used to endorse or promote products
22553 + * derived from this software without specific prior written permission.
22554 + *
22555 + *
22556 + * ALTERNATIVELY, this software may be distributed under the terms of the
22557 + * GNU General Public License ("GPL") as published by the Free Software
22558 + * Foundation, either version 2 of that License or (at your option) any
22559 + * later version.
22560 + *
22561 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22562 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22563 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22564 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22565 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22566 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22567 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22568 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22569 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22570 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22571 + * POSSIBILITY OF SUCH DAMAGE.
22572 + */
22573 +
22574 +#include "compat.h"
22575 +#include "desc_constr.h"
22576 +#include "caamhash_desc.h"
22577 +
22578 +/**
22579 + * cnstr_shdsc_ahash - ahash shared descriptor
22580 + * @desc: pointer to buffer used for descriptor construction
22581 + * @adata: pointer to authentication transform definitions.
22582 + * A split key is required for SEC Era < 6; the size of the split key
22583 + * is specified in this case.
22584 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
22585 + * SHA256, SHA384, SHA512}.
22586 + * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
22587 + * @digestsize: algorithm's digest size
22588 + * @ctx_len: size of Context Register
22589 + * @import_ctx: true if previous Context Register needs to be restored
22590 + * must be true for ahash update and final
22591 + *              must be false for ahash first and digest
22592 + * @era: SEC Era
22593 + */
22594 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
22595 + int digestsize, int ctx_len, bool import_ctx, int era)
22596 +{
22597 + u32 op = adata->algtype;
22598 +
22599 + init_sh_desc(desc, HDR_SHARE_SERIAL);
22600 +
22601 + /* Append key if it has been set; ahash update excluded */
22602 + if (state != OP_ALG_AS_UPDATE && adata->keylen) {
22603 + u32 *skip_key_load;
22604 +
22605 + /* Skip key loading if already shared */
22606 + skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
22607 + JUMP_COND_SHRD);
22608 +
22609 + if (era < 6)
22610 + append_key_as_imm(desc, adata->key_virt,
22611 + adata->keylen_pad,
22612 + adata->keylen, CLASS_2 |
22613 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
22614 + else
22615 + append_proto_dkp(desc, adata);
22616 +
22617 + set_jump_tgt_here(desc, skip_key_load);
22618 +
22619 + op |= OP_ALG_AAI_HMAC_PRECOMP;
22620 + }
22621 +
22622 + /* If needed, import context from software */
22623 + if (import_ctx)
22624 + append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
22625 + LDST_SRCDST_BYTE_CONTEXT);
22626 +
22627 + /* Class 2 operation */
22628 + append_operation(desc, op | state | OP_ALG_ENCRYPT);
22629 +
22630 + /*
22631 + * Load from buf and/or src and write to req->result or state->context
22632 + * Calculate remaining bytes to read
22633 + */
22634 + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
22635 + /* Read remaining bytes */
22636 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
22637 + FIFOLD_TYPE_MSG | KEY_VLF);
22638 + /* Store class2 context bytes */
22639 + append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
22640 + LDST_SRCDST_BYTE_CONTEXT);
22641 +}
22642 +EXPORT_SYMBOL(cnstr_shdsc_ahash);
22643 +
22644 +MODULE_LICENSE("Dual BSD/GPL");
22645 +MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
22646 +MODULE_AUTHOR("NXP Semiconductors");
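
A condensed sketch of how the caamhash.c hunks above drive this constructor for the four remaining shared descriptors; import_ctx is true only when a previous Context Register must be restored (update and final), and era stands for ctrlpriv->era:

	ctx->adata.key_virt = ctx->key;

	/* update: restore the running context, stay in UPDATE state */
	cnstr_shdsc_ahash(ctx->sh_desc_update, &ctx->adata, OP_ALG_AS_UPDATE,
			  ctx->ctx_len, ctx->ctx_len, true, era);

	/* update_first: nothing to restore yet */
	cnstr_shdsc_ahash(ctx->sh_desc_update_first, &ctx->adata,
			  OP_ALG_AS_INIT, ctx->ctx_len, ctx->ctx_len, false,
			  era);

	/* final (also used for finup): restore context, emit the digest */
	cnstr_shdsc_ahash(ctx->sh_desc_fin, &ctx->adata, OP_ALG_AS_FINALIZE,
			  digestsize, ctx->ctx_len, true, era);

	/* digest: one-shot operation, no context restore */
	cnstr_shdsc_ahash(ctx->sh_desc_digest, &ctx->adata,
			  OP_ALG_AS_INITFINAL, digestsize, ctx->ctx_len, false,
			  era);
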
22647 --- /dev/null
22648 +++ b/drivers/crypto/caam/caamhash_desc.h
22649 @@ -0,0 +1,49 @@
22650 +/*
22651 + * Shared descriptors for ahash algorithms
22652 + *
22653 + * Copyright 2017 NXP
22654 + *
22655 + * Redistribution and use in source and binary forms, with or without
22656 + * modification, are permitted provided that the following conditions are met:
22657 + * * Redistributions of source code must retain the above copyright
22658 + * notice, this list of conditions and the following disclaimer.
22659 + * * Redistributions in binary form must reproduce the above copyright
22660 + * notice, this list of conditions and the following disclaimer in the
22661 + * documentation and/or other materials provided with the distribution.
22662 + * * Neither the names of the above-listed copyright holders nor the
22663 + * names of any contributors may be used to endorse or promote products
22664 + * derived from this software without specific prior written permission.
22665 + *
22666 + *
22667 + * ALTERNATIVELY, this software may be distributed under the terms of the
22668 + * GNU General Public License ("GPL") as published by the Free Software
22669 + * Foundation, either version 2 of that License or (at your option) any
22670 + * later version.
22671 + *
22672 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22673 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22674 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22675 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
22676 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22677 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22678 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22679 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
22680 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
22681 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
22682 + * POSSIBILITY OF SUCH DAMAGE.
22683 + */
22684 +
22685 +#ifndef _CAAMHASH_DESC_H_
22686 +#define _CAAMHASH_DESC_H_
22687 +
22688 +/* length of descriptors text */
22689 +#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
22690 +#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
22691 +#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
22692 +#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
22693 +#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
22694 +
22695 +void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
22696 + int digestsize, int ctx_len, bool import_ctx, int era);
22697 +
22698 +#endif /* _CAAMHASH_DESC_H_ */
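
For reference, assuming CAAM_CMD_SZ is the usual 4-byte (one 32-bit command word) size, the descriptor text budgets above evaluate to:

	/*
	 * DESC_AHASH_BASE             = 3 * 4      = 12 bytes
	 * DESC_AHASH_UPDATE_LEN       = 6 * 4      = 24 bytes
	 * DESC_AHASH_UPDATE_FIRST_LEN = 12 + 4 * 4 = 28 bytes
	 * DESC_AHASH_FINAL_LEN        = 12 + 5 * 4 = 32 bytes
	 * DESC_AHASH_DIGEST_LEN       = 12 + 4 * 4 = 28 bytes
	 */

DESC_HASH_MAX_USED_BYTES in caamhash.c then adds CAAM_MAX_HASH_KEY_SIZE on top of DESC_AHASH_FINAL_LEN so the final descriptor still fits when the split key is inlined.
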
22699 --- a/drivers/crypto/caam/caampkc.c
22700 +++ b/drivers/crypto/caam/caampkc.c
22701 @@ -18,6 +18,10 @@
22702 #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
22703 #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
22704 sizeof(struct rsa_priv_f1_pdb))
22705 +#define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \
22706 + sizeof(struct rsa_priv_f2_pdb))
22707 +#define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \
22708 + sizeof(struct rsa_priv_f3_pdb))
22709
22710 static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
22711 struct akcipher_request *req)
22712 @@ -54,6 +58,42 @@ static void rsa_priv_f1_unmap(struct dev
22713 dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
22714 }
22715
22716 +static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
22717 + struct akcipher_request *req)
22718 +{
22719 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22720 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22721 + struct caam_rsa_key *key = &ctx->key;
22722 + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
22723 + size_t p_sz = key->p_sz;
22724 +	size_t q_sz = key->q_sz;
22725 +
22726 + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
22727 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
22728 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
22729 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
22730 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
22731 +}
22732 +
22733 +static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
22734 + struct akcipher_request *req)
22735 +{
22736 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22737 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22738 + struct caam_rsa_key *key = &ctx->key;
22739 + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
22740 + size_t p_sz = key->p_sz;
22741 +	size_t q_sz = key->q_sz;
22742 +
22743 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
22744 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
22745 + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
22746 + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
22747 + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
22748 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
22749 + dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
22750 +}
22751 +
22752 /* RSA Job Completion handler */
22753 static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
22754 {
22755 @@ -90,6 +130,42 @@ static void rsa_priv_f1_done(struct devi
22756 akcipher_request_complete(req, err);
22757 }
22758
22759 +static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
22760 + void *context)
22761 +{
22762 + struct akcipher_request *req = context;
22763 + struct rsa_edesc *edesc;
22764 +
22765 + if (err)
22766 + caam_jr_strstatus(dev, err);
22767 +
22768 + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
22769 +
22770 + rsa_priv_f2_unmap(dev, edesc, req);
22771 + rsa_io_unmap(dev, edesc, req);
22772 + kfree(edesc);
22773 +
22774 + akcipher_request_complete(req, err);
22775 +}
22776 +
22777 +static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
22778 + void *context)
22779 +{
22780 + struct akcipher_request *req = context;
22781 + struct rsa_edesc *edesc;
22782 +
22783 + if (err)
22784 + caam_jr_strstatus(dev, err);
22785 +
22786 + edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);
22787 +
22788 + rsa_priv_f3_unmap(dev, edesc, req);
22789 + rsa_io_unmap(dev, edesc, req);
22790 + kfree(edesc);
22791 +
22792 + akcipher_request_complete(req, err);
22793 +}
22794 +
22795 static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
22796 size_t desclen)
22797 {
22798 @@ -97,8 +173,8 @@ static struct rsa_edesc *rsa_edesc_alloc
22799 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22800 struct device *dev = ctx->dev;
22801 struct rsa_edesc *edesc;
22802 - gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
22803 - CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
22804 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
22805 + GFP_KERNEL : GFP_ATOMIC;
22806 int sgc;
22807 int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
22808 int src_nents, dst_nents;
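
As in the caamhash.c hunks earlier in this patch, the allocation-context test is narrowed to CRYPTO_TFM_REQ_MAY_SLEEP: only a request allowed to sleep gets a blocking GFP_KERNEL allocation, while MAY_BACKLOG alone no longer does. The resulting idiom, as used in rsa_edesc_alloc() above:

	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
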
22809 @@ -258,6 +334,172 @@ static int set_rsa_priv_f1_pdb(struct ak
22810 return 0;
22811 }
22812
22813 +static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
22814 + struct rsa_edesc *edesc)
22815 +{
22816 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22817 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22818 + struct caam_rsa_key *key = &ctx->key;
22819 + struct device *dev = ctx->dev;
22820 + struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
22821 + int sec4_sg_index = 0;
22822 + size_t p_sz = key->p_sz;
22823 +	size_t q_sz = key->q_sz;
22824 +
22825 + pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
22826 + if (dma_mapping_error(dev, pdb->d_dma)) {
22827 + dev_err(dev, "Unable to map RSA private exponent memory\n");
22828 + return -ENOMEM;
22829 + }
22830 +
22831 + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
22832 + if (dma_mapping_error(dev, pdb->p_dma)) {
22833 + dev_err(dev, "Unable to map RSA prime factor p memory\n");
22834 + goto unmap_d;
22835 + }
22836 +
22837 + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
22838 + if (dma_mapping_error(dev, pdb->q_dma)) {
22839 + dev_err(dev, "Unable to map RSA prime factor q memory\n");
22840 + goto unmap_p;
22841 + }
22842 +
22843 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
22844 + if (dma_mapping_error(dev, pdb->tmp1_dma)) {
22845 + dev_err(dev, "Unable to map RSA tmp1 memory\n");
22846 + goto unmap_q;
22847 + }
22848 +
22849 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
22850 + if (dma_mapping_error(dev, pdb->tmp2_dma)) {
22851 + dev_err(dev, "Unable to map RSA tmp2 memory\n");
22852 + goto unmap_tmp1;
22853 + }
22854 +
22855 + if (edesc->src_nents > 1) {
22856 + pdb->sgf |= RSA_PRIV_PDB_SGF_G;
22857 + pdb->g_dma = edesc->sec4_sg_dma;
22858 + sec4_sg_index += edesc->src_nents;
22859 + } else {
22860 + pdb->g_dma = sg_dma_address(req->src);
22861 + }
22862 +
22863 + if (edesc->dst_nents > 1) {
22864 + pdb->sgf |= RSA_PRIV_PDB_SGF_F;
22865 + pdb->f_dma = edesc->sec4_sg_dma +
22866 + sec4_sg_index * sizeof(struct sec4_sg_entry);
22867 + } else {
22868 + pdb->f_dma = sg_dma_address(req->dst);
22869 + }
22870 +
22871 + pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
22872 + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
22873 +
22874 + return 0;
22875 +
22876 +unmap_tmp1:
22877 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
22878 +unmap_q:
22879 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
22880 +unmap_p:
22881 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
22882 +unmap_d:
22883 + dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
22884 +
22885 + return -ENOMEM;
22886 +}
22887 +
22888 +static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
22889 + struct rsa_edesc *edesc)
22890 +{
22891 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22892 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22893 + struct caam_rsa_key *key = &ctx->key;
22894 + struct device *dev = ctx->dev;
22895 + struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
22896 + int sec4_sg_index = 0;
22897 + size_t p_sz = key->p_sz;
22898 + size_t q_sz = key->p_sz;
22899 +	size_t q_sz = key->q_sz;
22900 + pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
22901 + if (dma_mapping_error(dev, pdb->p_dma)) {
22902 + dev_err(dev, "Unable to map RSA prime factor p memory\n");
22903 + return -ENOMEM;
22904 + }
22905 +
22906 + pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
22907 + if (dma_mapping_error(dev, pdb->q_dma)) {
22908 + dev_err(dev, "Unable to map RSA prime factor q memory\n");
22909 + goto unmap_p;
22910 + }
22911 +
22912 + pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
22913 + if (dma_mapping_error(dev, pdb->dp_dma)) {
22914 + dev_err(dev, "Unable to map RSA exponent dp memory\n");
22915 + goto unmap_q;
22916 + }
22917 +
22918 + pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
22919 + if (dma_mapping_error(dev, pdb->dq_dma)) {
22920 + dev_err(dev, "Unable to map RSA exponent dq memory\n");
22921 + goto unmap_dp;
22922 + }
22923 +
22924 + pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
22925 + if (dma_mapping_error(dev, pdb->c_dma)) {
22926 + dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
22927 + goto unmap_dq;
22928 + }
22929 +
22930 + pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
22931 + if (dma_mapping_error(dev, pdb->tmp1_dma)) {
22932 + dev_err(dev, "Unable to map RSA tmp1 memory\n");
22933 + goto unmap_qinv;
22934 + }
22935 +
22936 + pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
22937 + if (dma_mapping_error(dev, pdb->tmp2_dma)) {
22938 + dev_err(dev, "Unable to map RSA tmp2 memory\n");
22939 + goto unmap_tmp1;
22940 + }
22941 +
22942 + if (edesc->src_nents > 1) {
22943 + pdb->sgf |= RSA_PRIV_PDB_SGF_G;
22944 + pdb->g_dma = edesc->sec4_sg_dma;
22945 + sec4_sg_index += edesc->src_nents;
22946 + } else {
22947 + pdb->g_dma = sg_dma_address(req->src);
22948 + }
22949 +
22950 + if (edesc->dst_nents > 1) {
22951 + pdb->sgf |= RSA_PRIV_PDB_SGF_F;
22952 + pdb->f_dma = edesc->sec4_sg_dma +
22953 + sec4_sg_index * sizeof(struct sec4_sg_entry);
22954 + } else {
22955 + pdb->f_dma = sg_dma_address(req->dst);
22956 + }
22957 +
22958 + pdb->sgf |= key->n_sz;
22959 + pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;
22960 +
22961 + return 0;
22962 +
22963 +unmap_tmp1:
22964 + dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
22965 +unmap_qinv:
22966 + dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
22967 +unmap_dq:
22968 + dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
22969 +unmap_dp:
22970 + dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
22971 +unmap_q:
22972 + dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
22973 +unmap_p:
22974 + dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
22975 +
22976 + return -ENOMEM;
22977 +}
22978 +
22979 static int caam_rsa_enc(struct akcipher_request *req)
22980 {
22981 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22982 @@ -301,24 +543,14 @@ init_fail:
22983 return ret;
22984 }
22985
22986 -static int caam_rsa_dec(struct akcipher_request *req)
22987 +static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
22988 {
22989 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
22990 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
22991 - struct caam_rsa_key *key = &ctx->key;
22992 struct device *jrdev = ctx->dev;
22993 struct rsa_edesc *edesc;
22994 int ret;
22995
22996 - if (unlikely(!key->n || !key->d))
22997 - return -EINVAL;
22998 -
22999 - if (req->dst_len < key->n_sz) {
23000 - req->dst_len = key->n_sz;
23001 - dev_err(jrdev, "Output buffer length less than parameter n\n");
23002 - return -EOVERFLOW;
23003 - }
23004 -
23005 /* Allocate extended descriptor */
23006 edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
23007 if (IS_ERR(edesc))
23008 @@ -344,17 +576,147 @@ init_fail:
23009 return ret;
23010 }
23011
23012 +static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
23013 +{
23014 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23015 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23016 + struct device *jrdev = ctx->dev;
23017 + struct rsa_edesc *edesc;
23018 + int ret;
23019 +
23020 + /* Allocate extended descriptor */
23021 + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
23022 + if (IS_ERR(edesc))
23023 + return PTR_ERR(edesc);
23024 +
23025 + /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
23026 + ret = set_rsa_priv_f2_pdb(req, edesc);
23027 + if (ret)
23028 + goto init_fail;
23029 +
23030 + /* Initialize Job Descriptor */
23031 + init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);
23032 +
23033 + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
23034 + if (!ret)
23035 + return -EINPROGRESS;
23036 +
23037 + rsa_priv_f2_unmap(jrdev, edesc, req);
23038 +
23039 +init_fail:
23040 + rsa_io_unmap(jrdev, edesc, req);
23041 + kfree(edesc);
23042 + return ret;
23043 +}
23044 +
23045 +static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
23046 +{
23047 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23048 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23049 + struct device *jrdev = ctx->dev;
23050 + struct rsa_edesc *edesc;
23051 + int ret;
23052 +
23053 + /* Allocate extended descriptor */
23054 + edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
23055 + if (IS_ERR(edesc))
23056 + return PTR_ERR(edesc);
23057 +
23058 + /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
23059 + ret = set_rsa_priv_f3_pdb(req, edesc);
23060 + if (ret)
23061 + goto init_fail;
23062 +
23063 + /* Initialize Job Descriptor */
23064 + init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);
23065 +
23066 + ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
23067 + if (!ret)
23068 + return -EINPROGRESS;
23069 +
23070 + rsa_priv_f3_unmap(jrdev, edesc, req);
23071 +
23072 +init_fail:
23073 + rsa_io_unmap(jrdev, edesc, req);
23074 + kfree(edesc);
23075 + return ret;
23076 +}
23077 +
23078 +static int caam_rsa_dec(struct akcipher_request *req)
23079 +{
23080 + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
23081 + struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23082 + struct caam_rsa_key *key = &ctx->key;
23083 + int ret;
23084 +
23085 + if (unlikely(!key->n || !key->d))
23086 + return -EINVAL;
23087 +
23088 + if (req->dst_len < key->n_sz) {
23089 + req->dst_len = key->n_sz;
23090 + dev_err(ctx->dev, "Output buffer length less than parameter n\n");
23091 + return -EOVERFLOW;
23092 + }
23093 +
23094 + if (key->priv_form == FORM3)
23095 + ret = caam_rsa_dec_priv_f3(req);
23096 + else if (key->priv_form == FORM2)
23097 + ret = caam_rsa_dec_priv_f2(req);
23098 + else
23099 + ret = caam_rsa_dec_priv_f1(req);
23100 +
23101 + return ret;
23102 +}
23103 +
23104 static void caam_rsa_free_key(struct caam_rsa_key *key)
23105 {
23106 kzfree(key->d);
23107 + kzfree(key->p);
23108 + kzfree(key->q);
23109 + kzfree(key->dp);
23110 + kzfree(key->dq);
23111 + kzfree(key->qinv);
23112 + kzfree(key->tmp1);
23113 + kzfree(key->tmp2);
23114 kfree(key->e);
23115 kfree(key->n);
23116 - key->d = NULL;
23117 - key->e = NULL;
23118 - key->n = NULL;
23119 - key->d_sz = 0;
23120 - key->e_sz = 0;
23121 - key->n_sz = 0;
23122 + memset(key, 0, sizeof(*key));
23123 +}
23124 +
23125 +static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
23126 +{
23127 + while (!**ptr && *nbytes) {
23128 + (*ptr)++;
23129 + (*nbytes)--;
23130 + }
23131 +}
23132 +
23133 +/**
23134 + * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
23135 + * dP, dQ and qInv could decode to less than the corresponding p, q length, as the
23136 + * BER-encoding requires that the minimum number of bytes be used to encode the
23137 + * integer. dP, dQ, qInv decoded values have to be zero-padded to the appropriate
23138 + * length.
23139 + *
23140 + * @ptr : pointer to {dP, dQ, qInv} CRT member
23141 + * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
23142 + * @dstlen: length in bytes of corresponding p or q prime factor
23143 + */
23144 +static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
23145 +{
23146 + u8 *dst;
23147 +
23148 + caam_rsa_drop_leading_zeros(&ptr, &nbytes);
23149 + if (!nbytes)
23150 + return NULL;
23151 +
23152 + dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
23153 + if (!dst)
23154 + return NULL;
23155 +
23156 + memcpy(dst + (dstlen - nbytes), ptr, nbytes);
23157 +
23158 + return dst;
23159 }
23160
23161 /**
23162 @@ -370,10 +732,9 @@ static inline u8 *caam_read_raw_data(con
23163 {
23164 u8 *val;
23165
23166 - while (!*buf && *nbytes) {
23167 - buf++;
23168 - (*nbytes)--;
23169 - }
23170 + caam_rsa_drop_leading_zeros(&buf, nbytes);
23171 + if (!*nbytes)
23172 + return NULL;
23173
23174 val = kzalloc(*nbytes, GFP_DMA | GFP_KERNEL);
23175 if (!val)
23176 @@ -395,7 +756,7 @@ static int caam_rsa_set_pub_key(struct c
23177 unsigned int keylen)
23178 {
23179 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23180 - struct rsa_key raw_key = {0};
23181 + struct rsa_key raw_key = {NULL};
23182 struct caam_rsa_key *rsa_key = &ctx->key;
23183 int ret;
23184
23185 @@ -437,11 +798,69 @@ err:
23186 return -ENOMEM;
23187 }
23188
23189 +static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
23190 + struct rsa_key *raw_key)
23191 +{
23192 + struct caam_rsa_key *rsa_key = &ctx->key;
23193 + size_t p_sz = raw_key->p_sz;
23194 + size_t q_sz = raw_key->q_sz;
23195 +
23196 + rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
23197 + if (!rsa_key->p)
23198 + return;
23199 + rsa_key->p_sz = p_sz;
23200 +
23201 + rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
23202 + if (!rsa_key->q)
23203 + goto free_p;
23204 + rsa_key->q_sz = q_sz;
23205 +
23206 + rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
23207 + if (!rsa_key->tmp1)
23208 + goto free_q;
23209 +
23210 + rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
23211 + if (!rsa_key->tmp2)
23212 + goto free_tmp1;
23213 +
23214 + rsa_key->priv_form = FORM2;
23215 +
23216 + rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
23217 + if (!rsa_key->dp)
23218 + goto free_tmp2;
23219 +
23220 + rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
23221 + if (!rsa_key->dq)
23222 + goto free_dp;
23223 +
23224 + rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
23225 + q_sz);
23226 + if (!rsa_key->qinv)
23227 + goto free_dq;
23228 +
23229 + rsa_key->priv_form = FORM3;
23230 +
23231 + return;
23232 +
23233 +free_dq:
23234 + kzfree(rsa_key->dq);
23235 +free_dp:
23236 + kzfree(rsa_key->dp);
23237 +free_tmp2:
23238 + kzfree(rsa_key->tmp2);
23239 +free_tmp1:
23240 + kzfree(rsa_key->tmp1);
23241 +free_q:
23242 + kzfree(rsa_key->q);
23243 +free_p:
23244 + kzfree(rsa_key->p);
23245 +}
23246 +
23247 static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
23248 unsigned int keylen)
23249 {
23250 struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
23251 - struct rsa_key raw_key = {0};
23252 + struct rsa_key raw_key = {NULL};
23253 struct caam_rsa_key *rsa_key = &ctx->key;
23254 int ret;
23255
23256 @@ -483,6 +902,8 @@ static int caam_rsa_set_priv_key(struct
23257 memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
23258 memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);
23259
23260 + caam_rsa_set_priv_key_form(ctx, &raw_key);
23261 +
23262 return 0;
23263
23264 err:
23265 --- a/drivers/crypto/caam/caampkc.h
23266 +++ b/drivers/crypto/caam/caampkc.h
23267 @@ -13,21 +13,75 @@
23268 #include "pdb.h"
23269
23270 /**
23271 + * caam_priv_key_form - CAAM RSA private key representation
23272 + * A CAAM RSA private key may have any of three forms.
23273 + *
23274 + * 1. The first representation consists of the pair (n, d), where the
23275 + * components have the following meanings:
23276 + * n the RSA modulus
23277 + * d the RSA private exponent
23278 + *
23279 + * 2. The second representation consists of the triplet (p, q, d), where the
23280 + * components have the following meanings:
23281 + * p the first prime factor of the RSA modulus n
23282 + * q the second prime factor of the RSA modulus n
23283 + * d the RSA private exponent
23284 + *
23285 + * 3. The third representation consists of the quintuple (p, q, dP, dQ, qInv),
23286 + * where the components have the following meanings:
23287 + * p the first prime factor of the RSA modulus n
23288 + * q the second prime factor of the RSA modulus n
23289 + * dP the first factor's CRT exponent
23290 + * dQ the second factor's CRT exponent
23291 + * qInv the (first) CRT coefficient
23292 + *
23293 + * The benefit of using the third or the second key form is lower computational
23294 + * cost for the decryption and signature operations.
23295 + */
23296 +enum caam_priv_key_form {
23297 + FORM1,
23298 + FORM2,
23299 + FORM3
23300 +};
23301 +
23302 +/**
23303 * caam_rsa_key - CAAM RSA key structure. Keys are allocated in DMA zone.
23304 * @n : RSA modulus raw byte stream
23305 * @e : RSA public exponent raw byte stream
23306 * @d : RSA private exponent raw byte stream
23307 + * @p : RSA prime factor p of RSA modulus n
23308 + * @q : RSA prime factor q of RSA modulus n
23309 + * @dp : RSA CRT exponent of p
23310 + * @dq : RSA CRT exponent of q
23311 + * @qinv : RSA CRT coefficient
23312 + * @tmp1 : CAAM uses this temporary buffer as internal state buffer.
23313 + * It is assumed to be as long as p.
23314 + * @tmp2 : CAAM uses this temporary buffer as internal state buffer.
23315 + * It is assumed to be as long as q.
23316 * @n_sz : length in bytes of RSA modulus n
23317 * @e_sz : length in bytes of RSA public exponent
23318 * @d_sz : length in bytes of RSA private exponent
23319 + * @p_sz : length in bytes of RSA prime factor p of RSA modulus n
23320 + * @q_sz : length in bytes of RSA prime factor q of RSA modulus n
23321 + * @priv_form : CAAM RSA private key representation
23322 */
23323 struct caam_rsa_key {
23324 u8 *n;
23325 u8 *e;
23326 u8 *d;
23327 + u8 *p;
23328 + u8 *q;
23329 + u8 *dp;
23330 + u8 *dq;
23331 + u8 *qinv;
23332 + u8 *tmp1;
23333 + u8 *tmp2;
23334 size_t n_sz;
23335 size_t e_sz;
23336 size_t d_sz;
23337 + size_t p_sz;
23338 + size_t q_sz;
23339 + enum caam_priv_key_form priv_form;
23340 };
23341
23342 /**
23343 @@ -59,6 +113,8 @@ struct rsa_edesc {
23344 union {
23345 struct rsa_pub_pdb pub;
23346 struct rsa_priv_f1_pdb priv_f1;
23347 + struct rsa_priv_f2_pdb priv_f2;
23348 + struct rsa_priv_f3_pdb priv_f3;
23349 } pdb;
23350 u32 hw_desc[];
23351 };
23352 @@ -66,5 +122,7 @@ struct rsa_edesc {
23353 /* Descriptor construction primitives. */
23354 void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb);
23355 void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb);
23356 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);
23357 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb);
23358
23359 #endif
23360 --- a/drivers/crypto/caam/caamrng.c
23361 +++ b/drivers/crypto/caam/caamrng.c
23362 @@ -52,7 +52,7 @@
23363
23364 /* length of descriptors */
23365 #define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
23366 -#define DESC_RNG_LEN (4 * CAAM_CMD_SZ)
23367 +#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
23368
23369 /* Buffer, its dma address and lock */
23370 struct buf_data {
23371 @@ -100,8 +100,7 @@ static void rng_done(struct device *jrde
23372 {
23373 struct buf_data *bd;
23374
23375 - bd = (struct buf_data *)((char *)desc -
23376 - offsetof(struct buf_data, hw_desc));
23377 + bd = container_of(desc, struct buf_data, hw_desc[0]);
23378
23379 if (err)
23380 caam_jr_strstatus(jrdev, err);
23381 @@ -196,9 +195,6 @@ static inline int rng_create_sh_desc(str
23382
23383 init_sh_desc(desc, HDR_SHARE_SERIAL);
23384
23385 - /* Propagate errors from shared to job descriptor */
23386 - append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
23387 -
23388 /* Generate random bytes */
23389 append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
23390
23391 @@ -289,11 +285,7 @@ static int caam_init_rng(struct caam_rng
23392 if (err)
23393 return err;
23394
23395 - err = caam_init_buf(ctx, 1);
23396 - if (err)
23397 - return err;
23398 -
23399 - return 0;
23400 + return caam_init_buf(ctx, 1);
23401 }
23402
23403 static struct hwrng caam_rng = {
23404 @@ -351,7 +343,7 @@ static int __init caam_rng_init(void)
23405 pr_err("Job Ring Device allocation for transform failed\n");
23406 return PTR_ERR(dev);
23407 }
23408 - rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
23409 + rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
23410 if (!rng_ctx) {
23411 err = -ENOMEM;
23412 goto free_caam_alloc;
23413 --- a/drivers/crypto/caam/compat.h
23414 +++ b/drivers/crypto/caam/compat.h
23415 @@ -16,6 +16,7 @@
23416 #include <linux/of_platform.h>
23417 #include <linux/dma-mapping.h>
23418 #include <linux/io.h>
23419 +#include <linux/iommu.h>
23420 #include <linux/spinlock.h>
23421 #include <linux/rtnetlink.h>
23422 #include <linux/in.h>
23423 --- a/drivers/crypto/caam/ctrl.c
23424 +++ b/drivers/crypto/caam/ctrl.c
23425 @@ -2,40 +2,41 @@
23426 * Controller-level driver, kernel property detection, initialization
23427 *
23428 * Copyright 2008-2012 Freescale Semiconductor, Inc.
23429 + * Copyright 2017 NXP
23430 */
23431
23432 #include <linux/device.h>
23433 #include <linux/of_address.h>
23434 #include <linux/of_irq.h>
23435 +#include <linux/sys_soc.h>
23436
23437 #include "compat.h"
23438 #include "regs.h"
23439 #include "intern.h"
23440 #include "jr.h"
23441 #include "desc_constr.h"
23442 -#include "error.h"
23443 #include "ctrl.h"
23444
23445 bool caam_little_end;
23446 EXPORT_SYMBOL(caam_little_end);
23447 +bool caam_imx;
23448 +EXPORT_SYMBOL(caam_imx);
23449 +bool caam_dpaa2;
23450 +EXPORT_SYMBOL(caam_dpaa2);
23451 +
23452 +#ifdef CONFIG_CAAM_QI
23453 +#include "qi.h"
23454 +#endif
23455
23456 /*
23457 * i.MX targets tend to have clock control subsystems that can
23458 * enable/disable clocking to our device.
23459 */
23460 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
23461 -static inline struct clk *caam_drv_identify_clk(struct device *dev,
23462 - char *clk_name)
23463 -{
23464 - return devm_clk_get(dev, clk_name);
23465 -}
23466 -#else
23467 static inline struct clk *caam_drv_identify_clk(struct device *dev,
23468 char *clk_name)
23469 {
23470 - return NULL;
23471 + return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
23472 }
23473 -#endif
23474
23475 /*
23476 * Descriptor to instantiate RNG State Handle 0 in normal mode and
23477 @@ -274,7 +275,7 @@ static int deinstantiate_rng(struct devi
23478 /*
23479 * If the corresponding bit is set, then it means the state
23480 * handle was initialized by us, and thus it needs to be
23481 - * deintialized as well
23482 + * deinitialized as well
23483 */
23484 if ((1 << sh_idx) & state_handle_mask) {
23485 /*
23486 @@ -307,20 +308,24 @@ static int caam_remove(struct platform_d
23487 struct device *ctrldev;
23488 struct caam_drv_private *ctrlpriv;
23489 struct caam_ctrl __iomem *ctrl;
23490 - int ring;
23491
23492 ctrldev = &pdev->dev;
23493 ctrlpriv = dev_get_drvdata(ctrldev);
23494 ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
23495
23496 - /* Remove platform devices for JobRs */
23497 - for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
23498 - if (ctrlpriv->jrpdev[ring])
23499 - of_device_unregister(ctrlpriv->jrpdev[ring]);
23500 - }
23501 + /* Remove platform devices under the crypto node */
23502 + of_platform_depopulate(ctrldev);
23503 +
23504 +#ifdef CONFIG_CAAM_QI
23505 + if (ctrlpriv->qidev)
23506 + caam_qi_shutdown(ctrlpriv->qidev);
23507 +#endif
23508
23509 - /* De-initialize RNG state handles initialized by this driver. */
23510 - if (ctrlpriv->rng4_sh_init)
23511 + /*
23512 + * De-initialize RNG state handles initialized by this driver.
23513 + * In case of DPAA 2.x, RNG is managed by MC firmware.
23514 + */
23515 + if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
23516 deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
23517
23518 /* Shut down debug views */
23519 @@ -335,8 +340,8 @@ static int caam_remove(struct platform_d
23520 clk_disable_unprepare(ctrlpriv->caam_ipg);
23521 clk_disable_unprepare(ctrlpriv->caam_mem);
23522 clk_disable_unprepare(ctrlpriv->caam_aclk);
23523 - clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23524 -
23525 + if (ctrlpriv->caam_emi_slow)
23526 + clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23527 return 0;
23528 }
23529
23530 @@ -370,11 +375,8 @@ static void kick_trng(struct platform_de
23531 */
23532 val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
23533 >> RTSDCTL_ENT_DLY_SHIFT;
23534 - if (ent_delay <= val) {
23535 - /* put RNG4 into run mode */
23536 - clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
23537 - return;
23538 - }
23539 + if (ent_delay <= val)
23540 + goto start_rng;
23541
23542 val = rd_reg32(&r4tst->rtsdctl);
23543 val = (val & ~RTSDCTL_ENT_DLY_MASK) |
23544 @@ -386,15 +388,12 @@ static void kick_trng(struct platform_de
23545 wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
23546 /* read the control register */
23547 val = rd_reg32(&r4tst->rtmctl);
23548 +start_rng:
23549 /*
23550 * select raw sampling in both entropy shifter
23551 - * and statistical checker
23552 + * and statistical checker; put RNG4 into run mode
23553 */
23554 - clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
23555 - /* put RNG4 into run mode */
23556 - clrsetbits_32(&val, RTMCTL_PRGM, 0);
23557 - /* write back the control register */
23558 - wr_reg32(&r4tst->rtmctl, val);
23559 + clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
23560 }
23561
23562 /**
23563 @@ -415,28 +414,26 @@ int caam_get_era(void)
23564 }
23565 EXPORT_SYMBOL(caam_get_era);
23566
23567 -#ifdef CONFIG_DEBUG_FS
23568 -static int caam_debugfs_u64_get(void *data, u64 *val)
23569 -{
23570 - *val = caam64_to_cpu(*(u64 *)data);
23571 - return 0;
23572 -}
23573 -
23574 -static int caam_debugfs_u32_get(void *data, u64 *val)
23575 -{
23576 - *val = caam32_to_cpu(*(u32 *)data);
23577 - return 0;
23578 -}
23579 -
23580 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
23581 -DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
23582 -#endif
23583 +static const struct of_device_id caam_match[] = {
23584 + {
23585 + .compatible = "fsl,sec-v4.0",
23586 + },
23587 + {
23588 + .compatible = "fsl,sec4.0",
23589 + },
23590 + {},
23591 +};
23592 +MODULE_DEVICE_TABLE(of, caam_match);
23593
23594 /* Probe routine for CAAM top (controller) level */
23595 static int caam_probe(struct platform_device *pdev)
23596 {
23597 - int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
23598 + int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
23599 u64 caam_id;
23600 + static const struct soc_device_attribute imx_soc[] = {
23601 + {.family = "Freescale i.MX"},
23602 + {},
23603 + };
23604 struct device *dev;
23605 struct device_node *nprop, *np;
23606 struct caam_ctrl __iomem *ctrl;
23607 @@ -456,9 +453,10 @@ static int caam_probe(struct platform_de
23608
23609 dev = &pdev->dev;
23610 dev_set_drvdata(dev, ctrlpriv);
23611 - ctrlpriv->pdev = pdev;
23612 nprop = pdev->dev.of_node;
23613
23614 + caam_imx = (bool)soc_device_match(imx_soc);
23615 +
23616 /* Enable clocking */
23617 clk = caam_drv_identify_clk(&pdev->dev, "ipg");
23618 if (IS_ERR(clk)) {
23619 @@ -487,14 +485,16 @@ static int caam_probe(struct platform_de
23620 }
23621 ctrlpriv->caam_aclk = clk;
23622
23623 - clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
23624 - if (IS_ERR(clk)) {
23625 - ret = PTR_ERR(clk);
23626 - dev_err(&pdev->dev,
23627 - "can't identify CAAM emi_slow clk: %d\n", ret);
23628 - return ret;
23629 + if (!of_machine_is_compatible("fsl,imx6ul")) {
23630 + clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
23631 + if (IS_ERR(clk)) {
23632 + ret = PTR_ERR(clk);
23633 + dev_err(&pdev->dev,
23634 + "can't identify CAAM emi_slow clk: %d\n", ret);
23635 + return ret;
23636 + }
23637 + ctrlpriv->caam_emi_slow = clk;
23638 }
23639 - ctrlpriv->caam_emi_slow = clk;
23640
23641 ret = clk_prepare_enable(ctrlpriv->caam_ipg);
23642 if (ret < 0) {
23643 @@ -515,11 +515,13 @@ static int caam_probe(struct platform_de
23644 goto disable_caam_mem;
23645 }
23646
23647 - ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
23648 - if (ret < 0) {
23649 - dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
23650 - ret);
23651 - goto disable_caam_aclk;
23652 + if (ctrlpriv->caam_emi_slow) {
23653 + ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
23654 + if (ret < 0) {
23655 + dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
23656 + ret);
23657 + goto disable_caam_aclk;
23658 + }
23659 }
23660
23661 /* Get configuration properties from device tree */
23662 @@ -546,13 +548,13 @@ static int caam_probe(struct platform_de
23663 else
23664 BLOCK_OFFSET = PG_SIZE_64K;
23665
23666 - ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
23667 - ctrlpriv->assure = (struct caam_assurance __force *)
23668 - ((uint8_t *)ctrl +
23669 + ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
23670 + ctrlpriv->assure = (struct caam_assurance __iomem __force *)
23671 + ((__force uint8_t *)ctrl +
23672 BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
23673 );
23674 - ctrlpriv->deco = (struct caam_deco __force *)
23675 - ((uint8_t *)ctrl +
23676 + ctrlpriv->deco = (struct caam_deco __iomem __force *)
23677 + ((__force uint8_t *)ctrl +
23678 BLOCK_OFFSET * DECO_BLOCK_NUMBER
23679 );
23680
23681 @@ -561,12 +563,17 @@ static int caam_probe(struct platform_de
23682
23683 /*
23684 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
23685 - * long pointers in master configuration register
23686 + * long pointers in master configuration register.
23687 + * In case of DPAA 2.x, Management Complex firmware performs
23688 + * the configuration.
23689 */
23690 - clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
23691 - MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
23692 - MCFGR_WDENABLE | MCFGR_LARGE_BURST |
23693 - (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
23694 + caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
23695 + if (!caam_dpaa2)
23696 + clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
23697 + MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
23698 + MCFGR_WDENABLE | MCFGR_LARGE_BURST |
23699 + (sizeof(dma_addr_t) == sizeof(u64) ?
23700 + MCFGR_LONG_PTR : 0));
23701
23702 /*
23703 * Read the Compile Time parameters and SCFGR to determine
23704 @@ -594,64 +601,69 @@ static int caam_probe(struct platform_de
23705 JRSTART_JR1_START | JRSTART_JR2_START |
23706 JRSTART_JR3_START);
23707
23708 - if (sizeof(dma_addr_t) == sizeof(u64))
23709 - if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
23710 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
23711 + if (sizeof(dma_addr_t) == sizeof(u64)) {
23712 + if (caam_dpaa2)
23713 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
23714 + else if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
23715 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
23716 else
23717 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
23718 - else
23719 - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
23720 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
23721 + } else {
23722 + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
23723 + }
23724 + if (ret) {
23725 + dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
23726 + goto iounmap_ctrl;
23727 + }
23728
23729 - /*
23730 - * Detect and enable JobRs
23731 - * First, find out how many ring spec'ed, allocate references
23732 - * for all, then go probe each one.
23733 - */
23734 - rspec = 0;
23735 - for_each_available_child_of_node(nprop, np)
23736 - if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
23737 - of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
23738 - rspec++;
23739 + ctrlpriv->era = caam_get_era();
23740
23741 - ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
23742 - sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
23743 - if (ctrlpriv->jrpdev == NULL) {
23744 - ret = -ENOMEM;
23745 + ret = of_platform_populate(nprop, caam_match, NULL, dev);
23746 + if (ret) {
23747 + dev_err(dev, "JR platform devices creation error\n");
23748 goto iounmap_ctrl;
23749 }
23750
23751 +#ifdef CONFIG_DEBUG_FS
23752 + /*
23753 + * FIXME: needs better naming distinction, as some amalgamation of
23754 + * "caam" and nprop->full_name. The OF name isn't distinctive,
23755 + * but does separate instances
23756 + */
23757 + perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
23758 +
23759 + ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
23760 + ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
23761 +#endif
23762 ring = 0;
23763 - ctrlpriv->total_jobrs = 0;
23764 for_each_available_child_of_node(nprop, np)
23765 if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
23766 of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
23767 - ctrlpriv->jrpdev[ring] =
23768 - of_platform_device_create(np, NULL, dev);
23769 - if (!ctrlpriv->jrpdev[ring]) {
23770 - pr_warn("JR%d Platform device creation error\n",
23771 - ring);
23772 - continue;
23773 - }
23774 - ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
23775 - ((uint8_t *)ctrl +
23776 + ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
23777 + ((__force uint8_t *)ctrl +
23778 (ring + JR_BLOCK_NUMBER) *
23779 BLOCK_OFFSET
23780 );
23781 ctrlpriv->total_jobrs++;
23782 ring++;
23783 - }
23784 + }
23785
23786 - /* Check to see if QI present. If so, enable */
23787 - ctrlpriv->qi_present =
23788 - !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
23789 - CTPR_MS_QI_MASK);
23790 - if (ctrlpriv->qi_present) {
23791 - ctrlpriv->qi = (struct caam_queue_if __force *)
23792 - ((uint8_t *)ctrl +
23793 + /* Check to see if (DPAA 1.x) QI present. If so, enable */
23794 + ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
23795 + if (ctrlpriv->qi_present && !caam_dpaa2) {
23796 + ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
23797 + ((__force uint8_t *)ctrl +
23798 BLOCK_OFFSET * QI_BLOCK_NUMBER
23799 );
23800 /* This is all that's required to physically enable QI */
23801 wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
23802 +
23803 + /* If QMAN driver is present, init CAAM-QI backend */
23804 +#ifdef CONFIG_CAAM_QI
23805 + ret = caam_qi_init(pdev);
23806 + if (ret)
23807 + dev_err(dev, "caam qi i/f init failed: %d\n", ret);
23808 +#endif
23809 }
23810
23811 /* If no QI and no rings specified, quit and go home */
23812 @@ -666,8 +678,10 @@ static int caam_probe(struct platform_de
23813 /*
23814 * If SEC has RNG version >= 4 and RNG state handle has not been
23815 * already instantiated, do RNG instantiation
23816 + * In case of DPAA 2.x, RNG is managed by MC firmware.
23817 */
23818 - if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
23819 + if (!caam_dpaa2 &&
23820 + (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
23821 ctrlpriv->rng4_sh_init =
23822 rd_reg32(&ctrl->r4tst[0].rdsta);
23823 /*
23824 @@ -734,78 +748,47 @@ static int caam_probe(struct platform_de
23825
23826 /* Report "alive" for developer to see */
23827 dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
23828 - caam_get_era());
23829 - dev_info(dev, "job rings = %d, qi = %d\n",
23830 - ctrlpriv->total_jobrs, ctrlpriv->qi_present);
23831 + ctrlpriv->era);
23832 + dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
23833 + ctrlpriv->total_jobrs, ctrlpriv->qi_present,
23834 + caam_dpaa2 ? "yes" : "no");
23835
23836 #ifdef CONFIG_DEBUG_FS
23837 - /*
23838 - * FIXME: needs better naming distinction, as some amalgamation of
23839 - * "caam" and nprop->full_name. The OF name isn't distinctive,
23840 - * but does separate instances
23841 - */
23842 - perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
23843 -
23844 - ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
23845 - ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
23846 -
23847 - /* Controller-level - performance monitor counters */
23848 -
23849 - ctrlpriv->ctl_rq_dequeued =
23850 - debugfs_create_file("rq_dequeued",
23851 - S_IRUSR | S_IRGRP | S_IROTH,
23852 - ctrlpriv->ctl, &perfmon->req_dequeued,
23853 - &caam_fops_u64_ro);
23854 - ctrlpriv->ctl_ob_enc_req =
23855 - debugfs_create_file("ob_rq_encrypted",
23856 - S_IRUSR | S_IRGRP | S_IROTH,
23857 - ctrlpriv->ctl, &perfmon->ob_enc_req,
23858 - &caam_fops_u64_ro);
23859 - ctrlpriv->ctl_ib_dec_req =
23860 - debugfs_create_file("ib_rq_decrypted",
23861 - S_IRUSR | S_IRGRP | S_IROTH,
23862 - ctrlpriv->ctl, &perfmon->ib_dec_req,
23863 - &caam_fops_u64_ro);
23864 - ctrlpriv->ctl_ob_enc_bytes =
23865 - debugfs_create_file("ob_bytes_encrypted",
23866 - S_IRUSR | S_IRGRP | S_IROTH,
23867 - ctrlpriv->ctl, &perfmon->ob_enc_bytes,
23868 - &caam_fops_u64_ro);
23869 - ctrlpriv->ctl_ob_prot_bytes =
23870 - debugfs_create_file("ob_bytes_protected",
23871 - S_IRUSR | S_IRGRP | S_IROTH,
23872 - ctrlpriv->ctl, &perfmon->ob_prot_bytes,
23873 - &caam_fops_u64_ro);
23874 - ctrlpriv->ctl_ib_dec_bytes =
23875 - debugfs_create_file("ib_bytes_decrypted",
23876 - S_IRUSR | S_IRGRP | S_IROTH,
23877 - ctrlpriv->ctl, &perfmon->ib_dec_bytes,
23878 - &caam_fops_u64_ro);
23879 - ctrlpriv->ctl_ib_valid_bytes =
23880 - debugfs_create_file("ib_bytes_validated",
23881 - S_IRUSR | S_IRGRP | S_IROTH,
23882 - ctrlpriv->ctl, &perfmon->ib_valid_bytes,
23883 - &caam_fops_u64_ro);
23884 + debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
23885 + ctrlpriv->ctl, &perfmon->req_dequeued,
23886 + &caam_fops_u64_ro);
23887 + debugfs_create_file("ob_rq_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
23888 + ctrlpriv->ctl, &perfmon->ob_enc_req,
23889 + &caam_fops_u64_ro);
23890 + debugfs_create_file("ib_rq_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
23891 + ctrlpriv->ctl, &perfmon->ib_dec_req,
23892 + &caam_fops_u64_ro);
23893 + debugfs_create_file("ob_bytes_encrypted", S_IRUSR | S_IRGRP | S_IROTH,
23894 + ctrlpriv->ctl, &perfmon->ob_enc_bytes,
23895 + &caam_fops_u64_ro);
23896 + debugfs_create_file("ob_bytes_protected", S_IRUSR | S_IRGRP | S_IROTH,
23897 + ctrlpriv->ctl, &perfmon->ob_prot_bytes,
23898 + &caam_fops_u64_ro);
23899 + debugfs_create_file("ib_bytes_decrypted", S_IRUSR | S_IRGRP | S_IROTH,
23900 + ctrlpriv->ctl, &perfmon->ib_dec_bytes,
23901 + &caam_fops_u64_ro);
23902 + debugfs_create_file("ib_bytes_validated", S_IRUSR | S_IRGRP | S_IROTH,
23903 + ctrlpriv->ctl, &perfmon->ib_valid_bytes,
23904 + &caam_fops_u64_ro);
23905
23906 /* Controller level - global status values */
23907 - ctrlpriv->ctl_faultaddr =
23908 - debugfs_create_file("fault_addr",
23909 - S_IRUSR | S_IRGRP | S_IROTH,
23910 - ctrlpriv->ctl, &perfmon->faultaddr,
23911 - &caam_fops_u32_ro);
23912 - ctrlpriv->ctl_faultdetail =
23913 - debugfs_create_file("fault_detail",
23914 - S_IRUSR | S_IRGRP | S_IROTH,
23915 - ctrlpriv->ctl, &perfmon->faultdetail,
23916 - &caam_fops_u32_ro);
23917 - ctrlpriv->ctl_faultstatus =
23918 - debugfs_create_file("fault_status",
23919 - S_IRUSR | S_IRGRP | S_IROTH,
23920 - ctrlpriv->ctl, &perfmon->status,
23921 - &caam_fops_u32_ro);
23922 + debugfs_create_file("fault_addr", S_IRUSR | S_IRGRP | S_IROTH,
23923 + ctrlpriv->ctl, &perfmon->faultaddr,
23924 + &caam_fops_u32_ro);
23925 + debugfs_create_file("fault_detail", S_IRUSR | S_IRGRP | S_IROTH,
23926 + ctrlpriv->ctl, &perfmon->faultdetail,
23927 + &caam_fops_u32_ro);
23928 + debugfs_create_file("fault_status", S_IRUSR | S_IRGRP | S_IROTH,
23929 + ctrlpriv->ctl, &perfmon->status,
23930 + &caam_fops_u32_ro);
23931
23932 /* Internal covering keys (useful in non-secure mode only) */
23933 - ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
23934 + ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
23935 ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
23936 ctrlpriv->ctl_kek = debugfs_create_blob("kek",
23937 S_IRUSR |
23938 @@ -813,7 +796,7 @@ static int caam_probe(struct platform_de
23939 ctrlpriv->ctl,
23940 &ctrlpriv->ctl_kek_wrap);
23941
23942 - ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
23943 + ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
23944 ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
23945 ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
23946 S_IRUSR |
23947 @@ -821,7 +804,7 @@ static int caam_probe(struct platform_de
23948 ctrlpriv->ctl,
23949 &ctrlpriv->ctl_tkek_wrap);
23950
23951 - ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
23952 + ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
23953 ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
23954 ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
23955 S_IRUSR |
23956 @@ -832,13 +815,17 @@ static int caam_probe(struct platform_de
23957 return 0;
23958
23959 caam_remove:
23960 +#ifdef CONFIG_DEBUG_FS
23961 + debugfs_remove_recursive(ctrlpriv->dfs_root);
23962 +#endif
23963 caam_remove(pdev);
23964 return ret;
23965
23966 iounmap_ctrl:
23967 iounmap(ctrl);
23968 disable_caam_emi_slow:
23969 - clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23970 + if (ctrlpriv->caam_emi_slow)
23971 + clk_disable_unprepare(ctrlpriv->caam_emi_slow);
23972 disable_caam_aclk:
23973 clk_disable_unprepare(ctrlpriv->caam_aclk);
23974 disable_caam_mem:
23975 @@ -848,17 +835,6 @@ disable_caam_ipg:
23976 return ret;
23977 }
23978
23979 -static struct of_device_id caam_match[] = {
23980 - {
23981 - .compatible = "fsl,sec-v4.0",
23982 - },
23983 - {
23984 - .compatible = "fsl,sec4.0",
23985 - },
23986 - {},
23987 -};
23988 -MODULE_DEVICE_TABLE(of, caam_match);
23989 -
23990 static struct platform_driver caam_driver = {
23991 .driver = {
23992 .name = "caam",
23993 --- a/drivers/crypto/caam/ctrl.h
23994 +++ b/drivers/crypto/caam/ctrl.h
23995 @@ -10,4 +10,6 @@
23996 /* Prototypes for backend-level services exposed to APIs */
23997 int caam_get_era(void);
23998
23999 +extern bool caam_dpaa2;
24000 +
24001 #endif /* CTRL_H */
24002 --- a/drivers/crypto/caam/desc.h
24003 +++ b/drivers/crypto/caam/desc.h
24004 @@ -22,12 +22,6 @@
24005 #define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
24006 #define SEC4_SG_OFFSET_MASK 0x00001fff
24007
24008 -struct sec4_sg_entry {
24009 - u64 ptr;
24010 - u32 len;
24011 - u32 bpid_offset;
24012 -};
24013 -
24014 /* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
24015 #define MAX_CAAM_DESCSIZE 64
24016
24017 @@ -47,6 +41,7 @@ struct sec4_sg_entry {
24018 #define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
24019 #define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
24020 #define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
24021 +#define CMD_MOVEB (0x07 << CMD_SHIFT)
24022 #define CMD_STORE (0x0a << CMD_SHIFT)
24023 #define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
24024 #define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
24025 @@ -90,8 +85,8 @@ struct sec4_sg_entry {
24026 #define HDR_ZRO 0x00008000
24027
24028 /* Start Index or SharedDesc Length */
24029 -#define HDR_START_IDX_MASK 0x3f
24030 #define HDR_START_IDX_SHIFT 16
24031 +#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
24032
24033 /* If shared descriptor header, 6-bit length */
24034 #define HDR_DESCLEN_SHR_MASK 0x3f
24035 @@ -121,10 +116,10 @@ struct sec4_sg_entry {
24036 #define HDR_PROP_DNR 0x00000800
24037
24038 /* JobDesc/SharedDesc share property */
24039 -#define HDR_SD_SHARE_MASK 0x03
24040 #define HDR_SD_SHARE_SHIFT 8
24041 -#define HDR_JD_SHARE_MASK 0x07
24042 +#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
24043 #define HDR_JD_SHARE_SHIFT 8
24044 +#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
24045
24046 #define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
24047 #define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
24048 @@ -235,7 +230,7 @@ struct sec4_sg_entry {
24049 #define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
24050 #define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
24051 #define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
24052 -#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
24053 +#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
24054 #define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
24055 #define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
24056 #define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
24057 @@ -360,6 +355,7 @@ struct sec4_sg_entry {
24058 #define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
24059 #define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
24060 #define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
24061 +#define FIFOLD_TYPE_IFIFO (0x0f << FIFOLD_TYPE_SHIFT)
24062
24063 /* Other types. Need to OR in last/flush bits as desired */
24064 #define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
24065 @@ -400,7 +396,7 @@ struct sec4_sg_entry {
24066 #define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
24067 #define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
24068 #define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
24069 -#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
24070 +#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
24071 #define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
24072 #define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
24073 #define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
24074 @@ -413,6 +409,7 @@ struct sec4_sg_entry {
24075 #define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
24076 #define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
24077 #define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
24078 +#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
24079 #define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
24080
24081 /*
24082 @@ -449,6 +446,18 @@ struct sec4_sg_entry {
24083 #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
24084 #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
24085 #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
24086 +#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
24087 +#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
24088 +#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
24089 +#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
24090 +#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
24091 +#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
24092 +#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
24093 +#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
24094 +#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
24095 +#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
24096 +#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
24097 +#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
24098
24099 /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
24100 #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
24101 @@ -1098,6 +1107,22 @@ struct sec4_sg_entry {
24102 /* MacSec protinfos */
24103 #define OP_PCL_MACSEC 0x0001
24104
24105 +/* Derived Key Protocol (DKP) Protinfo */
24106 +#define OP_PCL_DKP_SRC_SHIFT 14
24107 +#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
24108 +#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
24109 +#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
24110 +#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
24111 +#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
24112 +#define OP_PCL_DKP_DST_SHIFT 12
24113 +#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
24114 +#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
24115 +#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
24116 +#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
24117 +#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
24118 +#define OP_PCL_DKP_KEY_SHIFT 0
24119 +#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
24120 +
24121 /* PKI unidirectional protocol protinfo bits */
24122 #define OP_PCL_PKPROT_TEST 0x0008
24123 #define OP_PCL_PKPROT_DECRYPT 0x0004
24124 @@ -1107,8 +1132,8 @@ struct sec4_sg_entry {
24125 /* For non-protocol/alg-only op commands */
24126 #define OP_ALG_TYPE_SHIFT 24
24127 #define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
24128 -#define OP_ALG_TYPE_CLASS1 2
24129 -#define OP_ALG_TYPE_CLASS2 4
24130 +#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
24131 +#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
24132
24133 #define OP_ALG_ALGSEL_SHIFT 16
24134 #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
24135 @@ -1249,7 +1274,7 @@ struct sec4_sg_entry {
24136 #define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
24137
24138 /* PKHA mode copy-memory functions */
24139 -#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
24140 +#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
24141 #define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
24142 #define OP_ALG_PKMODE_DST_REG_SHIFT 10
24143 #define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
24144 @@ -1445,10 +1470,11 @@ struct sec4_sg_entry {
24145 #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
24146 #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
24147 #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
24148 -#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
24149 +#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
24150 #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
24151 #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
24152 #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
24153 +#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
24154
24155 /* Destination selectors */
24156 #define MATH_DEST_SHIFT 8
24157 @@ -1457,6 +1483,7 @@ struct sec4_sg_entry {
24158 #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
24159 #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
24160 #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
24161 +#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
24162 #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
24163 #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
24164 #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
24165 @@ -1629,4 +1656,31 @@ struct sec4_sg_entry {
24166 /* Frame Descriptor Command for Replacement Job Descriptor */
24167 #define FD_CMD_REPLACE_JOB_DESC 0x20000000
24168
24169 +/* CHA Control Register bits */
24170 +#define CCTRL_RESET_CHA_ALL 0x1
24171 +#define CCTRL_RESET_CHA_AESA 0x2
24172 +#define CCTRL_RESET_CHA_DESA 0x4
24173 +#define CCTRL_RESET_CHA_AFHA 0x8
24174 +#define CCTRL_RESET_CHA_KFHA 0x10
24175 +#define CCTRL_RESET_CHA_SF8A 0x20
24176 +#define CCTRL_RESET_CHA_PKHA 0x40
24177 +#define CCTRL_RESET_CHA_MDHA 0x80
24178 +#define CCTRL_RESET_CHA_CRCA 0x100
24179 +#define CCTRL_RESET_CHA_RNG 0x200
24180 +#define CCTRL_RESET_CHA_SF9A 0x400
24181 +#define CCTRL_RESET_CHA_ZUCE 0x800
24182 +#define CCTRL_RESET_CHA_ZUCA 0x1000
24183 +#define CCTRL_UNLOAD_PK_A0 0x10000
24184 +#define CCTRL_UNLOAD_PK_A1 0x20000
24185 +#define CCTRL_UNLOAD_PK_A2 0x40000
24186 +#define CCTRL_UNLOAD_PK_A3 0x80000
24187 +#define CCTRL_UNLOAD_PK_B0 0x100000
24188 +#define CCTRL_UNLOAD_PK_B1 0x200000
24189 +#define CCTRL_UNLOAD_PK_B2 0x400000
24190 +#define CCTRL_UNLOAD_PK_B3 0x800000
24191 +#define CCTRL_UNLOAD_PK_N 0x1000000
24192 +#define CCTRL_UNLOAD_PK_A 0x4000000
24193 +#define CCTRL_UNLOAD_PK_B 0x8000000
24194 +#define CCTRL_UNLOAD_SBOX 0x10000000
24195 +
24196 #endif /* DESC_H */
24197 --- a/drivers/crypto/caam/desc_constr.h
24198 +++ b/drivers/crypto/caam/desc_constr.h
24199 @@ -4,6 +4,9 @@
24200 * Copyright 2008-2012 Freescale Semiconductor, Inc.
24201 */
24202
24203 +#ifndef DESC_CONSTR_H
24204 +#define DESC_CONSTR_H
24205 +
24206 #include "desc.h"
24207 #include "regs.h"
24208
24209 @@ -33,38 +36,39 @@
24210
24211 extern bool caam_little_end;
24212
24213 -static inline int desc_len(u32 *desc)
24214 +static inline int desc_len(u32 * const desc)
24215 {
24216 return caam32_to_cpu(*desc) & HDR_DESCLEN_MASK;
24217 }
24218
24219 -static inline int desc_bytes(void *desc)
24220 +static inline int desc_bytes(void * const desc)
24221 {
24222 return desc_len(desc) * CAAM_CMD_SZ;
24223 }
24224
24225 -static inline u32 *desc_end(u32 *desc)
24226 +static inline u32 *desc_end(u32 * const desc)
24227 {
24228 return desc + desc_len(desc);
24229 }
24230
24231 -static inline void *sh_desc_pdb(u32 *desc)
24232 +static inline void *sh_desc_pdb(u32 * const desc)
24233 {
24234 return desc + 1;
24235 }
24236
24237 -static inline void init_desc(u32 *desc, u32 options)
24238 +static inline void init_desc(u32 * const desc, u32 options)
24239 {
24240 *desc = cpu_to_caam32((options | HDR_ONE) + 1);
24241 }
24242
24243 -static inline void init_sh_desc(u32 *desc, u32 options)
24244 +static inline void init_sh_desc(u32 * const desc, u32 options)
24245 {
24246 PRINT_POS;
24247 init_desc(desc, CMD_SHARED_DESC_HDR | options);
24248 }
24249
24250 -static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
24251 +static inline void init_sh_desc_pdb(u32 * const desc, u32 options,
24252 + size_t pdb_bytes)
24253 {
24254 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
24255
24256 @@ -72,19 +76,20 @@ static inline void init_sh_desc_pdb(u32
24257 options);
24258 }
24259
24260 -static inline void init_job_desc(u32 *desc, u32 options)
24261 +static inline void init_job_desc(u32 * const desc, u32 options)
24262 {
24263 init_desc(desc, CMD_DESC_HDR | options);
24264 }
24265
24266 -static inline void init_job_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes)
24267 +static inline void init_job_desc_pdb(u32 * const desc, u32 options,
24268 + size_t pdb_bytes)
24269 {
24270 u32 pdb_len = (pdb_bytes + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ;
24271
24272 init_job_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT)) | options);
24273 }
24274
24275 -static inline void append_ptr(u32 *desc, dma_addr_t ptr)
24276 +static inline void append_ptr(u32 * const desc, dma_addr_t ptr)
24277 {
24278 dma_addr_t *offset = (dma_addr_t *)desc_end(desc);
24279
24280 @@ -94,8 +99,8 @@ static inline void append_ptr(u32 *desc,
24281 CAAM_PTR_SZ / CAAM_CMD_SZ);
24282 }
24283
24284 -static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len,
24285 - u32 options)
24286 +static inline void init_job_desc_shared(u32 * const desc, dma_addr_t ptr,
24287 + int len, u32 options)
24288 {
24289 PRINT_POS;
24290 init_job_desc(desc, HDR_SHARED | options |
24291 @@ -103,7 +108,7 @@ static inline void init_job_desc_shared(
24292 append_ptr(desc, ptr);
24293 }
24294
24295 -static inline void append_data(u32 *desc, void *data, int len)
24296 +static inline void append_data(u32 * const desc, const void *data, int len)
24297 {
24298 u32 *offset = desc_end(desc);
24299
24300 @@ -114,7 +119,7 @@ static inline void append_data(u32 *desc
24301 (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ);
24302 }
24303
24304 -static inline void append_cmd(u32 *desc, u32 command)
24305 +static inline void append_cmd(u32 * const desc, u32 command)
24306 {
24307 u32 *cmd = desc_end(desc);
24308
24309 @@ -125,7 +130,7 @@ static inline void append_cmd(u32 *desc,
24310
24311 #define append_u32 append_cmd
24312
24313 -static inline void append_u64(u32 *desc, u64 data)
24314 +static inline void append_u64(u32 * const desc, u64 data)
24315 {
24316 u32 *offset = desc_end(desc);
24317
24318 @@ -142,14 +147,14 @@ static inline void append_u64(u32 *desc,
24319 }
24320
24321 /* Write command without affecting header, and return pointer to next word */
24322 -static inline u32 *write_cmd(u32 *desc, u32 command)
24323 +static inline u32 *write_cmd(u32 * const desc, u32 command)
24324 {
24325 *desc = cpu_to_caam32(command);
24326
24327 return desc + 1;
24328 }
24329
24330 -static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
24331 +static inline void append_cmd_ptr(u32 * const desc, dma_addr_t ptr, int len,
24332 u32 command)
24333 {
24334 append_cmd(desc, command | len);
24335 @@ -157,7 +162,7 @@ static inline void append_cmd_ptr(u32 *d
24336 }
24337
24338 /* Write length after pointer, rather than inside command */
24339 -static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr,
24340 +static inline void append_cmd_ptr_extlen(u32 * const desc, dma_addr_t ptr,
24341 unsigned int len, u32 command)
24342 {
24343 append_cmd(desc, command);
24344 @@ -166,7 +171,7 @@ static inline void append_cmd_ptr_extlen
24345 append_cmd(desc, len);
24346 }
24347
24348 -static inline void append_cmd_data(u32 *desc, void *data, int len,
24349 +static inline void append_cmd_data(u32 * const desc, const void *data, int len,
24350 u32 command)
24351 {
24352 append_cmd(desc, command | IMMEDIATE | len);
24353 @@ -174,7 +179,7 @@ static inline void append_cmd_data(u32 *
24354 }
24355
24356 #define APPEND_CMD_RET(cmd, op) \
24357 -static inline u32 *append_##cmd(u32 *desc, u32 options) \
24358 +static inline u32 *append_##cmd(u32 * const desc, u32 options) \
24359 { \
24360 u32 *cmd = desc_end(desc); \
24361 PRINT_POS; \
24362 @@ -183,14 +188,15 @@ static inline u32 *append_##cmd(u32 *des
24363 }
24364 APPEND_CMD_RET(jump, JUMP)
24365 APPEND_CMD_RET(move, MOVE)
24366 +APPEND_CMD_RET(moveb, MOVEB)
24367
24368 -static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
24369 +static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
24370 {
24371 *jump_cmd = cpu_to_caam32(caam32_to_cpu(*jump_cmd) |
24372 (desc_len(desc) - (jump_cmd - desc)));
24373 }
24374
24375 -static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
24376 +static inline void set_move_tgt_here(u32 * const desc, u32 *move_cmd)
24377 {
24378 u32 val = caam32_to_cpu(*move_cmd);
24379
24380 @@ -200,7 +206,7 @@ static inline void set_move_tgt_here(u32
24381 }
24382
24383 #define APPEND_CMD(cmd, op) \
24384 -static inline void append_##cmd(u32 *desc, u32 options) \
24385 +static inline void append_##cmd(u32 * const desc, u32 options) \
24386 { \
24387 PRINT_POS; \
24388 append_cmd(desc, CMD_##op | options); \
24389 @@ -208,7 +214,8 @@ static inline void append_##cmd(u32 *des
24390 APPEND_CMD(operation, OPERATION)
24391
24392 #define APPEND_CMD_LEN(cmd, op) \
24393 -static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
24394 +static inline void append_##cmd(u32 * const desc, unsigned int len, \
24395 + u32 options) \
24396 { \
24397 PRINT_POS; \
24398 append_cmd(desc, CMD_##op | len | options); \
24399 @@ -220,8 +227,8 @@ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_L
24400 APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
24401
24402 #define APPEND_CMD_PTR(cmd, op) \
24403 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
24404 - u32 options) \
24405 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
24406 + unsigned int len, u32 options) \
24407 { \
24408 PRINT_POS; \
24409 append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
24410 @@ -231,8 +238,8 @@ APPEND_CMD_PTR(load, LOAD)
24411 APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
24412 APPEND_CMD_PTR(fifo_store, FIFO_STORE)
24413
24414 -static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
24415 - u32 options)
24416 +static inline void append_store(u32 * const desc, dma_addr_t ptr,
24417 + unsigned int len, u32 options)
24418 {
24419 u32 cmd_src;
24420
24421 @@ -249,7 +256,8 @@ static inline void append_store(u32 *des
24422 }
24423
24424 #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
24425 -static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
24426 +static inline void append_seq_##cmd##_ptr_intlen(u32 * const desc, \
24427 + dma_addr_t ptr, \
24428 unsigned int len, \
24429 u32 options) \
24430 { \
24431 @@ -263,7 +271,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
24432 APPEND_SEQ_PTR_INTLEN(out, OUT)
24433
24434 #define APPEND_CMD_PTR_TO_IMM(cmd, op) \
24435 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
24436 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
24437 unsigned int len, u32 options) \
24438 { \
24439 PRINT_POS; \
24440 @@ -273,7 +281,7 @@ APPEND_CMD_PTR_TO_IMM(load, LOAD);
24441 APPEND_CMD_PTR_TO_IMM(fifo_load, FIFO_LOAD);
24442
24443 #define APPEND_CMD_PTR_EXTLEN(cmd, op) \
24444 -static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
24445 +static inline void append_##cmd##_extlen(u32 * const desc, dma_addr_t ptr, \
24446 unsigned int len, u32 options) \
24447 { \
24448 PRINT_POS; \
24449 @@ -287,7 +295,7 @@ APPEND_CMD_PTR_EXTLEN(seq_out_ptr, SEQ_O
24450 * the size of its type
24451 */
24452 #define APPEND_CMD_PTR_LEN(cmd, op, type) \
24453 -static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
24454 +static inline void append_##cmd(u32 * const desc, dma_addr_t ptr, \
24455 type len, u32 options) \
24456 { \
24457 PRINT_POS; \
24458 @@ -304,7 +312,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
24459 * from length of immediate data provided, e.g., split keys
24460 */
24461 #define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
24462 -static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
24463 +static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
24464 unsigned int data_len, \
24465 unsigned int len, u32 options) \
24466 { \
24467 @@ -315,7 +323,7 @@ static inline void append_##cmd##_as_imm
24468 APPEND_CMD_PTR_TO_IMM2(key, KEY);
24469
24470 #define APPEND_CMD_RAW_IMM(cmd, op, type) \
24471 -static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
24472 +static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
24473 u32 options) \
24474 { \
24475 PRINT_POS; \
24476 @@ -426,3 +434,107 @@ do { \
24477 APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
24478 #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
24479 APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
24480 +
24481 +/**
24482 + * struct alginfo - Container for algorithm details
24483 + * @algtype: algorithm selector; for valid values, see documentation of the
24484 + * functions where it is used.
24485 + * @keylen: length of the provided algorithm key, in bytes
24486 + * @keylen_pad: padded length of the provided algorithm key, in bytes
24487 + * @key: address where algorithm key resides; virtual address if key_inline
24488 + * is true, dma (bus) address if key_inline is false.
24489 + * @key_inline: true - key can be inlined in the descriptor; false - key is
24490 + * referenced by the descriptor
24491 + */
24492 +struct alginfo {
24493 + u32 algtype;
24494 + unsigned int keylen;
24495 + unsigned int keylen_pad;
24496 + union {
24497 + dma_addr_t key_dma;
24498 + const void *key_virt;
24499 + };
24500 + bool key_inline;
24501 +};
24502 +
24503 +/**
24504 + * desc_inline_query() - Provide indications on which data items can be inlined
24505 + * and which shall be referenced in a shared descriptor.
24506 + * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
24507 + * excluding the data items to be inlined (or corresponding
24508 + * pointer if an item is not inlined). Each cnstr_* function that
24509 + * generates descriptors should have a define mentioning
24510 + * corresponding length.
24511 + * @jd_len: Maximum length of the job descriptor(s) that will be used
24512 + * together with the shared descriptor.
24513 + * @data_len: Array of lengths of the data items trying to be inlined
24514 + * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
24515 + * otherwise.
24516 + * @count: Number of data items (size of @data_len array); must be <= 32
24517 + *
24518 + * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
24519 + * check @inl_mask for details.
24520 + */
24521 +static inline int desc_inline_query(unsigned int sd_base_len,
24522 + unsigned int jd_len, unsigned int *data_len,
24523 + u32 *inl_mask, unsigned int count)
24524 +{
24525 + int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
24526 + unsigned int i;
24527 +
24528 + *inl_mask = 0;
24529 + for (i = 0; (i < count) && (rem_bytes > 0); i++) {
24530 + if (rem_bytes - (int)(data_len[i] +
24531 + (count - i - 1) * CAAM_PTR_SZ) >= 0) {
24532 + rem_bytes -= data_len[i];
24533 + *inl_mask |= (1 << i);
24534 + } else {
24535 + rem_bytes -= CAAM_PTR_SZ;
24536 + }
24537 + }
24538 +
24539 + return (rem_bytes >= 0) ? 0 : -1;
24540 +}
24541 +
24542 +/**
24543 + * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
24544 + * @desc: pointer to buffer used for descriptor construction
24545 + * @adata: pointer to authentication transform definitions.
24546 + * keylen should be the length of the initial key, while keylen_pad
24547 + * is the length of the derived (split) key.
24548 + * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
24549 + * SHA256, SHA384, SHA512}.
24550 + */
24551 +static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
24552 +{
24553 + u32 protid;
24554 +
24555 + /*
24556 + * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
24557 + * to OP_PCLID_DKP_{MD5, SHA*}
24558 + */
24559 + protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
24560 + (0x20 << OP_ALG_ALGSEL_SHIFT);
24561 +
24562 + if (adata->key_inline) {
24563 + int words;
24564 +
24565 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
24566 + OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
24567 + adata->keylen);
24568 + append_data(desc, adata->key_virt, adata->keylen);
24569 +
24570 + /* Reserve space in descriptor buffer for the derived key */
24571 + words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
24572 + ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
24573 + if (words)
24574 + (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
24575 + } else {
24576 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
24577 + OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
24578 + adata->keylen);
24579 + append_ptr(desc, adata->key_dma);
24580 + }
24581 +}
24582 +
24583 +#endif /* DESC_CONSTR_H */
24584 --- /dev/null
24585 +++ b/drivers/crypto/caam/dpseci.c
24586 @@ -0,0 +1,859 @@
24587 +/*
24588 + * Copyright 2013-2016 Freescale Semiconductor Inc.
24589 + * Copyright 2017 NXP
24590 + *
24591 + * Redistribution and use in source and binary forms, with or without
24592 + * modification, are permitted provided that the following conditions are met:
24593 + * * Redistributions of source code must retain the above copyright
24594 + * notice, this list of conditions and the following disclaimer.
24595 + * * Redistributions in binary form must reproduce the above copyright
24596 + * notice, this list of conditions and the following disclaimer in the
24597 + * documentation and/or other materials provided with the distribution.
24598 + * * Neither the names of the above-listed copyright holders nor the
24599 + * names of any contributors may be used to endorse or promote products
24600 + * derived from this software without specific prior written permission.
24601 + *
24602 + *
24603 + * ALTERNATIVELY, this software may be distributed under the terms of the
24604 + * GNU General Public License ("GPL") as published by the Free Software
24605 + * Foundation, either version 2 of that License or (at your option) any
24606 + * later version.
24607 + *
24608 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24609 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24610 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24611 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
24612 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24613 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24614 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24615 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24616 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24617 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
24618 + * POSSIBILITY OF SUCH DAMAGE.
24619 + */
24620 +
24621 +#include "../../../drivers/staging/fsl-mc/include/mc-sys.h"
24622 +#include "../../../drivers/staging/fsl-mc/include/mc-cmd.h"
24623 +#include "../../../drivers/staging/fsl-mc/include/dpopr.h"
24624 +#include "dpseci.h"
24625 +#include "dpseci_cmd.h"
24626 +
24627 +/**
24628 + * dpseci_open() - Open a control session for the specified object
24629 + * @mc_io: Pointer to MC portal's I/O object
24630 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24631 + * @dpseci_id: DPSECI unique ID
24632 + * @token: Returned token; use in subsequent API calls
24633 + *
24634 + * This function can be used to open a control session for an already created
24635 + * object; an object may have been declared in the DPL or by calling the
24636 + * dpseci_create() function.
24637 + * This function returns a unique authentication token, associated with the
24638 + * specific object ID and the specific MC portal; this token must be used in all
24639 + * subsequent commands for this specific object.
24640 + *
24641 + * Return: '0' on success, error code otherwise
24642 + */
24643 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
24644 + u16 *token)
24645 +{
24646 + struct mc_command cmd = { 0 };
24647 + struct dpseci_cmd_open *cmd_params;
24648 + int err;
24649 +
24650 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
24651 + cmd_flags,
24652 + 0);
24653 + cmd_params = (struct dpseci_cmd_open *)cmd.params;
24654 + cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
24655 + err = mc_send_command(mc_io, &cmd);
24656 + if (err)
24657 + return err;
24658 +
24659 + *token = mc_cmd_hdr_read_token(&cmd);
24660 +
24661 + return 0;
24662 +}
24663 +
24664 +/**
24665 + * dpseci_close() - Close the control session of the object
24666 + * @mc_io: Pointer to MC portal's I/O object
24667 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24668 + * @token: Token of DPSECI object
24669 + *
24670 + * After this function is called, no further operations are allowed on the
24671 + * object without opening a new control session.
24672 + *
24673 + * Return: '0' on success, error code otherwise
24674 + */
24675 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
24676 +{
24677 + struct mc_command cmd = { 0 };
24678 +
24679 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
24680 + cmd_flags,
24681 + token);
24682 + return mc_send_command(mc_io, &cmd);
24683 +}
24684 +
24685 +/**
24686 + * dpseci_create() - Create the DPSECI object
24687 + * @mc_io: Pointer to MC portal's I/O object
24688 + * @dprc_token: Parent container token; '0' for default container
24689 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24690 + * @cfg: Configuration structure
24691 + * @obj_id: returned object id
24692 + *
24693 + * Create the DPSECI object, allocate required resources and perform required
24694 + * initialization.
24695 + *
24696 + * The object can be created either by declaring it in the DPL file, or by
24697 + * calling this function.
24698 + *
24699 + * The function accepts an authentication token of a parent container that this
24700 + * object should be assigned to. The token can be '0' so the object will be
24701 + * assigned to the default container.
24702 + * The newly created object can be opened with the returned object id and using
24703 + * the container's associated tokens and MC portals.
24704 + *
24705 + * Return: '0' on success, error code otherwise
24706 + */
24707 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
24708 + const struct dpseci_cfg *cfg, u32 *obj_id)
24709 +{
24710 + struct mc_command cmd = { 0 };
24711 + struct dpseci_cmd_create *cmd_params;
24712 + int i, err;
24713 +
24714 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
24715 + cmd_flags,
24716 + dprc_token);
24717 + cmd_params = (struct dpseci_cmd_create *)cmd.params;
24718 + for (i = 0; i < 8; i++)
24719 + cmd_params->priorities[i] = cfg->priorities[i];
24720 + cmd_params->num_tx_queues = cfg->num_tx_queues;
24721 + cmd_params->num_rx_queues = cfg->num_rx_queues;
24722 + cmd_params->options = cpu_to_le32(cfg->options);
24723 + err = mc_send_command(mc_io, &cmd);
24724 + if (err)
24725 + return err;
24726 +
24727 + *obj_id = mc_cmd_read_object_id(&cmd);
24728 +
24729 + return 0;
24730 +}
24731 +
24732 +/**
24733 + * dpseci_destroy() - Destroy the DPSECI object and release all its resources
24734 + * @mc_io: Pointer to MC portal's I/O object
24735 + * @dprc_token: Parent container token; '0' for default container
24736 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24737 + * @object_id: The object id; it must be a valid id within the container that
24738 + * created this object
24739 + *
24740 + * The function accepts the authentication token of the parent container that
24741 + * created the object (not the one that currently owns the object). The object
24742 + * is searched within parent using the provided 'object_id'.
24743 + * All tokens to the object must be closed before calling destroy.
24744 + *
24745 + * Return: '0' on success, error code otherwise
24746 + */
24747 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
24748 + u32 object_id)
24749 +{
24750 + struct mc_command cmd = { 0 };
24751 + struct dpseci_cmd_destroy *cmd_params;
24752 +
24753 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
24754 + cmd_flags,
24755 + dprc_token);
24756 + cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
24757 + cmd_params->object_id = cpu_to_le32(object_id);
24758 +
24759 + return mc_send_command(mc_io, &cmd);
24760 +}
24761 +
24762 +/**
24763 + * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
24764 + * @mc_io: Pointer to MC portal's I/O object
24765 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24766 + * @token: Token of DPSECI object
24767 + *
24768 + * Return: '0' on success, error code otherwise
24769 + */
24770 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
24771 +{
24772 + struct mc_command cmd = { 0 };
24773 +
24774 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
24775 + cmd_flags,
24776 + token);
24777 + return mc_send_command(mc_io, &cmd);
24778 +}
24779 +
24780 +/**
24781 + * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
24782 + * @mc_io: Pointer to MC portal's I/O object
24783 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24784 + * @token: Token of DPSECI object
24785 + *
24786 + * Return: '0' on success, error code otherwise
24787 + */
24788 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
24789 +{
24790 + struct mc_command cmd = { 0 };
24791 +
24792 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
24793 + cmd_flags,
24794 + token);
24795 +
24796 + return mc_send_command(mc_io, &cmd);
24797 +}
24798 +
24799 +/**
24800 + * dpseci_is_enabled() - Check if the DPSECI is enabled.
24801 + * @mc_io: Pointer to MC portal's I/O object
24802 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24803 + * @token: Token of DPSECI object
24804 + * @en: Returns '1' if object is enabled; '0' otherwise
24805 + *
24806 + * Return: '0' on success, error code otherwise
24807 + */
24808 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24809 + int *en)
24810 +{
24811 + struct mc_command cmd = { 0 };
24812 + struct dpseci_rsp_is_enabled *rsp_params;
24813 + int err;
24814 +
24815 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
24816 + cmd_flags,
24817 + token);
24818 + err = mc_send_command(mc_io, &cmd);
24819 + if (err)
24820 + return err;
24821 +
24822 + rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
24823 + *en = le32_to_cpu(rsp_params->is_enabled);
24824 +
24825 + return 0;
24826 +}
24827 +
24828 +/**
24829 + * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
24830 + * @mc_io: Pointer to MC portal's I/O object
24831 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24832 + * @token: Token of DPSECI object
24833 + *
24834 + * Return: '0' on success, error code otherwise
24835 + */
24836 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
24837 +{
24838 + struct mc_command cmd = { 0 };
24839 +
24840 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
24841 + cmd_flags,
24842 + token);
24843 +
24844 + return mc_send_command(mc_io, &cmd);
24845 +}
24846 +
24847 +/**
24848 + * dpseci_get_irq_enable() - Get overall interrupt state
24849 + * @mc_io: Pointer to MC portal's I/O object
24850 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24851 + * @token: Token of DPSECI object
24852 + * @irq_index: The interrupt index to configure
24853 + * @en: Returned Interrupt state - enable = 1, disable = 0
24854 + *
24855 + * Return: '0' on success, error code otherwise
24856 + */
24857 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24858 + u8 irq_index, u8 *en)
24859 +{
24860 + struct mc_command cmd = { 0 };
24861 + struct dpseci_cmd_irq_enable *cmd_params;
24862 + struct dpseci_rsp_get_irq_enable *rsp_params;
24863 + int err;
24864 +
24865 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
24866 + cmd_flags,
24867 + token);
24868 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
24869 + cmd_params->irq_index = irq_index;
24870 + err = mc_send_command(mc_io, &cmd);
24871 + if (err)
24872 + return err;
24873 +
24874 + rsp_params = (struct dpseci_rsp_get_irq_enable *)cmd.params;
24875 + *en = rsp_params->enable_state;
24876 +
24877 + return 0;
24878 +}
24879 +
24880 +/**
24881 + * dpseci_set_irq_enable() - Set overall interrupt state.
24882 + * @mc_io: Pointer to MC portal's I/O object
24883 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24884 + * @token: Token of DPSECI object
24885 + * @irq_index: The interrupt index to configure
24886 + * @en: Interrupt state - enable = 1, disable = 0
24887 + *
24888 + * Allows GPP software to control when interrupts are generated.
24889 + * Each interrupt can have up to 32 causes. The enable/disable controls the
24890 + * overall interrupt state. If the interrupt is disabled, no cause will trigger
24891 + * an interrupt.
24892 + *
24893 + * Return: '0' on success, error code otherwise
24894 + */
24895 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24896 + u8 irq_index, u8 en)
24897 +{
24898 + struct mc_command cmd = { 0 };
24899 + struct dpseci_cmd_irq_enable *cmd_params;
24900 +
24901 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
24902 + cmd_flags,
24903 + token);
24904 + cmd_params = (struct dpseci_cmd_irq_enable *)cmd.params;
24905 + cmd_params->irq_index = irq_index;
24906 + cmd_params->enable_state = en;
24907 +
24908 + return mc_send_command(mc_io, &cmd);
24909 +}
24910 +
24911 +/**
24912 + * dpseci_get_irq_mask() - Get interrupt mask.
24913 + * @mc_io: Pointer to MC portal's I/O object
24914 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24915 + * @token: Token of DPSECI object
24916 + * @irq_index: The interrupt index to configure
24917 + * @mask: Returned event mask to trigger interrupt
24918 + *
24919 + * Every interrupt can have up to 32 causes and the interrupt model supports
24920 + * masking/unmasking each cause independently.
24921 + *
24922 + * Return: '0' on success, error code otherwise
24923 + */
24924 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24925 + u8 irq_index, u32 *mask)
24926 +{
24927 + struct mc_command cmd = { 0 };
24928 + struct dpseci_cmd_irq_mask *cmd_params;
24929 + int err;
24930 +
24931 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
24932 + cmd_flags,
24933 + token);
24934 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
24935 + cmd_params->irq_index = irq_index;
24936 + err = mc_send_command(mc_io, &cmd);
24937 + if (err)
24938 + return err;
24939 +
24940 + *mask = le32_to_cpu(cmd_params->mask);
24941 +
24942 + return 0;
24943 +}
24944 +
24945 +/**
24946 + * dpseci_set_irq_mask() - Set interrupt mask.
24947 + * @mc_io: Pointer to MC portal's I/O object
24948 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24949 + * @token: Token of DPSECI object
24950 + * @irq_index: The interrupt index to configure
24951 + * @mask: event mask to trigger interrupt;
24952 + * each bit:
24953 + * 0 = ignore event
24954 + * 1 = consider event for asserting IRQ
24955 + *
24956 + * Every interrupt can have up to 32 causes and the interrupt model supports
24957 + * masking/unmasking each cause independently
24958 + *
24959 + * Return: '0' on success, error code otherwise
24960 + */
24961 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24962 + u8 irq_index, u32 mask)
24963 +{
24964 + struct mc_command cmd = { 0 };
24965 + struct dpseci_cmd_irq_mask *cmd_params;
24966 +
24967 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
24968 + cmd_flags,
24969 + token);
24970 + cmd_params = (struct dpseci_cmd_irq_mask *)cmd.params;
24971 + cmd_params->mask = cpu_to_le32(mask);
24972 + cmd_params->irq_index = irq_index;
24973 +
24974 + return mc_send_command(mc_io, &cmd);
24975 +}
24976 +
24977 +/**
24978 + * dpseci_get_irq_status() - Get the current status of any pending interrupts
24979 + * @mc_io: Pointer to MC portal's I/O object
24980 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
24981 + * @token: Token of DPSECI object
24982 + * @irq_index: The interrupt index to configure
24983 + * @status: Returned interrupts status - one bit per cause:
24984 + * 0 = no interrupt pending
24985 + * 1 = interrupt pending
24986 + *
24987 + * Return: '0' on success, error code otherwise
24988 + */
24989 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
24990 + u8 irq_index, u32 *status)
24991 +{
24992 + struct mc_command cmd = { 0 };
24993 + struct dpseci_cmd_irq_status *cmd_params;
24994 + int err;
24995 +
24996 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
24997 + cmd_flags,
24998 + token);
24999 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
25000 + cmd_params->status = cpu_to_le32(*status);
25001 + cmd_params->irq_index = irq_index;
25002 + err = mc_send_command(mc_io, &cmd);
25003 + if (err)
25004 + return err;
25005 +
25006 + *status = le32_to_cpu(cmd_params->status);
25007 +
25008 + return 0;
25009 +}
25010 +
25011 +/**
25012 + * dpseci_clear_irq_status() - Clear a pending interrupt's status
25013 + * @mc_io: Pointer to MC portal's I/O object
25014 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25015 + * @token: Token of DPSECI object
25016 + * @irq_index: The interrupt index to configure
25017 + * @status: bits to clear (W1C) - one bit per cause:
25018 + * 0 = don't change
25019 + * 1 = clear status bit
25020 + *
25021 + * Return: '0' on success, error code otherwise
25022 + */
25023 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25024 + u8 irq_index, u32 status)
25025 +{
25026 + struct mc_command cmd = { 0 };
25027 + struct dpseci_cmd_irq_status *cmd_params;
25028 +
25029 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
25030 + cmd_flags,
25031 + token);
25032 + cmd_params = (struct dpseci_cmd_irq_status *)cmd.params;
25033 + cmd_params->status = cpu_to_le32(status);
25034 + cmd_params->irq_index = irq_index;
25035 +
25036 + return mc_send_command(mc_io, &cmd);
25037 +}
25038 +
25039 +/**
25040 + * dpseci_get_attributes() - Retrieve DPSECI attributes
25041 + * @mc_io: Pointer to MC portal's I/O object
25042 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25043 + * @token: Token of DPSECI object
25044 + * @attr: Returned object's attributes
25045 + *
25046 + * Return: '0' on success, error code otherwise
25047 + */
25048 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25049 + struct dpseci_attr *attr)
25050 +{
25051 + struct mc_command cmd = { 0 };
25052 + struct dpseci_rsp_get_attributes *rsp_params;
25053 + int err;
25054 +
25055 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
25056 + cmd_flags,
25057 + token);
25058 + err = mc_send_command(mc_io, &cmd);
25059 + if (err)
25060 + return err;
25061 +
25062 + rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
25063 + attr->id = le32_to_cpu(rsp_params->id);
25064 + attr->num_tx_queues = rsp_params->num_tx_queues;
25065 + attr->num_rx_queues = rsp_params->num_rx_queues;
25066 + attr->options = le32_to_cpu(rsp_params->options);
25067 +
25068 + return 0;
25069 +}
25070 +
25071 +/**
25072 + * dpseci_set_rx_queue() - Set Rx queue configuration
25073 + * @mc_io: Pointer to MC portal's I/O object
25074 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25075 + * @token: Token of DPSECI object
25076 + * @queue: Select the queue relative to number of priorities configured at
25077 + * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
25078 + * Rx queues identically.
25079 + * @cfg: Rx queue configuration
25080 + *
25081 + * Return: '0' on success, error code otherwise
25082 + */
25083 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25084 + u8 queue, const struct dpseci_rx_queue_cfg *cfg)
25085 +{
25086 + struct mc_command cmd = { 0 };
25087 + struct dpseci_cmd_queue *cmd_params;
25088 +
25089 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
25090 + cmd_flags,
25091 + token);
25092 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
25093 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
25094 + cmd_params->priority = cfg->dest_cfg.priority;
25095 + cmd_params->queue = queue;
25096 + cmd_params->dest_type = cfg->dest_cfg.dest_type;
25097 + cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
25098 + cmd_params->options = cpu_to_le32(cfg->options);
25099 + cmd_params->order_preservation_en =
25100 + cpu_to_le32(cfg->order_preservation_en);
25101 +
25102 + return mc_send_command(mc_io, &cmd);
25103 +}
25104 +
25105 +/**
25106 + * dpseci_get_rx_queue() - Retrieve Rx queue attributes
25107 + * @mc_io: Pointer to MC portal's I/O object
25108 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25109 + * @token: Token of DPSECI object
25110 + * @queue: Select the queue relative to number of priorities configured at
25111 + * DPSECI creation
25112 + * @attr: Returned Rx queue attributes
25113 + *
25114 + * Return: '0' on success, error code otherwise
25115 + */
25116 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25117 + u8 queue, struct dpseci_rx_queue_attr *attr)
25118 +{
25119 + struct mc_command cmd = { 0 };
25120 + struct dpseci_cmd_queue *cmd_params;
25121 + int err;
25122 +
25123 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
25124 + cmd_flags,
25125 + token);
25126 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
25127 + cmd_params->queue = queue;
25128 + err = mc_send_command(mc_io, &cmd);
25129 + if (err)
25130 + return err;
25131 +
25132 + attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
25133 + attr->dest_cfg.priority = cmd_params->priority;
25134 + attr->dest_cfg.dest_type = cmd_params->dest_type;
25135 + attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
25136 + attr->fqid = le32_to_cpu(cmd_params->fqid);
25137 + attr->order_preservation_en =
25138 + le32_to_cpu(cmd_params->order_preservation_en);
25139 +
25140 + return 0;
25141 +}
25142 +
25143 +/**
25144 + * dpseci_get_tx_queue() - Retrieve Tx queue attributes
25145 + * @mc_io: Pointer to MC portal's I/O object
25146 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25147 + * @token: Token of DPSECI object
25148 + * @queue: Select the queue relative to number of priorities configured at
25149 + * DPSECI creation
25150 + * @attr: Returned Tx queue attributes
25151 + *
25152 + * Return: '0' on success, error code otherwise
25153 + */
25154 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25155 + u8 queue, struct dpseci_tx_queue_attr *attr)
25156 +{
25157 + struct mc_command cmd = { 0 };
25158 + struct dpseci_cmd_queue *cmd_params;
25159 + struct dpseci_rsp_get_tx_queue *rsp_params;
25160 + int err;
25161 +
25162 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
25163 + cmd_flags,
25164 + token);
25165 + cmd_params = (struct dpseci_cmd_queue *)cmd.params;
25166 + cmd_params->queue = queue;
25167 + err = mc_send_command(mc_io, &cmd);
25168 + if (err)
25169 + return err;
25170 +
25171 + rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
25172 + attr->fqid = le32_to_cpu(rsp_params->fqid);
25173 + attr->priority = rsp_params->priority;
25174 +
25175 + return 0;
25176 +}
25177 +
25178 +/**
25179 + * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
25180 + * @mc_io: Pointer to MC portal's I/O object
25181 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25182 + * @token: Token of DPSECI object
25183 + * @attr: Returned SEC attributes
25184 + *
25185 + * Return: '0' on success, error code otherwise
25186 + */
25187 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25188 + struct dpseci_sec_attr *attr)
25189 +{
25190 + struct mc_command cmd = { 0 };
25191 + struct dpseci_rsp_get_sec_attr *rsp_params;
25192 + int err;
25193 +
25194 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
25195 + cmd_flags,
25196 + token);
25197 + err = mc_send_command(mc_io, &cmd);
25198 + if (err)
25199 + return err;
25200 +
25201 + rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
25202 + attr->ip_id = le16_to_cpu(rsp_params->ip_id);
25203 + attr->major_rev = rsp_params->major_rev;
25204 + attr->minor_rev = rsp_params->minor_rev;
25205 + attr->era = rsp_params->era;
25206 + attr->deco_num = rsp_params->deco_num;
25207 + attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
25208 + attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
25209 + attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
25210 + attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
25211 + attr->crc_acc_num = rsp_params->crc_acc_num;
25212 + attr->pk_acc_num = rsp_params->pk_acc_num;
25213 + attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
25214 + attr->rng_acc_num = rsp_params->rng_acc_num;
25215 + attr->md_acc_num = rsp_params->md_acc_num;
25216 + attr->arc4_acc_num = rsp_params->arc4_acc_num;
25217 + attr->des_acc_num = rsp_params->des_acc_num;
25218 + attr->aes_acc_num = rsp_params->aes_acc_num;
25219 +
25220 + return 0;
25221 +}
25222 +
25223 +/**
25224 + * dpseci_get_sec_counters() - Retrieve SEC accelerator counters
25225 + * @mc_io: Pointer to MC portal's I/O object
25226 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25227 + * @token: Token of DPSECI object
25228 + * @counters: Returned SEC counters
25229 + *
25230 + * Return: '0' on success, error code otherwise
25231 + */
25232 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25233 + struct dpseci_sec_counters *counters)
25234 +{
25235 + struct mc_command cmd = { 0 };
25236 + struct dpseci_rsp_get_sec_counters *rsp_params;
25237 + int err;
25238 +
25239 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
25240 + cmd_flags,
25241 + token);
25242 + err = mc_send_command(mc_io, &cmd);
25243 + if (err)
25244 + return err;
25245 +
25246 + rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
25247 + counters->dequeued_requests =
25248 + le64_to_cpu(rsp_params->dequeued_requests);
25249 + counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
25250 + counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
25251 + counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
25252 + counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
25253 + counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
25254 + counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
25255 +
25256 + return 0;
25257 +}
25258 +
25259 +/**
25260 + * dpseci_get_api_version() - Get Data Path SEC Interface API version
25261 + * @mc_io: Pointer to MC portal's I/O object
25262 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25263 + * @major_ver: Major version of data path sec API
25264 + * @minor_ver: Minor version of data path sec API
25265 + *
25266 + * Return: '0' on success, error code otherwise
25267 + */
25268 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
25269 + u16 *major_ver, u16 *minor_ver)
25270 +{
25271 + struct mc_command cmd = { 0 };
25272 + struct dpseci_rsp_get_api_version *rsp_params;
25273 + int err;
25274 +
25275 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
25276 + cmd_flags, 0);
25277 + err = mc_send_command(mc_io, &cmd);
25278 + if (err)
25279 + return err;
25280 +
25281 + rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
25282 + *major_ver = le16_to_cpu(rsp_params->major);
25283 + *minor_ver = le16_to_cpu(rsp_params->minor);
25284 +
25285 + return 0;
25286 +}
25287 +
25288 +/**
25289 + * dpseci_set_opr() - Set Order Restoration configuration
25290 + * @mc_io: Pointer to MC portal's I/O object
25291 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25292 + * @token: Token of DPSECI object
25293 + * @index: The queue index
25294 + * @options: Configuration mode options; can be OPR_OPT_CREATE or
25295 + * OPR_OPT_RETIRE
25296 + * @cfg: Configuration options for the OPR
25297 + *
25298 + * Return: '0' on success, error code otherwise
25299 + */
25300 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25301 + u8 options, struct opr_cfg *cfg)
25302 +{
25303 + struct mc_command cmd = { 0 };
25304 + struct dpseci_cmd_opr *cmd_params;
25305 +
25306 + cmd.header = mc_encode_cmd_header(
25307 + DPSECI_CMDID_SET_OPR,
25308 + cmd_flags,
25309 + token);
25310 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
25311 + cmd_params->index = index;
25312 + cmd_params->options = options;
25313 + cmd_params->oloe = cfg->oloe;
25314 + cmd_params->oeane = cfg->oeane;
25315 + cmd_params->olws = cfg->olws;
25316 + cmd_params->oa = cfg->oa;
25317 + cmd_params->oprrws = cfg->oprrws;
25318 +
25319 + return mc_send_command(mc_io, &cmd);
25320 +}
25321 +
25322 +/**
25323 + * dpseci_get_opr() - Retrieve Order Restoration config and query
25324 + * @mc_io: Pointer to MC portal's I/O object
25325 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25326 + * @token: Token of DPSECI object
25327 + * @index: The queue index
25328 + * @cfg: Returned OPR configuration
25329 + * @qry: Returned OPR query
25330 + *
25331 + * Return: '0' on success, error code otherwise
25332 + */
25333 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25334 + struct opr_cfg *cfg, struct opr_qry *qry)
25335 +{
25336 + struct mc_command cmd = { 0 };
25337 + struct dpseci_cmd_opr *cmd_params;
25338 + struct dpseci_rsp_get_opr *rsp_params;
25339 + int err;
25340 +
25341 + cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
25342 + cmd_flags,
25343 + token);
25344 + cmd_params = (struct dpseci_cmd_opr *)cmd.params;
25345 + cmd_params->index = index;
25346 + err = mc_send_command(mc_io, &cmd);
25347 + if (err)
25348 + return err;
25349 +
25350 + rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
25351 + qry->rip = dpseci_get_field(rsp_params->rip_enable, OPR_RIP);
25352 + qry->enable = dpseci_get_field(rsp_params->rip_enable, OPR_ENABLE);
25353 + cfg->oloe = rsp_params->oloe;
25354 + cfg->oeane = rsp_params->oeane;
25355 + cfg->olws = rsp_params->olws;
25356 + cfg->oa = rsp_params->oa;
25357 + cfg->oprrws = rsp_params->oprrws;
25358 + qry->nesn = le16_to_cpu(rsp_params->nesn);
25359 + qry->ndsn = le16_to_cpu(rsp_params->ndsn);
25360 + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
25361 + qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_TSEQ_NLIS);
25362 + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
25363 + qry->hseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, OPR_HSEQ_NLIS);
25364 + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
25365 + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
25366 + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
25367 + qry->opr_id = le16_to_cpu(rsp_params->opr_id);
25368 +
25369 + return 0;
25370 +}
25371 +
25372 +/**
25373 + * dpseci_set_congestion_notification() - Set congestion group
25374 + * notification configuration
25375 + * @mc_io: Pointer to MC portal's I/O object
25376 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25377 + * @token: Token of DPSECI object
25378 + * @cfg: congestion notification configuration
25379 + *
25380 + * Return: '0' on success, error code otherwise
25381 + */
25382 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25383 + u16 token, const struct dpseci_congestion_notification_cfg *cfg)
25384 +{
25385 + struct mc_command cmd = { 0 };
25386 + struct dpseci_cmd_congestion_notification *cmd_params;
25387 +
25388 + cmd.header = mc_encode_cmd_header(
25389 + DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
25390 + cmd_flags,
25391 + token);
25392 + cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
25393 + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
25394 + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
25395 + cmd_params->priority = cfg->dest_cfg.priority;
25396 + dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
25397 + cfg->dest_cfg.dest_type);
25398 + dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
25399 + cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
25400 + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
25401 + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
25402 + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
25403 +
25404 + return mc_send_command(mc_io, &cmd);
25405 +}
25406 +
25407 +/**
25408 + * dpseci_get_congestion_notification() - Get congestion group notification
25409 + * configuration
25410 + * @mc_io: Pointer to MC portal's I/O object
25411 + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
25412 + * @token: Token of DPSECI object
25413 + * @cfg: congestion notification configuration
25414 + *
25415 + * Return: '0' on success, error code otherwise
25416 + */
25417 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25418 + u16 token, struct dpseci_congestion_notification_cfg *cfg)
25419 +{
25420 + struct mc_command cmd = { 0 };
25421 + struct dpseci_cmd_congestion_notification *rsp_params;
25422 + int err;
25423 +
25424 + cmd.header = mc_encode_cmd_header(
25425 + DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
25426 + cmd_flags,
25427 + token);
25428 + err = mc_send_command(mc_io, &cmd);
25429 + if (err)
25430 + return err;
25431 +
25432 + rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
25433 + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
25434 + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
25435 + cfg->dest_cfg.priority = rsp_params->priority;
25436 + cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
25437 + CGN_DEST_TYPE);
25438 + cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
25439 + cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
25440 + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
25441 + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
25442 + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
25443 +
25444 + return 0;
25445 +}
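Taken together, these commands follow the same open/query/enable pattern as other DPAA2 objects. A minimal sketch of how a caller might drive this API — not the probe code added by this patch, and assuming the MC portal (mc_io) and object id come from the fsl-mc bus probe — could look like:

static int dpseci_bring_up(struct fsl_mc_io *mc_io, int dpseci_id, u16 *token)
{
	struct dpseci_attr attr;
	u16 major, minor;
	int err;

	err = dpseci_open(mc_io, 0, dpseci_id, token);
	if (err)
		return err;

	err = dpseci_get_api_version(mc_io, 0, &major, &minor);
	if (!err)
		err = dpseci_get_attributes(mc_io, 0, *token, &attr);
	if (!err)
		err = dpseci_enable(mc_io, 0, *token);
	if (err)
		dpseci_close(mc_io, 0, *token);	/* drop the control session on failure */

	return err;
}

The returned token remains valid for later queue configuration calls (dpseci_set_rx_queue(), dpseci_get_tx_queue(), ...) until dpseci_close() is issued.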
25446 --- /dev/null
25447 +++ b/drivers/crypto/caam/dpseci.h
25448 @@ -0,0 +1,395 @@
25449 +/*
25450 + * Copyright 2013-2016 Freescale Semiconductor Inc.
25451 + * Copyright 2017 NXP
25452 + *
25453 + * Redistribution and use in source and binary forms, with or without
25454 + * modification, are permitted provided that the following conditions are met:
25455 + * * Redistributions of source code must retain the above copyright
25456 + * notice, this list of conditions and the following disclaimer.
25457 + * * Redistributions in binary form must reproduce the above copyright
25458 + * notice, this list of conditions and the following disclaimer in the
25459 + * documentation and/or other materials provided with the distribution.
25460 + * * Neither the names of the above-listed copyright holders nor the
25461 + * names of any contributors may be used to endorse or promote products
25462 + * derived from this software without specific prior written permission.
25463 + *
25464 + *
25465 + * ALTERNATIVELY, this software may be distributed under the terms of the
25466 + * GNU General Public License ("GPL") as published by the Free Software
25467 + * Foundation, either version 2 of that License or (at your option) any
25468 + * later version.
25469 + *
25470 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25471 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25472 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25473 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
25474 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25475 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25476 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25477 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25478 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25479 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25480 + * POSSIBILITY OF SUCH DAMAGE.
25481 + */
25482 +#ifndef _DPSECI_H_
25483 +#define _DPSECI_H_
25484 +
25485 +/*
25486 + * Data Path SEC Interface API
25487 + * Contains initialization APIs and runtime control APIs for DPSECI
25488 + */
25489 +
25490 +struct fsl_mc_io;
25491 +struct opr_cfg;
25492 +struct opr_qry;
25493 +
25494 +/**
25495 + * General DPSECI macros
25496 + */
25497 +
25498 +/**
25499 + * Maximum number of Tx/Rx priorities per DPSECI object
25500 + */
25501 +#define DPSECI_PRIO_NUM 8
25502 +
25503 +/**
25504 + * All queues considered; see dpseci_set_rx_queue()
25505 + */
25506 +#define DPSECI_ALL_QUEUES (u8)(-1)
25507 +
25508 +int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
25509 + u16 *token);
25510 +
25511 +int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25512 +
25513 +/**
25514 + * Enable the Congestion Group support
25515 + */
25516 +#define DPSECI_OPT_HAS_CG 0x000020
25517 +
25518 +/**
25519 + * Enable the Order Restoration support
25520 + */
25521 +#define DPSECI_OPT_HAS_OPR 0x000040
25522 +
25523 +/**
25524 + * Order Point Records are shared for the entire DPSECI
25525 + */
25526 +#define DPSECI_OPT_OPR_SHARED 0x000080
25527 +
25528 +/**
25529 + * struct dpseci_cfg - Structure representing DPSECI configuration
25530 + * @options: Any combination of the following options:
25531 + * DPSECI_OPT_HAS_CG
25532 + * DPSECI_OPT_HAS_OPR
25533 + * DPSECI_OPT_OPR_SHARED
25534 + * @num_tx_queues: number of queues towards the SEC
25535 + * @num_rx_queues: number of queues back from the SEC
25536 + * @priorities: Priorities for the SEC hardware processing;
25537 + * each place in the array is the priority of the tx queue
25538 + * towards the SEC;
25539 + * valid priorities are configured with values 1-8;
25540 + */
25541 +struct dpseci_cfg {
25542 + u32 options;
25543 + u8 num_tx_queues;
25544 + u8 num_rx_queues;
25545 + u8 priorities[DPSECI_PRIO_NUM];
25546 +};
25547 +
25548 +int dpseci_create(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
25549 + const struct dpseci_cfg *cfg, u32 *obj_id);
25550 +
25551 +int dpseci_destroy(struct fsl_mc_io *mc_io, u16 dprc_token, u32 cmd_flags,
25552 + u32 object_id);
25553 +
25554 +int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25555 +
25556 +int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25557 +
25558 +int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25559 + int *en);
25560 +
25561 +int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
25562 +
25563 +int dpseci_get_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25564 + u8 irq_index, u8 *en);
25565 +
25566 +int dpseci_set_irq_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25567 + u8 irq_index, u8 en);
25568 +
25569 +int dpseci_get_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25570 + u8 irq_index, u32 *mask);
25571 +
25572 +int dpseci_set_irq_mask(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25573 + u8 irq_index, u32 mask);
25574 +
25575 +int dpseci_get_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25576 + u8 irq_index, u32 *status);
25577 +
25578 +int dpseci_clear_irq_status(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25579 + u8 irq_index, u32 status);
25580 +
25581 +/**
25582 + * struct dpseci_attr - Structure representing DPSECI attributes
25583 + * @id: DPSECI object ID
25584 + * @num_tx_queues: number of queues towards the SEC
25585 + * @num_rx_queues: number of queues back from the SEC
25586 + * @options: any combination of the following options:
25587 + * DPSECI_OPT_HAS_CG
25588 + * DPSECI_OPT_HAS_OPR
25589 + * DPSECI_OPT_OPR_SHARED
25590 + */
25591 +struct dpseci_attr {
25592 + int id;
25593 + u8 num_tx_queues;
25594 + u8 num_rx_queues;
25595 + u32 options;
25596 +};
25597 +
25598 +int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25599 + struct dpseci_attr *attr);
25600 +
25601 +/**
25602 + * enum dpseci_dest - DPSECI destination types
25603 + * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
25604 + * and does not generate FQDAN notifications; user is expected to dequeue
25605 + * from the queue based on polling or other user-defined method
25606 + * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
25607 + * notifications to the specified DPIO; user is expected to dequeue from
25608 + * the queue only after notification is received
25609 + * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
25610 + * FQDAN notifications, but is connected to the specified DPCON object;
25611 + * user is expected to dequeue from the DPCON channel
25612 + */
25613 +enum dpseci_dest {
25614 + DPSECI_DEST_NONE = 0,
25615 + DPSECI_DEST_DPIO,
25616 + DPSECI_DEST_DPCON
25617 +};
25618 +
25619 +/**
25620 + * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
25621 + * @dest_type: Destination type
25622 + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
25623 + * @priority: Priority selection within the DPIO or DPCON channel; valid values
25624 + * are 0-1 or 0-7, depending on the number of priorities in that channel;
25625 + * not relevant for 'DPSECI_DEST_NONE' option
25626 + */
25627 +struct dpseci_dest_cfg {
25628 + enum dpseci_dest dest_type;
25629 + int dest_id;
25630 + u8 priority;
25631 +};
25632 +
25633 +/**
25634 + * DPSECI queue modification options
25635 + */
25636 +
25637 +/**
25638 + * Select to modify the user's context associated with the queue
25639 + */
25640 +#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
25641 +
25642 +/**
25643 + * Select to modify the queue's destination
25644 + */
25645 +#define DPSECI_QUEUE_OPT_DEST 0x00000002
25646 +
25647 +/**
25648 + * Select to modify the queue's order preservation
25649 + */
25650 +#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
25651 +
25652 +/**
25653 + * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
25654 + * @options: Flags representing the suggested modifications to the queue;
25655 + * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
25656 + * @order_preservation_en: order preservation configuration for the rx queue
25657 + * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
25658 + * @user_ctx: User context value provided in the frame descriptor of each
25659 + * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
25660 + * in 'options'
25661 + * @dest_cfg: Queue destination parameters; valid only if
25662 + * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
25663 + */
25664 +struct dpseci_rx_queue_cfg {
25665 + u32 options;
25666 + int order_preservation_en;
25667 + u64 user_ctx;
25668 + struct dpseci_dest_cfg dest_cfg;
25669 +};
25670 +
25671 +int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25672 + u8 queue, const struct dpseci_rx_queue_cfg *cfg);
25673 +
25674 +/**
25675 + * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
25676 + * @user_ctx: User context value provided in the frame descriptor of each
25677 + * dequeued frame
25678 + * @order_preservation_en: Status of the order preservation configuration on the
25679 + * queue
25680 + * @dest_cfg: Queue destination configuration
25681 + * @fqid: Virtual FQID value to be used for dequeue operations
25682 + */
25683 +struct dpseci_rx_queue_attr {
25684 + u64 user_ctx;
25685 + int order_preservation_en;
25686 + struct dpseci_dest_cfg dest_cfg;
25687 + u32 fqid;
25688 +};
25689 +
25690 +int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25691 + u8 queue, struct dpseci_rx_queue_attr *attr);
25692 +
25693 +/**
25694 + * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
25695 + * @fqid: Virtual FQID to be used for sending frames to SEC hardware
25696 + * @priority: SEC hardware processing priority for the queue
25697 + */
25698 +struct dpseci_tx_queue_attr {
25699 + u32 fqid;
25700 + u8 priority;
25701 +};
25702 +
25703 +int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25704 + u8 queue, struct dpseci_tx_queue_attr *attr);
25705 +
25706 +/**
25707 + * struct dpseci_sec_attr - Structure representing attributes of the SEC
25708 + * hardware accelerator
25709 + * @ip_id: ID for SEC
25710 + * @major_rev: Major revision number for SEC
25711 + * @minor_rev: Minor revision number for SEC
25712 + * @era: SEC Era
25713 + * @deco_num: The number of copies of the DECO that are implemented in this
25714 + * version of SEC
25715 + * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
25716 + * version of SEC
25717 + * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
25718 + * version of SEC
25719 + * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
25720 + * implemented in this version of SEC
25721 + * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
25722 + * implemented in this version of SEC
25723 + * @crc_acc_num: The number of copies of the CRC module that are implemented in
25724 + * this version of SEC
25725 + * @pk_acc_num: The number of copies of the Public Key module that are
25726 + * implemented in this version of SEC
25727 + * @kasumi_acc_num: The number of copies of the Kasumi module that are
25728 + * implemented in this version of SEC
25729 + * @rng_acc_num: The number of copies of the Random Number Generator that are
25730 + * implemented in this version of SEC
25731 + * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
25732 + * implemented in this version of SEC
25733 + * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
25734 + * in this version of SEC
25735 + * @des_acc_num: The number of copies of the DES module that are implemented in
25736 + * this version of SEC
25737 + * @aes_acc_num: The number of copies of the AES module that are implemented in
25738 + * this version of SEC
25739 + **/
25740 +struct dpseci_sec_attr {
25741 + u16 ip_id;
25742 + u8 major_rev;
25743 + u8 minor_rev;
25744 + u8 era;
25745 + u8 deco_num;
25746 + u8 zuc_auth_acc_num;
25747 + u8 zuc_enc_acc_num;
25748 + u8 snow_f8_acc_num;
25749 + u8 snow_f9_acc_num;
25750 + u8 crc_acc_num;
25751 + u8 pk_acc_num;
25752 + u8 kasumi_acc_num;
25753 + u8 rng_acc_num;
25754 + u8 md_acc_num;
25755 + u8 arc4_acc_num;
25756 + u8 des_acc_num;
25757 + u8 aes_acc_num;
25758 +};
25759 +
25760 +int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25761 + struct dpseci_sec_attr *attr);
25762 +
25763 +/**
25764 + * struct dpseci_sec_counters - Structure representing global SEC counters and
25765 + * not per dpseci counters
25766 + * @dequeued_requests: Number of Requests Dequeued
25767 + * @ob_enc_requests: Number of Outbound Encrypt Requests
25768 + * @ib_dec_requests: Number of Inbound Decrypt Requests
25769 + * @ob_enc_bytes: Number of Outbound Bytes Encrypted
25770 + * @ob_prot_bytes: Number of Outbound Bytes Protected
25771 + * @ib_dec_bytes: Number of Inbound Bytes Decrypted
25772 + * @ib_valid_bytes: Number of Inbound Bytes Validated
25773 + */
25774 +struct dpseci_sec_counters {
25775 + u64 dequeued_requests;
25776 + u64 ob_enc_requests;
25777 + u64 ib_dec_requests;
25778 + u64 ob_enc_bytes;
25779 + u64 ob_prot_bytes;
25780 + u64 ib_dec_bytes;
25781 + u64 ib_valid_bytes;
25782 +};
25783 +
25784 +int dpseci_get_sec_counters(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
25785 + struct dpseci_sec_counters *counters);
25786 +
25787 +int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
25788 + u16 *major_ver, u16 *minor_ver);
25789 +
25790 +int dpseci_set_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25791 + u8 options, struct opr_cfg *cfg);
25792 +
25793 +int dpseci_get_opr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 index,
25794 + struct opr_cfg *cfg, struct opr_qry *qry);
25795 +
25796 +/**
25797 + * enum dpseci_congestion_unit - DPSECI congestion units
25798 + * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
25799 + * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
25800 + */
25801 +enum dpseci_congestion_unit {
25802 + DPSECI_CONGESTION_UNIT_BYTES = 0,
25803 + DPSECI_CONGESTION_UNIT_FRAMES
25804 +};
25805 +
25806 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
25807 +#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
25808 +#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
25809 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
25810 +#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
25811 +#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
25812 +
25813 +/**
25814 + * struct dpseci_congestion_notification_cfg - congestion notification
25815 + * configuration
25816 + * @units: units type
25817 + * @threshold_entry: above this threshold we enter a congestion state.
25818 + * Set it to '0' to disable it
25819 + * @threshold_exit: below this threshold we exit the congestion state.
25820 + * @message_ctx: The context that will be part of the CSCN message
25821 + * @message_iova: I/O virtual address (must be in DMA-able memory),
25822 + * must be 16B aligned;
25823 + * @dest_cfg: CSCN can be sent to either a DPIO or a DPCON WQ channel
25824 + * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
25825 + * values
25826 + */
25827 +struct dpseci_congestion_notification_cfg {
25828 + enum dpseci_congestion_unit units;
25829 + u32 threshold_entry;
25830 + u32 threshold_exit;
25831 + u64 message_ctx;
25832 + u64 message_iova;
25833 + struct dpseci_dest_cfg dest_cfg;
25834 + u16 notification_mode;
25835 +};
25836 +
25837 +int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25838 + u16 token, const struct dpseci_congestion_notification_cfg *cfg);
25839 +
25840 +int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
25841 + u16 token, struct dpseci_congestion_notification_cfg *cfg);
25842 +
25843 +#endif /* _DPSECI_H_ */
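The Rx-queue structures declared in this header are typically filled in before calling dpseci_set_rx_queue(). A hedged example — the dpio_id, priority and user-context values are placeholders, not values taken from this patch — of configuring all Rx queues for DPIO notifications:

static int dpseci_setup_rx(struct fsl_mc_io *mc_io, u16 token, int dpio_id)
{
	struct dpseci_rx_queue_cfg cfg = {
		.options = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX,
		.user_ctx = 0x12345678,                   /* placeholder context value */
		.dest_cfg = {
			.dest_type = DPSECI_DEST_DPIO,
			.dest_id = dpio_id,
			.priority = 0,
		},
	};

	/* DPSECI_ALL_QUEUES programs every Rx queue with the same settings */
	return dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &cfg);
}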
25844 --- /dev/null
25845 +++ b/drivers/crypto/caam/dpseci_cmd.h
25846 @@ -0,0 +1,261 @@
25847 +/*
25848 + * Copyright 2013-2016 Freescale Semiconductor Inc.
25849 + * Copyright 2017 NXP
25850 + *
25851 + * Redistribution and use in source and binary forms, with or without
25852 + * modification, are permitted provided that the following conditions are met:
25853 + * * Redistributions of source code must retain the above copyright
25854 + * notice, this list of conditions and the following disclaimer.
25855 + * * Redistributions in binary form must reproduce the above copyright
25856 + * notice, this list of conditions and the following disclaimer in the
25857 + * documentation and/or other materials provided with the distribution.
25858 + * * Neither the names of the above-listed copyright holders nor the
25859 + * names of any contributors may be used to endorse or promote products
25860 + * derived from this software without specific prior written permission.
25861 + *
25862 + *
25863 + * ALTERNATIVELY, this software may be distributed under the terms of the
25864 + * GNU General Public License ("GPL") as published by the Free Software
25865 + * Foundation, either version 2 of that License or (at your option) any
25866 + * later version.
25867 + *
25868 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25869 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25870 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25871 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
25872 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25873 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25874 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25875 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25876 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25877 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25878 + * POSSIBILITY OF SUCH DAMAGE.
25879 + */
25880 +
25881 +#ifndef _DPSECI_CMD_H_
25882 +#define _DPSECI_CMD_H_
25883 +
25884 +/* DPSECI Version */
25885 +#define DPSECI_VER_MAJOR 5
25886 +#define DPSECI_VER_MINOR 1
25887 +
25888 +#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
25889 +#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
25890 +
25891 +/* Command IDs */
25892 +
25893 +#define DPSECI_CMDID_CLOSE 0x8001
25894 +#define DPSECI_CMDID_OPEN 0x8091
25895 +#define DPSECI_CMDID_CREATE 0x9092
25896 +#define DPSECI_CMDID_DESTROY 0x9891
25897 +#define DPSECI_CMDID_GET_API_VERSION 0xa091
25898 +
25899 +#define DPSECI_CMDID_ENABLE 0x0021
25900 +#define DPSECI_CMDID_DISABLE 0x0031
25901 +#define DPSECI_CMDID_GET_ATTR 0x0041
25902 +#define DPSECI_CMDID_RESET 0x0051
25903 +#define DPSECI_CMDID_IS_ENABLED 0x0061
25904 +
25905 +#define DPSECI_CMDID_SET_IRQ_ENABLE 0x0121
25906 +#define DPSECI_CMDID_GET_IRQ_ENABLE 0x0131
25907 +#define DPSECI_CMDID_SET_IRQ_MASK 0x0141
25908 +#define DPSECI_CMDID_GET_IRQ_MASK 0x0151
25909 +#define DPSECI_CMDID_GET_IRQ_STATUS 0x0161
25910 +#define DPSECI_CMDID_CLEAR_IRQ_STATUS 0x0171
25911 +
25912 +#define DPSECI_CMDID_SET_RX_QUEUE 0x1941
25913 +#define DPSECI_CMDID_GET_RX_QUEUE 0x1961
25914 +#define DPSECI_CMDID_GET_TX_QUEUE 0x1971
25915 +#define DPSECI_CMDID_GET_SEC_ATTR 0x1981
25916 +#define DPSECI_CMDID_GET_SEC_COUNTERS 0x1991
25917 +#define DPSECI_CMDID_SET_OPR 0x19A1
25918 +#define DPSECI_CMDID_GET_OPR 0x19B1
25919 +
25920 +#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION 0x1701
25921 +#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION 0x1711
25922 +
25923 +/* Macros for accessing command fields smaller than 1 byte */
25924 +#define DPSECI_MASK(field) \
25925 + GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
25926 + DPSECI_##field##_SHIFT)
25927 +
25928 +#define dpseci_set_field(var, field, val) \
25929 + ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
25930 +
25931 +#define dpseci_get_field(var, field) \
25932 + (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
25933 +
25934 +struct dpseci_cmd_open {
25935 + __le32 dpseci_id;
25936 +};
25937 +
25938 +struct dpseci_cmd_create {
25939 + u8 priorities[8];
25940 + u8 num_tx_queues;
25941 + u8 num_rx_queues;
25942 + __le16 pad;
25943 + __le32 options;
25944 +};
25945 +
25946 +struct dpseci_cmd_destroy {
25947 + __le32 object_id;
25948 +};
25949 +
25950 +struct dpseci_rsp_is_enabled {
25951 + __le32 is_enabled;
25952 +};
25953 +
25954 +struct dpseci_cmd_irq_enable {
25955 + u8 enable_state;
25956 + u8 pad[3];
25957 + u8 irq_index;
25958 +};
25959 +
25960 +struct dpseci_rsp_get_irq_enable {
25961 + u8 enable_state;
25962 +};
25963 +
25964 +struct dpseci_cmd_irq_mask {
25965 + __le32 mask;
25966 + u8 irq_index;
25967 +};
25968 +
25969 +struct dpseci_cmd_irq_status {
25970 + __le32 status;
25971 + u8 irq_index;
25972 +};
25973 +
25974 +struct dpseci_rsp_get_attributes {
25975 + __le32 id;
25976 + __le32 pad0;
25977 + u8 num_tx_queues;
25978 + u8 num_rx_queues;
25979 + u8 pad1[6];
25980 + __le32 options;
25981 +};
25982 +
25983 +struct dpseci_cmd_queue {
25984 + __le32 dest_id;
25985 + u8 priority;
25986 + u8 queue;
25987 + u8 dest_type;
25988 + u8 pad;
25989 + __le64 user_ctx;
25990 + union {
25991 + __le32 options;
25992 + __le32 fqid;
25993 + };
25994 + __le32 order_preservation_en;
25995 +};
25996 +
25997 +struct dpseci_rsp_get_tx_queue {
25998 + __le32 pad;
25999 + __le32 fqid;
26000 + u8 priority;
26001 +};
26002 +
26003 +struct dpseci_rsp_get_sec_attr {
26004 + __le16 ip_id;
26005 + u8 major_rev;
26006 + u8 minor_rev;
26007 + u8 era;
26008 + u8 pad0[3];
26009 + u8 deco_num;
26010 + u8 zuc_auth_acc_num;
26011 + u8 zuc_enc_acc_num;
26012 + u8 pad1;
26013 + u8 snow_f8_acc_num;
26014 + u8 snow_f9_acc_num;
26015 + u8 crc_acc_num;
26016 + u8 pad2;
26017 + u8 pk_acc_num;
26018 + u8 kasumi_acc_num;
26019 + u8 rng_acc_num;
26020 + u8 pad3;
26021 + u8 md_acc_num;
26022 + u8 arc4_acc_num;
26023 + u8 des_acc_num;
26024 + u8 aes_acc_num;
26025 +};
26026 +
26027 +struct dpseci_rsp_get_sec_counters {
26028 + __le64 dequeued_requests;
26029 + __le64 ob_enc_requests;
26030 + __le64 ib_dec_requests;
26031 + __le64 ob_enc_bytes;
26032 + __le64 ob_prot_bytes;
26033 + __le64 ib_dec_bytes;
26034 + __le64 ib_valid_bytes;
26035 +};
26036 +
26037 +struct dpseci_rsp_get_api_version {
26038 + __le16 major;
26039 + __le16 minor;
26040 +};
26041 +
26042 +struct dpseci_cmd_opr {
26043 + __le16 pad;
26044 + u8 index;
26045 + u8 options;
26046 + u8 pad1[7];
26047 + u8 oloe;
26048 + u8 oeane;
26049 + u8 olws;
26050 + u8 oa;
26051 + u8 oprrws;
26052 +};
26053 +
26054 +#define DPSECI_OPR_RIP_SHIFT 0
26055 +#define DPSECI_OPR_RIP_SIZE 1
26056 +#define DPSECI_OPR_ENABLE_SHIFT 1
26057 +#define DPSECI_OPR_ENABLE_SIZE 1
26058 +#define DPSECI_OPR_TSEQ_NLIS_SHIFT 1
26059 +#define DPSECI_OPR_TSEQ_NLIS_SIZE 1
26060 +#define DPSECI_OPR_HSEQ_NLIS_SHIFT 1
26061 +#define DPSECI_OPR_HSEQ_NLIS_SIZE 1
26062 +
26063 +struct dpseci_rsp_get_opr {
26064 + __le64 pad;
26065 + u8 rip_enable;
26066 + u8 pad0[2];
26067 + u8 oloe;
26068 + u8 oeane;
26069 + u8 olws;
26070 + u8 oa;
26071 + u8 oprrws;
26072 + __le16 nesn;
26073 + __le16 pad1;
26074 + __le16 ndsn;
26075 + __le16 pad2;
26076 + __le16 ea_tseq;
26077 + u8 tseq_nlis;
26078 + u8 pad3;
26079 + __le16 ea_hseq;
26080 + u8 hseq_nlis;
26081 + u8 pad4;
26082 + __le16 ea_hptr;
26083 + __le16 pad5;
26084 + __le16 ea_tptr;
26085 + __le16 pad6;
26086 + __le16 opr_vid;
26087 + __le16 pad7;
26088 + __le16 opr_id;
26089 +};
26090 +
26091 +#define DPSECI_CGN_DEST_TYPE_SHIFT 0
26092 +#define DPSECI_CGN_DEST_TYPE_SIZE 4
26093 +#define DPSECI_CGN_UNITS_SHIFT 4
26094 +#define DPSECI_CGN_UNITS_SIZE 2
26095 +
26096 +struct dpseci_cmd_congestion_notification {
26097 + __le32 dest_id;
26098 + __le16 notification_mode;
26099 + u8 priority;
26100 + u8 options;
26101 + __le64 message_iova;
26102 + __le64 message_ctx;
26103 + __le32 threshold_entry;
26104 + __le32 threshold_exit;
26105 +};
26106 +
26107 +#endif /* _DPSECI_CMD_H_ */
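The dpseci_set_field()/dpseci_get_field() helpers above pack sub-byte fields by shift and mask. As a worked example derived from the SHIFT/SIZE defines (shown only for illustration, compiled against the two headers added by this patch): DEST_TYPE occupies bits 0-3 and UNITS bits 4-5 of the congestion-notification options byte, so a DPIO destination (1) with frame units (1) packs to 0x11:

#include "dpseci.h"      /* enum dpseci_dest, enum dpseci_congestion_unit */
#include "dpseci_cmd.h"  /* dpseci_set_field() / dpseci_get_field() */

static u8 pack_cgn_options(void)
{
	u8 options = 0;

	dpseci_set_field(options, CGN_DEST_TYPE, DPSECI_DEST_DPIO);          /* bits 0-3 -> 0x01 */
	dpseci_set_field(options, CGN_UNITS, DPSECI_CONGESTION_UNIT_FRAMES); /* bits 4-5 -> 0x10 */

	return options;  /* 0x11; dpseci_get_field() reverses the packing */
}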
26108 --- a/drivers/crypto/caam/error.c
26109 +++ b/drivers/crypto/caam/error.c
26110 @@ -6,11 +6,54 @@
26111
26112 #include "compat.h"
26113 #include "regs.h"
26114 -#include "intern.h"
26115 #include "desc.h"
26116 -#include "jr.h"
26117 #include "error.h"
26118
26119 +#ifdef DEBUG
26120 +
26121 +#include <linux/highmem.h>
26122 +
26123 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
26124 + int rowsize, int groupsize, struct scatterlist *sg,
26125 + size_t tlen, bool ascii)
26126 +{
26127 + struct scatterlist *it;
26128 + void *it_page;
26129 + size_t len;
26130 + void *buf;
26131 +
26132 + for (it = sg; it && tlen > 0; it = sg_next(it)) {
26133 + /*
26134 + * make sure the scatterlist's page
26135 + * has a valid virtual memory mapping
26136 + */
26137 + it_page = kmap_atomic(sg_page(it));
26138 + if (unlikely(!it_page)) {
26139 + pr_err("caam_dump_sg: kmap failed\n");
26140 + return;
26141 + }
26142 +
26143 + buf = it_page + it->offset;
26144 + len = min_t(size_t, tlen, it->length);
26145 + print_hex_dump(level, prefix_str, prefix_type, rowsize,
26146 + groupsize, buf, len, ascii);
26147 + tlen -= len;
26148 +
26149 + kunmap_atomic(it_page);
26150 + }
26151 +}
26152 +
26153 +#else
26154 +
26155 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
26156 + int rowsize, int groupsize, struct scatterlist *sg,
26157 + size_t tlen, bool ascii)
26158 +{}
26159 +
26160 +#endif
26161 +
26162 +EXPORT_SYMBOL(caam_dump_sg);
26163 +
26164 static const struct {
26165 u8 value;
26166 const char *error_text;
26167 @@ -69,6 +112,54 @@ static const struct {
26168 { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
26169 };
26170
26171 +static const struct {
26172 + u8 value;
26173 + const char *error_text;
26174 +} qi_error_list[] = {
26175 + { 0x1F, "Job terminated by FQ or ICID flush" },
26176 + { 0x20, "FD format error"},
26177 + { 0x21, "FD command format error"},
26178 + { 0x23, "FL format error"},
26179 + { 0x25, "CRJD specified in FD, but not enabled in FLC"},
26180 + { 0x30, "Max. buffer size too small"},
26181 + { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
26182 +	{ 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format)"},
26183 + { 0x33, "Size over/underflow (allocate mode)"},
26184 + { 0x34, "Size over/underflow (reuse mode)"},
26185 +	{ 0x35, "Length exceeds max. short length (allocate mode, S/G format)"},
26186 +	{ 0x36, "Memory footprint exceeds max. value (allocate mode, S/G format)"},
26187 + { 0x41, "SBC frame format not supported (allocate mode)"},
26188 + { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
26189 + { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
26190 + { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
26191 + { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
26192 + { 0x46, "Annotation length exceeds offset (reuse mode)"},
26193 + { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
26194 + { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
26195 +	{ 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"},
26196 + { 0x51, "Unsupported IF reuse mode"},
26197 + { 0x52, "Unsupported FL use mode"},
26198 + { 0x53, "Unsupported RJD use mode"},
26199 + { 0x54, "Unsupported inline descriptor use mode"},
26200 + { 0xC0, "Table buffer pool 0 depletion"},
26201 + { 0xC1, "Table buffer pool 1 depletion"},
26202 + { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
26203 + { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
26204 + { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
26205 + { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
26206 + { 0xD0, "FLC read error"},
26207 + { 0xD1, "FL read error"},
26208 + { 0xD2, "FL write error"},
26209 + { 0xD3, "OF SGT write error"},
26210 + { 0xD4, "PTA read error"},
26211 + { 0xD5, "PTA write error"},
26212 + { 0xD6, "OF SGT F-bit write error"},
26213 + { 0xD7, "ASA write error"},
26214 + { 0xE1, "FLC[ICR]=0 ICID error"},
26215 + { 0xE2, "FLC[ICR]=1 ICID error"},
26216 + { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
26217 +};
26218 +
26219 static const char * const cha_id_list[] = {
26220 "",
26221 "AES",
26222 @@ -146,10 +237,9 @@ static void report_ccb_status(struct dev
26223 strlen(rng_err_id_list[err_id])) {
26224 /* RNG-only error */
26225 err_str = rng_err_id_list[err_id];
26226 - } else if (err_id < ARRAY_SIZE(err_id_list))
26227 + } else {
26228 err_str = err_id_list[err_id];
26229 - else
26230 - snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
26231 + }
26232
26233 /*
26234 * CCB ICV check failures are part of normal operation life;
26235 @@ -198,6 +288,27 @@ static void report_deco_status(struct de
26236 status, error, idx_str, idx, err_str, err_err_code);
26237 }
26238
26239 +static void report_qi_status(struct device *qidev, const u32 status,
26240 + const char *error)
26241 +{
26242 + u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
26243 + const char *err_str = "unidentified error value 0x";
26244 + char err_err_code[3] = { 0 };
26245 + int i;
26246 +
26247 + for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
26248 + if (qi_error_list[i].value == err_id)
26249 + break;
26250 +
26251 + if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
26252 + err_str = qi_error_list[i].error_text;
26253 + else
26254 + snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
26255 +
26256 + dev_err(qidev, "%08x: %s: %s%s\n",
26257 + status, error, err_str, err_err_code);
26258 +}
26259 +
26260 static void report_jr_status(struct device *jrdev, const u32 status,
26261 const char *error)
26262 {
26263 @@ -212,7 +323,7 @@ static void report_cond_code_status(stru
26264 status, error, __func__);
26265 }
26266
26267 -void caam_jr_strstatus(struct device *jrdev, u32 status)
26268 +void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
26269 {
26270 static const struct stat_src {
26271 void (*report_ssed)(struct device *jrdev, const u32 status,
26272 @@ -224,7 +335,7 @@ void caam_jr_strstatus(struct device *jr
26273 { report_ccb_status, "CCB" },
26274 { report_jump_status, "Jump" },
26275 { report_deco_status, "DECO" },
26276 - { NULL, "Queue Manager Interface" },
26277 + { report_qi_status, "Queue Manager Interface" },
26278 { report_jr_status, "Job Ring" },
26279 { report_cond_code_status, "Condition Code" },
26280 { NULL, NULL },
26281 @@ -250,4 +361,4 @@ void caam_jr_strstatus(struct device *jr
26282 else
26283 dev_err(jrdev, "%d: unknown error source\n", ssrc);
26284 }
26285 -EXPORT_SYMBOL(caam_jr_strstatus);
26286 +EXPORT_SYMBOL(caam_strstatus);
26287 --- a/drivers/crypto/caam/error.h
26288 +++ b/drivers/crypto/caam/error.h
26289 @@ -7,5 +7,13 @@
26290 #ifndef CAAM_ERROR_H
26291 #define CAAM_ERROR_H
26292 #define CAAM_ERROR_STR_MAX 302
26293 -void caam_jr_strstatus(struct device *jrdev, u32 status);
26294 +
26295 +void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
26296 +
26297 +#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
26298 +#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
26299 +
26300 +void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
26301 + int rowsize, int groupsize, struct scatterlist *sg,
26302 + size_t tlen, bool ascii);
26303 #endif /* CAAM_ERROR_H */
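Editor's note: for reference, a minimal sketch of how a job-ring user consumes the renamed status decoder and the new scatterlist dump helper from a completion callback; the request structure and all example_* names are illustrative only.

#include <linux/completion.h>
#include <linux/scatterlist.h>
#include "error.h"

/* Illustrative per-request state; not part of the patch. */
struct example_req {
	struct completion done;
	struct scatterlist *dst;
	size_t dst_len;
	u32 status;
};

static void example_jr_done(struct device *jrdev, u32 *desc, u32 status,
			    void *context)
{
	struct example_req *req = context;

	if (status)
		caam_jr_strstatus(jrdev, status); /* wraps caam_strstatus(..., false) */

	/* On DEBUG builds this walks and hex-dumps the scattered output */
	caam_dump_sg(KERN_ERR, "dst@: ", DUMP_PREFIX_ADDRESS, 16, 4,
		     req->dst, req->dst_len, true);

	req->status = status;
	complete(&req->done);
}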
26304 --- a/drivers/crypto/caam/intern.h
26305 +++ b/drivers/crypto/caam/intern.h
26306 @@ -64,10 +64,9 @@ struct caam_drv_private_jr {
26307 * Driver-private storage for a single CAAM block instance
26308 */
26309 struct caam_drv_private {
26310 -
26311 - struct device *dev;
26312 - struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
26313 - struct platform_device *pdev;
26314 +#ifdef CONFIG_CAAM_QI
26315 + struct device *qidev;
26316 +#endif
26317
26318 /* Physical-presence section */
26319 struct caam_ctrl __iomem *ctrl; /* controller region */
26320 @@ -84,6 +83,7 @@ struct caam_drv_private {
26321 u8 qi_present; /* Nonzero if QI present in device */
26322 int secvio_irq; /* Security violation interrupt number */
26323 int virt_en; /* Virtualization enabled in CAAM */
26324 + int era; /* CAAM Era (internal HW revision) */
26325
26326 #define RNG4_MAX_HANDLES 2
26327 /* RNG4 block */
26328 @@ -103,11 +103,6 @@ struct caam_drv_private {
26329 #ifdef CONFIG_DEBUG_FS
26330 struct dentry *dfs_root;
26331 struct dentry *ctl; /* controller dir */
26332 - struct dentry *ctl_rq_dequeued, *ctl_ob_enc_req, *ctl_ib_dec_req;
26333 - struct dentry *ctl_ob_enc_bytes, *ctl_ob_prot_bytes;
26334 - struct dentry *ctl_ib_dec_bytes, *ctl_ib_valid_bytes;
26335 - struct dentry *ctl_faultaddr, *ctl_faultdetail, *ctl_faultstatus;
26336 -
26337 struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
26338 struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
26339 #endif
26340 @@ -115,4 +110,22 @@ struct caam_drv_private {
26341
26342 void caam_jr_algapi_init(struct device *dev);
26343 void caam_jr_algapi_remove(struct device *dev);
26344 +
26345 +#ifdef CONFIG_DEBUG_FS
26346 +static int caam_debugfs_u64_get(void *data, u64 *val)
26347 +{
26348 + *val = caam64_to_cpu(*(u64 *)data);
26349 + return 0;
26350 +}
26351 +
26352 +static int caam_debugfs_u32_get(void *data, u64 *val)
26353 +{
26354 + *val = caam32_to_cpu(*(u32 *)data);
26355 + return 0;
26356 +}
26357 +
26358 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
26359 +DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
26360 +#endif
26361 +
26362 #endif /* INTERN_H */
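Editor's note: the read-only debugfs attribute helpers added above are shared by ctrl.c and the new QI code (the qi_congested node created later in qi.c follows the same pattern). A minimal sketch of registering such a node; the file and counter names are made up.

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include "intern.h"

static u64 example_counter;	/* hypothetical hardware-endian counter */

static void example_add_ctl_node(struct caam_drv_private *ctrlpriv)
{
	/* Exposes the counter read-only; reads go through caam64_to_cpu() */
	debugfs_create_file("example_counter", 0444, ctrlpriv->ctl,
			    &example_counter, &caam_fops_u64_ro);
}
#endif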
26363 --- a/drivers/crypto/caam/jr.c
26364 +++ b/drivers/crypto/caam/jr.c
26365 @@ -9,6 +9,7 @@
26366 #include <linux/of_address.h>
26367
26368 #include "compat.h"
26369 +#include "ctrl.h"
26370 #include "regs.h"
26371 #include "jr.h"
26372 #include "desc.h"
26373 @@ -22,6 +23,14 @@ struct jr_driver_data {
26374
26375 static struct jr_driver_data driver_data;
26376
26377 +static int jr_driver_probed;
26378 +
26379 +int caam_jr_driver_probed(void)
26380 +{
26381 + return jr_driver_probed;
26382 +}
26383 +EXPORT_SYMBOL(caam_jr_driver_probed);
26384 +
26385 static int caam_reset_hw_jr(struct device *dev)
26386 {
26387 struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
26388 @@ -118,6 +127,8 @@ static int caam_jr_remove(struct platfor
26389 dev_err(jrdev, "Failed to shut down job ring\n");
26390 irq_dispose_mapping(jrpriv->irq);
26391
26392 + jr_driver_probed--;
26393 +
26394 return ret;
26395 }
26396
26397 @@ -281,6 +292,36 @@ struct device *caam_jr_alloc(void)
26398 EXPORT_SYMBOL(caam_jr_alloc);
26399
26400 /**
26401 + * caam_jridx_alloc() - Alloc a specific job ring based on its index.
26402 + *
26403 + * returns : pointer to the job ring device on success, or
26404 + * ERR_PTR(-ENODEV) if the requested index is not available.
26405 + **/
26406 +struct device *caam_jridx_alloc(int idx)
26407 +{
26408 + struct caam_drv_private_jr *jrpriv;
26409 + struct device *dev = ERR_PTR(-ENODEV);
26410 +
26411 + spin_lock(&driver_data.jr_alloc_lock);
26412 +
26413 + if (list_empty(&driver_data.jr_list))
26414 + goto end;
26415 +
26416 + list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
26417 + if (jrpriv->ridx == idx) {
26418 + atomic_inc(&jrpriv->tfm_count);
26419 + dev = jrpriv->dev;
26420 + break;
26421 + }
26422 + }
26423 +
26424 +end:
26425 + spin_unlock(&driver_data.jr_alloc_lock);
26426 + return dev;
26427 +}
26428 +EXPORT_SYMBOL(caam_jridx_alloc);
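Editor's note: a minimal sketch of the index-based allocation path exported above; the caller's context and the deferral policy are illustrative assumptions.

#include <linux/err.h>
#include "jr.h"

/* Illustrative: bind to job ring 'idx' only once the JR driver has probed. */
static struct device *example_acquire_jr(int idx)
{
	struct device *jrdev;

	if (!caam_jr_driver_probed())
		return ERR_PTR(-EPROBE_DEFER);

	jrdev = caam_jridx_alloc(idx);	/* ERR_PTR(-ENODEV) if idx not found */
	return jrdev;
}

/* ... later, the ring is released with caam_jr_free(jrdev); */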
26429 +
26430 +/**
26431 * caam_jr_free() - Free the Job Ring
26432 * @rdev - points to the dev that identifies the Job ring to
26433 * be released.
26434 @@ -497,15 +538,28 @@ static int caam_jr_probe(struct platform
26435 return -ENOMEM;
26436 }
26437
26438 - jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
26439 + jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;
26440
26441 - if (sizeof(dma_addr_t) == sizeof(u64))
26442 - if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
26443 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
26444 + if (sizeof(dma_addr_t) == sizeof(u64)) {
26445 + if (caam_dpaa2)
26446 + error = dma_set_mask_and_coherent(jrdev,
26447 + DMA_BIT_MASK(49));
26448 + else if (of_device_is_compatible(nprop,
26449 + "fsl,sec-v5.0-job-ring"))
26450 + error = dma_set_mask_and_coherent(jrdev,
26451 + DMA_BIT_MASK(40));
26452 else
26453 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
26454 - else
26455 - dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
26456 + error = dma_set_mask_and_coherent(jrdev,
26457 + DMA_BIT_MASK(36));
26458 + } else {
26459 + error = dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
26460 + }
26461 + if (error) {
26462 + dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
26463 + error);
26464 + iounmap(ctrl);
26465 + return error;
26466 + }
26467
26468 /* Identify the interrupt */
26469 jrpriv->irq = irq_of_parse_and_map(nprop, 0);
26470 @@ -525,10 +579,12 @@ static int caam_jr_probe(struct platform
26471
26472 atomic_set(&jrpriv->tfm_count, 0);
26473
26474 + jr_driver_probed++;
26475 +
26476 return 0;
26477 }
26478
26479 -static struct of_device_id caam_jr_match[] = {
26480 +static const struct of_device_id caam_jr_match[] = {
26481 {
26482 .compatible = "fsl,sec-v4.0-job-ring",
26483 },
26484 --- a/drivers/crypto/caam/jr.h
26485 +++ b/drivers/crypto/caam/jr.h
26486 @@ -8,7 +8,9 @@
26487 #define JR_H
26488
26489 /* Prototypes for backend-level services exposed to APIs */
26490 +int caam_jr_driver_probed(void);
26491 struct device *caam_jr_alloc(void);
26492 +struct device *caam_jridx_alloc(int idx);
26493 void caam_jr_free(struct device *rdev);
26494 int caam_jr_enqueue(struct device *dev, u32 *desc,
26495 void (*cbk)(struct device *dev, u32 *desc, u32 status,
26496 --- a/drivers/crypto/caam/key_gen.c
26497 +++ b/drivers/crypto/caam/key_gen.c
26498 @@ -41,15 +41,29 @@ Split key generation--------------------
26499 [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40
26500 @0xffe04000
26501 */
26502 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
26503 - int split_key_pad_len, const u8 *key_in, u32 keylen,
26504 - u32 alg_op)
26505 +int gen_split_key(struct device *jrdev, u8 *key_out,
26506 + struct alginfo * const adata, const u8 *key_in, u32 keylen,
26507 + int max_keylen)
26508 {
26509 u32 *desc;
26510 struct split_key_result result;
26511 dma_addr_t dma_addr_in, dma_addr_out;
26512 int ret = -ENOMEM;
26513
26514 + adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK);
26515 + adata->keylen_pad = split_key_pad_len(adata->algtype &
26516 + OP_ALG_ALGSEL_MASK);
26517 +
26518 +#ifdef DEBUG
26519 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
26520 + adata->keylen, adata->keylen_pad);
26521 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
26522 + DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
26523 +#endif
26524 +
26525 + if (adata->keylen_pad > max_keylen)
26526 + return -EINVAL;
26527 +
26528 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
26529 if (!desc) {
26530 dev_err(jrdev, "unable to allocate key input memory\n");
26531 @@ -63,7 +77,7 @@ int gen_split_key(struct device *jrdev,
26532 goto out_free;
26533 }
26534
26535 - dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
26536 + dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
26537 DMA_FROM_DEVICE);
26538 if (dma_mapping_error(jrdev, dma_addr_out)) {
26539 dev_err(jrdev, "unable to map key output memory\n");
26540 @@ -74,7 +88,9 @@ int gen_split_key(struct device *jrdev,
26541 append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
26542
26543 /* Sets MDHA up into an HMAC-INIT */
26544 - append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
26545 + append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
26546 + OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
26547 + OP_ALG_AS_INIT);
26548
26549 /*
26550 * do a FIFO_LOAD of zero, this will trigger the internal key expansion
26551 @@ -87,7 +103,7 @@ int gen_split_key(struct device *jrdev,
26552 * FIFO_STORE with the explicit split-key content store
26553 * (0x26 output type)
26554 */
26555 - append_fifo_store(desc, dma_addr_out, split_key_len,
26556 + append_fifo_store(desc, dma_addr_out, adata->keylen,
26557 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
26558
26559 #ifdef DEBUG
26560 @@ -108,11 +124,11 @@ int gen_split_key(struct device *jrdev,
26561 #ifdef DEBUG
26562 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
26563 DUMP_PREFIX_ADDRESS, 16, 4, key_out,
26564 - split_key_pad_len, 1);
26565 + adata->keylen_pad, 1);
26566 #endif
26567 }
26568
26569 - dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
26570 + dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
26571 DMA_FROM_DEVICE);
26572 out_unmap_in:
26573 dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
26574 --- a/drivers/crypto/caam/key_gen.h
26575 +++ b/drivers/crypto/caam/key_gen.h
26576 @@ -5,6 +5,36 @@
26577 *
26578 */
26579
26580 +/**
26581 + * split_key_len - Compute MDHA split key length for a given algorithm
26582 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
26583 + * SHA224, SHA256, SHA384, SHA512.
26584 + *
26585 + * Return: MDHA split key length
26586 + */
26587 +static inline u32 split_key_len(u32 hash)
26588 +{
26589 + /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
26590 + static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
26591 + u32 idx;
26592 +
26593 + idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
26594 +
26595 + return (u32)(mdpadlen[idx] * 2);
26596 +}
26597 +
26598 +/**
26599 + * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
26600 + * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
26601 + * SHA224, SHA256, SHA384, SHA512.
26602 + *
26603 + * Return: MDHA split key pad length
26604 + */
26605 +static inline u32 split_key_pad_len(u32 hash)
26606 +{
26607 + return ALIGN(split_key_len(hash), 16);
26608 +}
26609 +
26610 struct split_key_result {
26611 struct completion completion;
26612 int err;
26613 @@ -12,6 +42,6 @@ struct split_key_result {
26614
26615 void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
26616
26617 -int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
26618 - int split_key_pad_len, const u8 *key_in, u32 keylen,
26619 - u32 alg_op);
26620 +int gen_split_key(struct device *jrdev, u8 *key_out,
26621 + struct alginfo * const adata, const u8 *key_in, u32 keylen,
26622 + int max_keylen);
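Editor's note: with the new prototype, callers no longer pre-compute split key lengths; gen_split_key() derives adata->keylen and adata->keylen_pad from the algorithm selector. A minimal sketch of the updated calling convention; the HMAC-SHA256 selector and wrapper are assumptions for illustration.

#include "compat.h"
#include "desc.h"
#include "desc_constr.h"	/* struct alginfo */
#include "key_gen.h"

/* Illustrative: derive an MDHA split key for HMAC-SHA256 on a job ring. */
static int example_hmac_split_key(struct device *jrdev,
				  u8 *key_out, int key_out_max,
				  const u8 *key_in, u32 keylen)
{
	struct alginfo adata = {
		.algtype = OP_ALG_ALGSEL_SHA256,
	};

	/*
	 * gen_split_key() fills adata.keylen / adata.keylen_pad via
	 * split_key_len() / split_key_pad_len() and fails with -EINVAL
	 * if the padded key would not fit in key_out_max bytes.
	 */
	return gen_split_key(jrdev, key_out, &adata, key_in, keylen,
			     key_out_max);
}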
26623 --- a/drivers/crypto/caam/pdb.h
26624 +++ b/drivers/crypto/caam/pdb.h
26625 @@ -483,6 +483,8 @@ struct dsa_verify_pdb {
26626 #define RSA_PDB_E_MASK (0xFFF << RSA_PDB_E_SHIFT)
26627 #define RSA_PDB_D_SHIFT 12
26628 #define RSA_PDB_D_MASK (0xFFF << RSA_PDB_D_SHIFT)
26629 +#define RSA_PDB_Q_SHIFT 12
26630 +#define RSA_PDB_Q_MASK (0xFFF << RSA_PDB_Q_SHIFT)
26631
26632 #define RSA_PDB_SGF_F (0x8 << RSA_PDB_SGF_SHIFT)
26633 #define RSA_PDB_SGF_G (0x4 << RSA_PDB_SGF_SHIFT)
26634 @@ -490,6 +492,8 @@ struct dsa_verify_pdb {
26635 #define RSA_PRIV_PDB_SGF_G (0x8 << RSA_PDB_SGF_SHIFT)
26636
26637 #define RSA_PRIV_KEY_FRM_1 0
26638 +#define RSA_PRIV_KEY_FRM_2 1
26639 +#define RSA_PRIV_KEY_FRM_3 2
26640
26641 /**
26642 * RSA Encrypt Protocol Data Block
26643 @@ -525,4 +529,62 @@ struct rsa_priv_f1_pdb {
26644 dma_addr_t d_dma;
26645 } __packed;
26646
26647 +/**
26648 + * RSA Decrypt PDB - Private Key Form #2
26649 + * @sgf : scatter-gather field
26650 + * @g_dma : dma address of encrypted input data
26651 + * @f_dma : dma address of output data
26652 + * @d_dma : dma address of RSA private exponent
26653 + * @p_dma : dma address of RSA prime factor p of RSA modulus n
26654 + * @q_dma : dma address of RSA prime factor q of RSA modulus n
26655 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
26656 + * as internal state buffer. It is assumed to be as long as p.
26657 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
26658 + * as internal state buffer. It is assumed to be as long as q.
26659 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
26660 + */
26661 +struct rsa_priv_f2_pdb {
26662 + u32 sgf;
26663 + dma_addr_t g_dma;
26664 + dma_addr_t f_dma;
26665 + dma_addr_t d_dma;
26666 + dma_addr_t p_dma;
26667 + dma_addr_t q_dma;
26668 + dma_addr_t tmp1_dma;
26669 + dma_addr_t tmp2_dma;
26670 + u32 p_q_len;
26671 +} __packed;
26672 +
26673 +/**
26674 + * RSA Decrypt PDB - Private Key Form #3
26675 + * This is the RSA Chinese Remainder Theorem (CRT) form for two prime factors of
26676 + * the RSA modulus.
26677 + * @sgf : scatter-gather field
26678 + * @g_dma : dma address of encrypted input data
26679 + * @f_dma : dma address of output data
26680 + * @c_dma : dma address of RSA CRT coefficient
26681 + * @p_dma : dma address of RSA prime factor p of RSA modulus n
26682 + * @q_dma : dma address of RSA prime factor q of RSA modulus n
26683 + * @dp_dma : dma address of RSA CRT exponent of RSA prime factor p
26684 + * @dq_dma : dma address of RSA CRT exponent of RSA prime factor q
26685 + * @tmp1_dma: dma address of temporary buffer. CAAM uses this temporary buffer
26686 + * as internal state buffer. It is assumed to be as long as p.
26687 + * @tmp2_dma: dma address of temporary buffer. CAAM uses this temporary buffer
26688 + * as internal state buffer. It is assumed to be as long as q.
26689 + * @p_q_len : length in bytes of first two prime factors of the RSA modulus n
26690 + */
26691 +struct rsa_priv_f3_pdb {
26692 + u32 sgf;
26693 + dma_addr_t g_dma;
26694 + dma_addr_t f_dma;
26695 + dma_addr_t c_dma;
26696 + dma_addr_t p_dma;
26697 + dma_addr_t q_dma;
26698 + dma_addr_t dp_dma;
26699 + dma_addr_t dq_dma;
26700 + dma_addr_t tmp1_dma;
26701 + dma_addr_t tmp2_dma;
26702 + u32 p_q_len;
26703 +} __packed;
26704 +
26705 #endif
26706 --- a/drivers/crypto/caam/pkc_desc.c
26707 +++ b/drivers/crypto/caam/pkc_desc.c
26708 @@ -34,3 +34,39 @@ void init_rsa_priv_f1_desc(u32 *desc, st
26709 append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
26710 RSA_PRIV_KEY_FRM_1);
26711 }
26712 +
26713 +/* Descriptor for RSA Private operation - Private Key Form #2 */
26714 +void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
26715 +{
26716 + init_job_desc_pdb(desc, 0, sizeof(*pdb));
26717 + append_cmd(desc, pdb->sgf);
26718 + append_ptr(desc, pdb->g_dma);
26719 + append_ptr(desc, pdb->f_dma);
26720 + append_ptr(desc, pdb->d_dma);
26721 + append_ptr(desc, pdb->p_dma);
26722 + append_ptr(desc, pdb->q_dma);
26723 + append_ptr(desc, pdb->tmp1_dma);
26724 + append_ptr(desc, pdb->tmp2_dma);
26725 + append_cmd(desc, pdb->p_q_len);
26726 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
26727 + RSA_PRIV_KEY_FRM_2);
26728 +}
26729 +
26730 +/* Descriptor for RSA Private operation - Private Key Form #3 */
26731 +void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
26732 +{
26733 + init_job_desc_pdb(desc, 0, sizeof(*pdb));
26734 + append_cmd(desc, pdb->sgf);
26735 + append_ptr(desc, pdb->g_dma);
26736 + append_ptr(desc, pdb->f_dma);
26737 + append_ptr(desc, pdb->c_dma);
26738 + append_ptr(desc, pdb->p_dma);
26739 + append_ptr(desc, pdb->q_dma);
26740 + append_ptr(desc, pdb->dp_dma);
26741 + append_ptr(desc, pdb->dq_dma);
26742 + append_ptr(desc, pdb->tmp1_dma);
26743 + append_ptr(desc, pdb->tmp2_dma);
26744 + append_cmd(desc, pdb->p_q_len);
26745 + append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
26746 + RSA_PRIV_KEY_FRM_3);
26747 +}
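Editor's note: a minimal sketch of how the new form-2 descriptor constructor is meant to be driven: the caller DMA-maps the key material and temporaries, fills the PDB, then emits the job. Everything here except init_rsa_priv_f2_desc() and struct rsa_priv_f2_pdb is an illustrative assumption (the prototype is assumed to live in caampkc.h within this patch).

#include "pdb.h"

/* Assumed to be declared in caampkc.h alongside the other PDB helpers */
void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb);

/* Illustrative only: all DMA addresses are assumed to be already mapped. */
static void example_rsa_f2_job(u32 *desc, struct rsa_priv_f2_pdb *pdb,
			       dma_addr_t in, dma_addr_t out, dma_addr_t d,
			       dma_addr_t p, dma_addr_t q,
			       dma_addr_t tmp1, dma_addr_t tmp2, u32 p_q_len)
{
	pdb->sgf = 0;		/* contiguous (non-S/G) input and output */
	pdb->g_dma = in;	/* ciphertext */
	pdb->f_dma = out;	/* plaintext */
	pdb->d_dma = d;		/* private exponent */
	pdb->p_dma = p;
	pdb->q_dma = q;
	pdb->tmp1_dma = tmp1;	/* scratch, at least as long as p */
	pdb->tmp2_dma = tmp2;	/* scratch, at least as long as q */
	pdb->p_q_len = p_q_len;

	init_rsa_priv_f2_desc(desc, pdb);
}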
26748 --- /dev/null
26749 +++ b/drivers/crypto/caam/qi.c
26750 @@ -0,0 +1,797 @@
26751 +/*
26752 + * CAAM/SEC 4.x QI transport/backend driver
26753 + * Queue Interface backend functionality
26754 + *
26755 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
26756 + * Copyright 2016-2017 NXP
26757 + */
26758 +
26759 +#include <linux/cpumask.h>
26760 +#include <linux/kthread.h>
26761 +#include <linux/fsl_qman.h>
26762 +
26763 +#include "regs.h"
26764 +#include "qi.h"
26765 +#include "desc.h"
26766 +#include "intern.h"
26767 +#include "desc_constr.h"
26768 +
26769 +#define PREHDR_RSLS_SHIFT 31
26770 +
26771 +/*
26772 + * Use a reasonable backlog of frames (per CPU) as congestion threshold,
26773 + * so that resources used by the in-flight buffers do not become a memory hog.
26774 + */
26775 +#define MAX_RSP_FQ_BACKLOG_PER_CPU 256
26776 +
26777 +#define CAAM_QI_ENQUEUE_RETRIES 10000
26778 +
26779 +#define CAAM_NAPI_WEIGHT 63
26780 +
26781 +/*
26782 + * caam_napi - struct holding CAAM NAPI-related params
26783 + * @irqtask: IRQ task for QI backend
26784 + * @p: QMan portal
26785 + */
26786 +struct caam_napi {
26787 + struct napi_struct irqtask;
26788 + struct qman_portal *p;
26789 +};
26790 +
26791 +/*
26792 + * caam_qi_pcpu_priv - percpu private data structure to main list of pending
26793 + * responses expected on each cpu.
26794 + * @caam_napi: CAAM NAPI params
26795 + * @net_dev: netdev used by NAPI
26796 + * @rsp_fq: response FQ from CAAM
26797 + */
26798 +struct caam_qi_pcpu_priv {
26799 + struct caam_napi caam_napi;
26800 + struct net_device net_dev;
26801 + struct qman_fq *rsp_fq;
26802 +} ____cacheline_aligned;
26803 +
26804 +static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
26805 +static DEFINE_PER_CPU(int, last_cpu);
26806 +
26807 +/*
26808 + * caam_qi_priv - CAAM QI backend private params
26809 + * @cgr: QMan congestion group
26810 + * @qi_pdev: platform device for QI backend
26811 + */
26812 +struct caam_qi_priv {
26813 + struct qman_cgr cgr;
26814 + struct platform_device *qi_pdev;
26815 +};
26816 +
26817 +static struct caam_qi_priv qipriv ____cacheline_aligned;
26818 +
26819 +/*
26820 + * This is written by only one core - the one that initialized the CGR - and
26821 + * read by multiple cores (all the others).
26822 + */
26823 +bool caam_congested __read_mostly;
26824 +EXPORT_SYMBOL(caam_congested);
26825 +
26826 +#ifdef CONFIG_DEBUG_FS
26827 +/*
26828 + * This is a counter for the number of times the congestion group (where all
26829 + * the request and response queues are) reached congestion. Incremented
26830 + * each time the congestion callback is called with congested == true.
26831 + */
26832 +static u64 times_congested;
26833 +#endif
26834 +
26835 +/*
26836 + * CPU on which the module was initialised. This is required because the QMan driver
26837 + * requires CGRs to be removed from same CPU from where they were originally
26838 + * allocated.
26839 + */
26840 +static int mod_init_cpu;
26841 +
26842 +/*
26843 + * This is a cache of buffers, from which the users of the CAAM QI driver
26844 + * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
26845 + * doing malloc on the hotpath.
26846 + * NOTE: A more elegant solution would be to have some headroom in the frames
26847 + * being processed. This could be added by the dpaa-ethernet driver.
26848 + * This would pose a problem for userspace application processing which
26849 + * cannot know of this limitation. So for now, this will work.
26850 + * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
26851 + */
26852 +static struct kmem_cache *qi_cache;
26853 +
26854 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
26855 +{
26856 + struct qm_fd fd;
26857 + int ret;
26858 + int num_retries = 0;
26859 +
26860 + fd.cmd = 0;
26861 + fd.format = qm_fd_compound;
26862 + fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
26863 + fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
26864 + DMA_BIDIRECTIONAL);
26865 + if (dma_mapping_error(qidev, fd.addr)) {
26866 + dev_err(qidev, "DMA mapping error for QI enqueue request\n");
26867 + return -EIO;
26868 + }
26869 +
26870 + do {
26871 + ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
26872 + if (likely(!ret))
26873 + return 0;
26874 +
26875 + if (ret != -EBUSY)
26876 + break;
26877 + num_retries++;
26878 + } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);
26879 +
26880 + dev_err(qidev, "qman_enqueue failed: %d\n", ret);
26881 +
26882 + return ret;
26883 +}
26884 +EXPORT_SYMBOL(caam_qi_enqueue);
26885 +
26886 +static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
26887 + const struct qm_mr_entry *msg)
26888 +{
26889 + const struct qm_fd *fd;
26890 + struct caam_drv_req *drv_req;
26891 + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
26892 +
26893 + fd = &msg->ern.fd;
26894 +
26895 + if (fd->format != qm_fd_compound) {
26896 + dev_err(qidev, "Non-compound FD from CAAM\n");
26897 + return;
26898 + }
26899 +
26900 + drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
26901 + if (!drv_req) {
26902 + dev_err(qidev,
26903 + "Can't find original request for CAAM response\n");
26904 + return;
26905 + }
26906 +
26907 + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
26908 + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
26909 +
26910 + drv_req->cbk(drv_req, -EIO);
26911 +}
26912 +
26913 +static struct qman_fq *create_caam_req_fq(struct device *qidev,
26914 + struct qman_fq *rsp_fq,
26915 + dma_addr_t hwdesc,
26916 + int fq_sched_flag)
26917 +{
26918 + int ret;
26919 + struct qman_fq *req_fq;
26920 + struct qm_mcc_initfq opts;
26921 +
26922 + req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
26923 + if (!req_fq)
26924 + return ERR_PTR(-ENOMEM);
26925 +
26926 + req_fq->cb.ern = caam_fq_ern_cb;
26927 + req_fq->cb.fqs = NULL;
26928 +
26929 + ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
26930 + QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
26931 + req_fq);
26932 + if (ret) {
26933 + dev_err(qidev, "Failed to create session req FQ\n");
26934 + goto create_req_fq_fail;
26935 + }
26936 +
26937 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
26938 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
26939 + QM_INITFQ_WE_CGID;
26940 + opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
26941 + opts.fqd.dest.channel = qm_channel_caam;
26942 + opts.fqd.dest.wq = 2;
26943 + opts.fqd.context_b = qman_fq_fqid(rsp_fq);
26944 + opts.fqd.context_a.hi = upper_32_bits(hwdesc);
26945 + opts.fqd.context_a.lo = lower_32_bits(hwdesc);
26946 + opts.fqd.cgid = qipriv.cgr.cgrid;
26947 +
26948 + ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
26949 + if (ret) {
26950 + dev_err(qidev, "Failed to init session req FQ\n");
26951 + goto init_req_fq_fail;
26952 + }
26953 +
26954 + dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
26955 + smp_processor_id());
26956 + return req_fq;
26957 +
26958 +init_req_fq_fail:
26959 + qman_destroy_fq(req_fq, 0);
26960 +create_req_fq_fail:
26961 + kfree(req_fq);
26962 + return ERR_PTR(ret);
26963 +}
26964 +
26965 +static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
26966 +{
26967 + int ret;
26968 +
26969 + ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
26970 + QMAN_VOLATILE_FLAG_FINISH,
26971 + QM_VDQCR_PRECEDENCE_VDQCR |
26972 + QM_VDQCR_NUMFRAMES_TILLEMPTY);
26973 + if (ret) {
26974 + dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
26975 + return ret;
26976 + }
26977 +
26978 + do {
26979 + struct qman_portal *p;
26980 +
26981 + p = qman_get_affine_portal(smp_processor_id());
26982 + qman_p_poll_dqrr(p, 16);
26983 + } while (fq->flags & QMAN_FQ_STATE_NE);
26984 +
26985 + return 0;
26986 +}
26987 +
26988 +static int kill_fq(struct device *qidev, struct qman_fq *fq)
26989 +{
26990 + u32 flags;
26991 + int ret;
26992 +
26993 + ret = qman_retire_fq(fq, &flags);
26994 + if (ret < 0) {
26995 + dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
26996 + return ret;
26997 + }
26998 +
26999 + if (!ret)
27000 + goto empty_fq;
27001 +
27002 + /* Async FQ retirement condition */
27003 + if (ret == 1) {
27004 + /* Retry till FQ gets in retired state */
27005 + do {
27006 + msleep(20);
27007 + } while (fq->state != qman_fq_state_retired);
27008 +
27009 + WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
27010 + WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
27011 + }
27012 +
27013 +empty_fq:
27014 + if (fq->flags & QMAN_FQ_STATE_NE) {
27015 + ret = empty_retired_fq(qidev, fq);
27016 + if (ret) {
27017 + dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
27018 + fq->fqid);
27019 + return ret;
27020 + }
27021 + }
27022 +
27023 + ret = qman_oos_fq(fq);
27024 + if (ret)
27025 + dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
27026 +
27027 + qman_destroy_fq(fq, 0);
27028 + kfree(fq);
27029 +
27030 + return ret;
27031 +}
27032 +
27033 +static int empty_caam_fq(struct qman_fq *fq)
27034 +{
27035 + int ret;
27036 + struct qm_mcr_queryfq_np np;
27037 +
27038 +	/* Wait till the older CAAM FQ gets empty */
27039 + do {
27040 + ret = qman_query_fq_np(fq, &np);
27041 + if (ret)
27042 + return ret;
27043 +
27044 + if (!np.frm_cnt)
27045 + break;
27046 +
27047 + msleep(20);
27048 + } while (1);
27049 +
27050 + /*
27051 + * Give extra time for pending jobs from this FQ in holding tanks
27052 + * to get processed
27053 + */
27054 + msleep(20);
27055 + return 0;
27056 +}
27057 +
27058 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
27059 +{
27060 + int ret;
27061 + u32 num_words;
27062 + struct qman_fq *new_fq, *old_fq;
27063 + struct device *qidev = drv_ctx->qidev;
27064 +
27065 + num_words = desc_len(sh_desc);
27066 + if (num_words > MAX_SDLEN) {
27067 + dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
27068 + return -EINVAL;
27069 + }
27070 +
27071 + /* Note down older req FQ */
27072 + old_fq = drv_ctx->req_fq;
27073 +
27074 + /* Create a new req FQ in parked state */
27075 + new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
27076 + drv_ctx->context_a, 0);
27077 + if (unlikely(IS_ERR_OR_NULL(new_fq))) {
27078 + dev_err(qidev, "FQ allocation for shdesc update failed\n");
27079 + return PTR_ERR(new_fq);
27080 + }
27081 +
27082 + /* Hook up new FQ to context so that new requests keep queuing */
27083 + drv_ctx->req_fq = new_fq;
27084 +
27085 + /* Empty and remove the older FQ */
27086 + ret = empty_caam_fq(old_fq);
27087 + if (ret) {
27088 + dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);
27089 +
27090 + /* We can revert to older FQ */
27091 + drv_ctx->req_fq = old_fq;
27092 +
27093 + if (kill_fq(qidev, new_fq))
27094 + dev_warn(qidev, "New CAAM FQ kill failed\n");
27095 +
27096 + return ret;
27097 + }
27098 +
27099 + /*
27100 + * Re-initialise pre-header. Set RSLS and SDLEN.
27101 + * Update the shared descriptor for driver context.
27102 + */
27103 + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
27104 + num_words);
27105 + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
27106 + dma_sync_single_for_device(qidev, drv_ctx->context_a,
27107 + sizeof(drv_ctx->sh_desc) +
27108 + sizeof(drv_ctx->prehdr),
27109 + DMA_BIDIRECTIONAL);
27110 +
27111 + /* Put the new FQ in scheduled state */
27112 + ret = qman_schedule_fq(new_fq);
27113 + if (ret) {
27114 + dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);
27115 +
27116 + /*
27117 + * We can kill new FQ and revert to old FQ.
27118 +		 * Since the desc is already modified, treat this as a success case
27119 + */
27120 +
27121 + drv_ctx->req_fq = old_fq;
27122 +
27123 + if (kill_fq(qidev, new_fq))
27124 + dev_warn(qidev, "New CAAM FQ kill failed\n");
27125 + } else if (kill_fq(qidev, old_fq)) {
27126 + dev_warn(qidev, "Old CAAM FQ kill failed\n");
27127 + }
27128 +
27129 + return 0;
27130 +}
27131 +EXPORT_SYMBOL(caam_drv_ctx_update);
27132 +
27133 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
27134 + int *cpu,
27135 + u32 *sh_desc)
27136 +{
27137 + size_t size;
27138 + u32 num_words;
27139 + dma_addr_t hwdesc;
27140 + struct caam_drv_ctx *drv_ctx;
27141 + const cpumask_t *cpus = qman_affine_cpus();
27142 +
27143 + num_words = desc_len(sh_desc);
27144 + if (num_words > MAX_SDLEN) {
27145 + dev_err(qidev, "Invalid descriptor len: %d words\n",
27146 + num_words);
27147 + return ERR_PTR(-EINVAL);
27148 + }
27149 +
27150 + drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
27151 + if (!drv_ctx)
27152 + return ERR_PTR(-ENOMEM);
27153 +
27154 + /*
27155 + * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
27156 + * and dma-map them.
27157 + */
27158 + drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
27159 + num_words);
27160 + memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
27161 + size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
27162 + hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
27163 + DMA_BIDIRECTIONAL);
27164 + if (dma_mapping_error(qidev, hwdesc)) {
27165 + dev_err(qidev, "DMA map error for preheader + shdesc\n");
27166 + kfree(drv_ctx);
27167 + return ERR_PTR(-ENOMEM);
27168 + }
27169 + drv_ctx->context_a = hwdesc;
27170 +
27171 + /* If given CPU does not own the portal, choose another one that does */
27172 + if (!cpumask_test_cpu(*cpu, cpus)) {
27173 + int *pcpu = &get_cpu_var(last_cpu);
27174 +
27175 + *pcpu = cpumask_next(*pcpu, cpus);
27176 + if (*pcpu >= nr_cpu_ids)
27177 + *pcpu = cpumask_first(cpus);
27178 + *cpu = *pcpu;
27179 +
27180 + put_cpu_var(last_cpu);
27181 + }
27182 + drv_ctx->cpu = *cpu;
27183 +
27184 + /* Find response FQ hooked with this CPU */
27185 + drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);
27186 +
27187 + /* Attach request FQ */
27188 + drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
27189 + QMAN_INITFQ_FLAG_SCHED);
27190 + if (unlikely(IS_ERR_OR_NULL(drv_ctx->req_fq))) {
27191 + dev_err(qidev, "create_caam_req_fq failed\n");
27192 + dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
27193 + kfree(drv_ctx);
27194 + return ERR_PTR(-ENOMEM);
27195 + }
27196 +
27197 + drv_ctx->qidev = qidev;
27198 + return drv_ctx;
27199 +}
27200 +EXPORT_SYMBOL(caam_drv_ctx_init);
27201 +
27202 +void *qi_cache_alloc(gfp_t flags)
27203 +{
27204 + return kmem_cache_alloc(qi_cache, flags);
27205 +}
27206 +EXPORT_SYMBOL(qi_cache_alloc);
27207 +
27208 +void qi_cache_free(void *obj)
27209 +{
27210 + kmem_cache_free(qi_cache, obj);
27211 +}
27212 +EXPORT_SYMBOL(qi_cache_free);
27213 +
27214 +static int caam_qi_poll(struct napi_struct *napi, int budget)
27215 +{
27216 + struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);
27217 +
27218 + int cleaned = qman_p_poll_dqrr(np->p, budget);
27219 +
27220 + if (cleaned < budget) {
27221 + napi_complete(napi);
27222 + qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
27223 + }
27224 +
27225 + return cleaned;
27226 +}
27227 +
27228 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
27229 +{
27230 + if (IS_ERR_OR_NULL(drv_ctx))
27231 + return;
27232 +
27233 + /* Remove request FQ */
27234 + if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
27235 + dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");
27236 +
27237 + dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
27238 + sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
27239 + DMA_BIDIRECTIONAL);
27240 + kfree(drv_ctx);
27241 +}
27242 +EXPORT_SYMBOL(caam_drv_ctx_rel);
27243 +
27244 +int caam_qi_shutdown(struct device *qidev)
27245 +{
27246 + int i, ret;
27247 + struct caam_qi_priv *priv = dev_get_drvdata(qidev);
27248 + const cpumask_t *cpus = qman_affine_cpus();
27249 + struct cpumask old_cpumask = current->cpus_allowed;
27250 +
27251 + for_each_cpu(i, cpus) {
27252 + struct napi_struct *irqtask;
27253 +
27254 + irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
27255 + napi_disable(irqtask);
27256 + netif_napi_del(irqtask);
27257 +
27258 + if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
27259 + dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
27260 + }
27261 +
27262 + /*
27263 + * QMan driver requires CGRs to be deleted from same CPU from where they
27264 +	 * were instantiated. Hence we make module removal execute on the
27265 +	 * same CPU on which the module was originally inserted.
27266 + */
27267 + set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
27268 +
27269 + ret = qman_delete_cgr(&priv->cgr);
27270 + if (ret)
27271 + dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
27272 + else
27273 + qman_release_cgrid(priv->cgr.cgrid);
27274 +
27275 + kmem_cache_destroy(qi_cache);
27276 +
27277 + /* Now that we're done with the CGRs, restore the cpus allowed mask */
27278 + set_cpus_allowed_ptr(current, &old_cpumask);
27279 +
27280 + platform_device_unregister(priv->qi_pdev);
27281 + return ret;
27282 +}
27283 +
27284 +static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
27285 +{
27286 + caam_congested = congested;
27287 +
27288 + if (congested) {
27289 +#ifdef CONFIG_DEBUG_FS
27290 + times_congested++;
27291 +#endif
27292 + pr_debug_ratelimited("CAAM entered congestion\n");
27293 +
27294 + } else {
27295 + pr_debug_ratelimited("CAAM exited congestion\n");
27296 + }
27297 +}
27298 +
27299 +static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
27300 +{
27301 + /*
27302 + * In case of threaded ISR, for RT kernels in_irq() does not return
27303 + * appropriate value, so use in_serving_softirq to distinguish between
27304 + * softirq and irq contexts.
27305 + */
27306 + if (unlikely(in_irq() || !in_serving_softirq())) {
27307 + /* Disable QMan IRQ source and invoke NAPI */
27308 + qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
27309 + np->p = p;
27310 + napi_schedule(&np->irqtask);
27311 + return 1;
27312 + }
27313 + return 0;
27314 +}
27315 +
27316 +static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
27317 + struct qman_fq *rsp_fq,
27318 + const struct qm_dqrr_entry *dqrr)
27319 +{
27320 + struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
27321 + struct caam_drv_req *drv_req;
27322 + const struct qm_fd *fd;
27323 + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
27324 +
27325 + if (caam_qi_napi_schedule(p, caam_napi))
27326 + return qman_cb_dqrr_stop;
27327 +
27328 + fd = &dqrr->fd;
27329 + if (unlikely(fd->status))
27330 + dev_err(qidev, "Error: %#x in CAAM response FD\n", fd->status);
27331 +
27332 +	if (unlikely(fd->format != qm_fd_compound)) {
27333 + dev_err(qidev, "Non-compound FD from CAAM\n");
27334 + return qman_cb_dqrr_consume;
27335 + }
27336 +
27337 + drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
27338 + if (unlikely(!drv_req)) {
27339 + dev_err(qidev,
27340 + "Can't find original request for caam response\n");
27341 + return qman_cb_dqrr_consume;
27342 + }
27343 +
27344 + dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
27345 + sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
27346 +
27347 + drv_req->cbk(drv_req, fd->status);
27348 + return qman_cb_dqrr_consume;
27349 +}
27350 +
27351 +static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
27352 +{
27353 + struct qm_mcc_initfq opts;
27354 + struct qman_fq *fq;
27355 + int ret;
27356 +
27357 + fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
27358 + if (!fq)
27359 + return -ENOMEM;
27360 +
27361 + fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
27362 +
27363 + ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
27364 + QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
27365 + if (ret) {
27366 + dev_err(qidev, "Rsp FQ create failed\n");
27367 + kfree(fq);
27368 + return -ENODEV;
27369 + }
27370 +
27371 + opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
27372 + QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
27373 + QM_INITFQ_WE_CGID;
27374 + opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
27375 + QM_FQCTRL_CGE;
27376 + opts.fqd.dest.channel = qman_affine_channel(cpu);
27377 + opts.fqd.dest.wq = 3;
27378 + opts.fqd.cgid = qipriv.cgr.cgrid;
27379 + opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
27380 + QM_STASHING_EXCL_DATA;
27381 + opts.fqd.context_a.stashing.data_cl = 1;
27382 + opts.fqd.context_a.stashing.context_cl = 1;
27383 +
27384 + ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
27385 + if (ret) {
27386 + dev_err(qidev, "Rsp FQ init failed\n");
27387 + kfree(fq);
27388 + return -ENODEV;
27389 + }
27390 +
27391 + per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
27392 +
27393 + dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
27394 + return 0;
27395 +}
27396 +
27397 +static int init_cgr(struct device *qidev)
27398 +{
27399 + int ret;
27400 + struct qm_mcc_initcgr opts;
27401 + const u64 cpus = *(u64 *)qman_affine_cpus();
27402 + const int num_cpus = hweight64(cpus);
27403 + const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
27404 +
27405 + ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
27406 + if (ret) {
27407 + dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
27408 + return ret;
27409 + }
27410 +
27411 + qipriv.cgr.cb = cgr_cb;
27412 + memset(&opts, 0, sizeof(opts));
27413 + opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
27414 + opts.cgr.cscn_en = QM_CGR_EN;
27415 + opts.cgr.mode = QMAN_CGR_MODE_FRAME;
27416 + qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
27417 +
27418 + ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
27419 + if (ret) {
27420 + dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
27421 + qipriv.cgr.cgrid);
27422 + return ret;
27423 + }
27424 +
27425 + dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
27426 + return 0;
27427 +}
27428 +
27429 +static int alloc_rsp_fqs(struct device *qidev)
27430 +{
27431 + int ret, i;
27432 + const cpumask_t *cpus = qman_affine_cpus();
27433 +
27434 +	/* Now create response FQs */
27435 + for_each_cpu(i, cpus) {
27436 + ret = alloc_rsp_fq_cpu(qidev, i);
27437 + if (ret) {
27438 + dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
27439 + return ret;
27440 + }
27441 + }
27442 +
27443 + return 0;
27444 +}
27445 +
27446 +static void free_rsp_fqs(void)
27447 +{
27448 + int i;
27449 + const cpumask_t *cpus = qman_affine_cpus();
27450 +
27451 + for_each_cpu(i, cpus)
27452 + kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
27453 +}
27454 +
27455 +int caam_qi_init(struct platform_device *caam_pdev)
27456 +{
27457 + int err, i;
27458 + struct platform_device *qi_pdev;
27459 + struct device *ctrldev = &caam_pdev->dev, *qidev;
27460 + struct caam_drv_private *ctrlpriv;
27461 + const cpumask_t *cpus = qman_affine_cpus();
27462 + struct cpumask old_cpumask = current->cpus_allowed;
27463 + static struct platform_device_info qi_pdev_info = {
27464 + .name = "caam_qi",
27465 + .id = PLATFORM_DEVID_NONE
27466 + };
27467 +
27468 + /*
27469 +	 * QMAN requires CGRs to be removed from the same CPU+portal from which
27470 +	 * they were originally allocated. Hence we need to note down the
27471 +	 * initialisation CPU and use the same CPU for module exit.
27472 +	 * We select the first CPU from the list of portal-owning CPUs.
27473 + * Then we pin module init to this CPU.
27474 + */
27475 + mod_init_cpu = cpumask_first(cpus);
27476 + set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
27477 +
27478 + qi_pdev_info.parent = ctrldev;
27479 + qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
27480 + qi_pdev = platform_device_register_full(&qi_pdev_info);
27481 + if (IS_ERR(qi_pdev))
27482 + return PTR_ERR(qi_pdev);
27483 + arch_setup_dma_ops(&qi_pdev->dev, 0, 0, NULL, true);
27484 +
27485 + ctrlpriv = dev_get_drvdata(ctrldev);
27486 + qidev = &qi_pdev->dev;
27487 +
27488 + qipriv.qi_pdev = qi_pdev;
27489 + dev_set_drvdata(qidev, &qipriv);
27490 +
27491 + /* Initialize the congestion detection */
27492 + err = init_cgr(qidev);
27493 + if (err) {
27494 + dev_err(qidev, "CGR initialization failed: %d\n", err);
27495 + platform_device_unregister(qi_pdev);
27496 + return err;
27497 + }
27498 +
27499 + /* Initialise response FQs */
27500 + err = alloc_rsp_fqs(qidev);
27501 + if (err) {
27502 + dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
27503 + free_rsp_fqs();
27504 + platform_device_unregister(qi_pdev);
27505 + return err;
27506 + }
27507 +
27508 + /*
27509 +	 * Enable the NAPI contexts on each core which has an affine
27510 + * portal.
27511 + */
27512 + for_each_cpu(i, cpus) {
27513 + struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
27514 + struct caam_napi *caam_napi = &priv->caam_napi;
27515 + struct napi_struct *irqtask = &caam_napi->irqtask;
27516 + struct net_device *net_dev = &priv->net_dev;
27517 +
27518 + net_dev->dev = *qidev;
27519 + INIT_LIST_HEAD(&net_dev->napi_list);
27520 +
27521 + netif_napi_add(net_dev, irqtask, caam_qi_poll,
27522 + CAAM_NAPI_WEIGHT);
27523 +
27524 + napi_enable(irqtask);
27525 + }
27526 +
27527 + /* Hook up QI device to parent controlling caam device */
27528 + ctrlpriv->qidev = qidev;
27529 +
27530 + qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
27531 + SLAB_CACHE_DMA, NULL);
27532 + if (!qi_cache) {
27533 + dev_err(qidev, "Can't allocate CAAM cache\n");
27534 + free_rsp_fqs();
27535 + platform_device_unregister(qi_pdev);
27536 + return -ENOMEM;
27537 + }
27538 +
27539 + /* Done with the CGRs; restore the cpus allowed mask */
27540 + set_cpus_allowed_ptr(current, &old_cpumask);
27541 +#ifdef CONFIG_DEBUG_FS
27542 + debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
27543 + &times_congested, &caam_fops_u64_ro);
27544 +#endif
27545 + dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
27546 + return 0;
27547 +}
27548 --- /dev/null
27549 +++ b/drivers/crypto/caam/qi.h
27550 @@ -0,0 +1,204 @@
27551 +/*
27552 + * Public definitions for the CAAM/QI (Queue Interface) backend.
27553 + *
27554 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
27555 + * Copyright 2016-2017 NXP
27556 + */
27557 +
27558 +#ifndef __QI_H__
27559 +#define __QI_H__
27560 +
27561 +#include <linux/fsl_qman.h>
27562 +#include "compat.h"
27563 +#include "desc.h"
27564 +#include "desc_constr.h"
27565 +
27566 +/*
27567 + * CAAM hardware constructs a job descriptor which points to a shared descriptor
27568 + * (as pointed to by context_a of the to-CAAM FQ).
27569 + * When the job descriptor is executed by DECO, the whole job descriptor
27570 + * together with shared descriptor gets loaded in DECO buffer, which is
27571 + * 64 words (each 32-bit) long.
27572 + *
27573 + * The job descriptor constructed by CAAM hardware has the following layout:
27574 + *
27575 + * HEADER (1 word)
27576 + * Shdesc ptr (1 or 2 words)
27577 + * SEQ_OUT_PTR (1 word)
27578 + * Out ptr (1 or 2 words)
27579 + * Out length (1 word)
27580 + * SEQ_IN_PTR (1 word)
27581 + * In ptr (1 or 2 words)
27582 + * In length (1 word)
27583 + *
27584 + * The shdesc ptr is used to fetch shared descriptor contents into DECO buffer.
27585 + *
27586 + * Apart from the shdesc contents, the total number of words that get loaded in the
27587 + * DECO buffer is 8 or 11. The remaining words in the DECO buffer can be used for
27588 + * storing the shared descriptor.
27589 + */
27590 +#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
27591 +
27592 +/* Length of a single buffer in the QI driver memory cache */
27593 +#define CAAM_QI_MEMCACHE_SIZE 768
27594 +
27595 +extern bool caam_congested __read_mostly;
27596 +
27597 +/*
27598 + * This is the request structure the driver application should fill while
27599 + * submitting a job to the driver.
27600 + */
27601 +struct caam_drv_req;
27602 +
27603 +/*
27604 + * caam_qi_cbk - application's callback function invoked by the driver when the
27605 + * request has been successfully processed.
27606 + * @drv_req: original request that was submitted
27607 + * @status: completion status of request (0 - success, non-zero - error code)
27608 + */
27609 +typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
27610 +
27611 +enum optype {
27612 + ENCRYPT,
27613 + DECRYPT,
27614 + GIVENCRYPT,
27615 + NUM_OP
27616 +};
27617 +
27618 +/**
27619 + * caam_drv_ctx - CAAM/QI backend driver context
27620 + *
27621 + * The jobs are processed by the driver against a driver context.
27622 + * With every cryptographic context, a driver context is attached.
27623 + * The driver context contains data for private use by driver.
27624 + * For the applications, this is an opaque structure.
27625 + *
27626 + * @prehdr: preheader placed before shrd desc
27627 + * @sh_desc: shared descriptor
27628 + * @context_a: shared descriptor dma address
27629 + * @req_fq: to-CAAM request frame queue
27630 + * @rsp_fq: from-CAAM response frame queue
27631 + * @cpu: cpu on which to receive CAAM response
27632 + * @op_type: operation type
27633 + * @qidev: device pointer for CAAM/QI backend
27634 + */
27635 +struct caam_drv_ctx {
27636 + u32 prehdr[2];
27637 + u32 sh_desc[MAX_SDLEN];
27638 + dma_addr_t context_a;
27639 + struct qman_fq *req_fq;
27640 + struct qman_fq *rsp_fq;
27641 + int cpu;
27642 + enum optype op_type;
27643 + struct device *qidev;
27644 +} ____cacheline_aligned;
27645 +
27646 +/**
27647 + * caam_drv_req - The request structure the driver application should fill while
27648 + * submitting a job to the driver.
27649 + * @fd_sgt: QMan S/G pointing to output (fd_sgt[0]) and input (fd_sgt[1])
27650 + * buffers.
27651 + * @cbk: callback function to invoke when job is completed
27652 + * @app_ctx: arbitrary context attached with request by the application
27653 + *
27654 + * The field below must be set to the handle returned by
27655 + * caam_drv_ctx_init() before the request is enqueued:
27656 + *
27657 + * @drv_ctx: driver context the request is submitted against
27659 + */
27660 +struct caam_drv_req {
27661 + struct qm_sg_entry fd_sgt[2];
27662 + struct caam_drv_ctx *drv_ctx;
27663 + caam_qi_cbk cbk;
27664 + void *app_ctx;
27665 +} ____cacheline_aligned;
27666 +
27667 +/**
27668 + * caam_drv_ctx_init - Initialise a CAAM/QI driver context
27669 + *
27670 + * A CAAM/QI driver context must be attached with each cryptographic context.
27671 + * This function allocates memory for CAAM/QI context and returns a handle to
27672 + * the application. This handle must be submitted along with each enqueue
27673 + * request to the driver by the application.
27674 + *
27675 + * @cpu: CPU on which the application prefers the driver to receive CAAM
27676 + * responses. The request completion callback will be issued from this
27677 + * CPU.
27678 + * @sh_desc: shared descriptor pointer to be attached with CAAM/QI driver
27679 + * context.
27680 + *
27681 + * Returns a driver context on success or negative error code on failure.
27682 + */
27683 +struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu,
27684 + u32 *sh_desc);
27685 +
27686 +/**
27687 + * caam_qi_enqueue - Submit a request to QI backend driver.
27688 + *
27689 + * The request structure must be properly filled as described above.
27690 + *
27691 + * @qidev: device pointer for QI backend
27692 + * @req: CAAM QI request structure
27693 + *
27694 + * Returns 0 on success or negative error code on failure.
27695 + */
27696 +int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req);
27697 +
27698 +/**
27699 + * caam_drv_ctx_busy - Check if there are too many jobs pending with CAAM
27700 + * or too many CAAM responses are pending to be processed.
27701 + * @drv_ctx: driver context for which job is to be submitted
27702 + *
27703 + * Returns caam congestion status 'true/false'
27704 + */
27705 +bool caam_drv_ctx_busy(struct caam_drv_ctx *drv_ctx);
27706 +
27707 +/**
27708 + * caam_drv_ctx_update - Update QI driver context
27709 + *
27710 + * Invoked when the shared descriptor attached to the driver context needs to be changed.
27711 + *
27712 + * @drv_ctx: driver context to be updated
27713 + * @sh_desc: new shared descriptor pointer to be updated in QI driver context
27714 + *
27715 + * Returns 0 on success or negative error code on failure.
27716 + */
27717 +int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
27718 +
27719 +/**
27720 + * caam_drv_ctx_rel - Release a QI driver context
27721 + * @drv_ctx: context to be released
27722 + */
27723 +void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
27724 +
27725 +int caam_qi_init(struct platform_device *pdev);
27726 +int caam_qi_shutdown(struct device *dev);
27727 +
27728 +/**
27729 + * qi_cache_alloc - Allocate buffers from CAAM-QI cache
27730 + *
27731 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) needs data which has
27732 + * to be allocated on the hotpath. Instead of using malloc, one can use the
27733 + * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
27734 + * will have a size of CAAM_QI_MEMCACHE_SIZE bytes, which is sufficient for hosting the per-request S/G tables.
27735 + *
27736 + * @flags: flags that would be used for the equivalent malloc(..) call
27737 + *
27738 + * Returns a pointer to a retrieved buffer on success or NULL on failure.
27739 + */
27740 +void *qi_cache_alloc(gfp_t flags);
27741 +
27742 +/**
27743 + * qi_cache_free - Frees buffers allocated from CAAM-QI cache
27744 + *
27745 + * Invoked when a user of the CAAM-QI (i.e. caamalg-qi) no longer needs
27746 + * the buffer previously allocated by a qi_cache_alloc call.
27747 + * No checking is done; the call is a straight passthrough to
27748 + * kmem_cache_free(...).
27749 + *
27750 + * @obj: object previously allocated using qi_cache_alloc()
27751 + */
27752 +void qi_cache_free(void *obj);
27753 +
27754 +#endif /* __QI_H__ */
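Editor's note: putting the qi.h API together, a minimal request lifecycle looks roughly like the sketch below. The real consumer is caamalg_qi.c; every example_* name, the per-call context allocation, and the synchronous wait are assumptions made purely for illustration.

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/smp.h>
#include "qi.h"

/* Illustrative per-request wrapper, allocated from the QI memcache. */
struct example_qi_req {
	struct caam_drv_req drv_req;
	struct completion done;
	u32 status;
};

static void example_qi_done(struct caam_drv_req *drv_req, u32 status)
{
	struct example_qi_req *req = container_of(drv_req,
						  struct example_qi_req,
						  drv_req);

	req->status = status;
	complete(&req->done);
}

/* sh_desc is a prebuilt shared descriptor; fd_sgt[] filling is elided. */
static int example_qi_submit(struct device *qidev, u32 *sh_desc)
{
	struct caam_drv_ctx *drv_ctx;
	struct example_qi_req *req;
	int cpu = smp_processor_id();
	int ret;

	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
	if (IS_ERR(drv_ctx))
		return PTR_ERR(drv_ctx);

	req = qi_cache_alloc(GFP_KERNEL);
	if (!req) {
		caam_drv_ctx_rel(drv_ctx);
		return -ENOMEM;
	}

	/* Caller must fill req->drv_req.fd_sgt[0] (out) and fd_sgt[1] (in) */
	req->drv_req.drv_ctx = drv_ctx;
	req->drv_req.cbk = example_qi_done;
	req->drv_req.app_ctx = req;
	init_completion(&req->done);

	ret = caam_qi_enqueue(qidev, &req->drv_req);
	if (!ret) {
		wait_for_completion(&req->done);
		ret = req->status ? -EIO : 0;
	}

	qi_cache_free(req);
	caam_drv_ctx_rel(drv_ctx);
	return ret;
}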
27755 --- a/drivers/crypto/caam/regs.h
27756 +++ b/drivers/crypto/caam/regs.h
27757 @@ -2,6 +2,7 @@
27758 * CAAM hardware register-level view
27759 *
27760 * Copyright 2008-2011 Freescale Semiconductor, Inc.
27761 + * Copyright 2017 NXP
27762 */
27763
27764 #ifndef REGS_H
27765 @@ -67,6 +68,7 @@
27766 */
27767
27768 extern bool caam_little_end;
27769 +extern bool caam_imx;
27770
27771 #define caam_to_cpu(len) \
27772 static inline u##len caam##len ## _to_cpu(u##len val) \
27773 @@ -154,13 +156,10 @@ static inline u64 rd_reg64(void __iomem
27774 #else /* CONFIG_64BIT */
27775 static inline void wr_reg64(void __iomem *reg, u64 data)
27776 {
27777 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
27778 - if (caam_little_end) {
27779 + if (!caam_imx && caam_little_end) {
27780 wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
27781 wr_reg32((u32 __iomem *)(reg), data);
27782 - } else
27783 -#endif
27784 - {
27785 + } else {
27786 wr_reg32((u32 __iomem *)(reg), data >> 32);
27787 wr_reg32((u32 __iomem *)(reg) + 1, data);
27788 }
27789 @@ -168,41 +167,40 @@ static inline void wr_reg64(void __iomem
27790
27791 static inline u64 rd_reg64(void __iomem *reg)
27792 {
27793 -#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
27794 - if (caam_little_end)
27795 + if (!caam_imx && caam_little_end)
27796 return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
27797 (u64)rd_reg32((u32 __iomem *)(reg)));
27798 - else
27799 -#endif
27800 - return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
27801 - (u64)rd_reg32((u32 __iomem *)(reg) + 1));
27802 +
27803 + return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
27804 + (u64)rd_reg32((u32 __iomem *)(reg) + 1));
27805 }
27806 #endif /* CONFIG_64BIT */
27807
27808 +static inline u64 cpu_to_caam_dma64(dma_addr_t value)
27809 +{
27810 + if (caam_imx)
27811 + return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
27812 + (u64)cpu_to_caam32(upper_32_bits(value)));
27813 +
27814 + return cpu_to_caam64(value);
27815 +}
27816 +
27817 +static inline u64 caam_dma64_to_cpu(u64 value)
27818 +{
27819 + if (caam_imx)
27820 + return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
27821 + (u64)caam32_to_cpu(upper_32_bits(value)));
27822 +
27823 + return caam64_to_cpu(value);
27824 +}
27825 +
27826 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
27827 -#ifdef CONFIG_SOC_IMX7D
27828 -#define cpu_to_caam_dma(value) \
27829 - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
27830 - (u64)cpu_to_caam32(upper_32_bits(value)))
27831 -#define caam_dma_to_cpu(value) \
27832 - (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
27833 - (u64)caam32_to_cpu(upper_32_bits(value)))
27834 -#else
27835 -#define cpu_to_caam_dma(value) cpu_to_caam64(value)
27836 -#define caam_dma_to_cpu(value) caam64_to_cpu(value)
27837 -#endif /* CONFIG_SOC_IMX7D */
27838 +#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
27839 +#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
27840 #else
27841 #define cpu_to_caam_dma(value) cpu_to_caam32(value)
27842 #define caam_dma_to_cpu(value) caam32_to_cpu(value)
27843 -#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
27844 -
27845 -#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
27846 -#define cpu_to_caam_dma64(value) \
27847 - (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
27848 - (u64)cpu_to_caam32(upper_32_bits(value)))
27849 -#else
27850 -#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
27851 -#endif
27852 +#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
27853
27854 /*
27855 * jr_outentry
27856 @@ -293,6 +291,7 @@ struct caam_perfmon {
27857 u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
27858 #define CTPR_MS_QI_SHIFT 25
27859 #define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
27860 +#define CTPR_MS_DPAA2 BIT(13)
27861 #define CTPR_MS_VIRT_EN_INCL 0x00000001
27862 #define CTPR_MS_VIRT_EN_POR 0x00000002
27863 #define CTPR_MS_PG_SZ_MASK 0x10
27864 @@ -628,6 +627,8 @@ struct caam_job_ring {
27865 #define JRSTA_DECOERR_INVSIGN 0x86
27866 #define JRSTA_DECOERR_DSASIGN 0x87
27867
27868 +#define JRSTA_QIERR_ERROR_MASK 0x00ff
27869 +
27870 #define JRSTA_CCBERR_JUMP 0x08000000
27871 #define JRSTA_CCBERR_INDEX_MASK 0xff00
27872 #define JRSTA_CCBERR_INDEX_SHIFT 8
27873 --- /dev/null
27874 +++ b/drivers/crypto/caam/sg_sw_qm.h
27875 @@ -0,0 +1,126 @@
27876 +/*
27877 + * Copyright 2013-2016 Freescale Semiconductor, Inc.
27878 + * Copyright 2016-2017 NXP
27879 + *
27880 + * Redistribution and use in source and binary forms, with or without
27881 + * modification, are permitted provided that the following conditions are met:
27882 + * * Redistributions of source code must retain the above copyright
27883 + * notice, this list of conditions and the following disclaimer.
27884 + * * Redistributions in binary form must reproduce the above copyright
27885 + * notice, this list of conditions and the following disclaimer in the
27886 + * documentation and/or other materials provided with the distribution.
27887 + * * Neither the name of Freescale Semiconductor nor the
27888 + * names of its contributors may be used to endorse or promote products
27889 + * derived from this software without specific prior written permission.
27890 + *
27891 + *
27892 + * ALTERNATIVELY, this software may be distributed under the terms of the
27893 + * GNU General Public License ("GPL") as published by the Free Software
27894 + * Foundation, either version 2 of that License or (at your option) any
27895 + * later version.
27896 + *
27897 + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
27898 + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
27899 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27900 + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27901 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
27902 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27903 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27904 + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27905 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27906 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27907 + */
27908 +
27909 +#ifndef __SG_SW_QM_H
27910 +#define __SG_SW_QM_H
27911 +
27912 +#include <linux/fsl_qman.h>
27913 +#include "regs.h"
27914 +
27915 +static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
27916 +{
27917 + dma_addr_t addr = qm_sg_ptr->opaque;
27918 +
27919 + qm_sg_ptr->opaque = cpu_to_caam64(addr);
27920 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
27921 +}
27922 +
27923 +static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
27924 + u32 len, u16 offset)
27925 +{
27926 + qm_sg_ptr->addr = dma;
27927 + qm_sg_ptr->length = len;
27928 + qm_sg_ptr->__reserved2 = 0;
27929 + qm_sg_ptr->bpid = 0;
27930 + qm_sg_ptr->__reserved3 = 0;
27931 + qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
27932 +
27933 + cpu_to_hw_sg(qm_sg_ptr);
27934 +}
27935 +
27936 +static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
27937 + dma_addr_t dma, u32 len, u16 offset)
27938 +{
27939 + qm_sg_ptr->extension = 0;
27940 + qm_sg_ptr->final = 0;
27941 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
27942 +}
27943 +
27944 +static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
27945 + dma_addr_t dma, u32 len, u16 offset)
27946 +{
27947 + qm_sg_ptr->extension = 0;
27948 + qm_sg_ptr->final = 1;
27949 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
27950 +}
27951 +
27952 +static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
27953 + dma_addr_t dma, u32 len, u16 offset)
27954 +{
27955 + qm_sg_ptr->extension = 1;
27956 + qm_sg_ptr->final = 0;
27957 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
27958 +}
27959 +
27960 +static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
27961 + dma_addr_t dma, u32 len,
27962 + u16 offset)
27963 +{
27964 + qm_sg_ptr->extension = 1;
27965 + qm_sg_ptr->final = 1;
27966 + __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
27967 +}
27968 +
27969 +/*
27970 + * convert scatterlist to h/w link table format
27971 + * but does not set the final bit; instead, returns the last entry
27972 + */
27973 +static inline struct qm_sg_entry *
27974 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
27975 + struct qm_sg_entry *qm_sg_ptr, u16 offset)
27976 +{
27977 + while (sg_count && sg) {
27978 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
27979 + sg_dma_len(sg), offset);
27980 + qm_sg_ptr++;
27981 + sg = sg_next(sg);
27982 + sg_count--;
27983 + }
27984 + return qm_sg_ptr - 1;
27985 +}
27986 +
27987 +/*
27988 + * convert scatterlist to h/w link table format
27989 + * scatterlist must have been previously dma mapped
27990 + */
27991 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
27992 + struct qm_sg_entry *qm_sg_ptr, u16 offset)
27993 +{
27994 + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
27995 +
27996 + qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
27997 + qm_sg_ptr->final = 1;
27998 + qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
27999 +}
28000 +
28001 +#endif /* __SG_SW_QM_H */
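
A hedged usage sketch, not part of the patch: the comments above note that the scatterlist must already be DMA mapped, so a caller would typically pair dma_map_sg() with sg_to_qm_sg_last() as below. The my_fill_qm_sg() name and the DMA_TO_DEVICE direction are assumptions for illustration.

/* Illustrative sketch only -- not part of the patch */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include "sg_sw_qm.h"

static int my_fill_qm_sg(struct device *qidev, struct scatterlist *sg,
			 int nents, struct qm_sg_entry *qm_sgt)
{
	int mapped;

	/* the conversion helpers expect DMA addresses, so map first */
	mapped = dma_map_sg(qidev, sg, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* mirror the mapped entries and mark the last one as final */
	sg_to_qm_sg_last(sg, mapped, qm_sgt, 0);

	return mapped;
}
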
28002 --- /dev/null
28003 +++ b/drivers/crypto/caam/sg_sw_qm2.h
28004 @@ -0,0 +1,81 @@
28005 +/*
28006 + * Copyright 2015-2016 Freescale Semiconductor, Inc.
28007 + * Copyright 2017 NXP
28008 + *
28009 + * Redistribution and use in source and binary forms, with or without
28010 + * modification, are permitted provided that the following conditions are met:
28011 + * * Redistributions of source code must retain the above copyright
28012 + * notice, this list of conditions and the following disclaimer.
28013 + * * Redistributions in binary form must reproduce the above copyright
28014 + * notice, this list of conditions and the following disclaimer in the
28015 + * documentation and/or other materials provided with the distribution.
28016 + * * Neither the names of the above-listed copyright holders nor the
28017 + * names of any contributors may be used to endorse or promote products
28018 + * derived from this software without specific prior written permission.
28019 + *
28020 + *
28021 + * ALTERNATIVELY, this software may be distributed under the terms of the
28022 + * GNU General Public License ("GPL") as published by the Free Software
28023 + * Foundation, either version 2 of that License or (at your option) any
28024 + * later version.
28025 + *
28026 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
28027 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28028 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28029 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
28030 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28031 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28032 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28033 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28034 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28035 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28036 + * POSSIBILITY OF SUCH DAMAGE.
28037 + */
28038 +
28039 +#ifndef _SG_SW_QM2_H_
28040 +#define _SG_SW_QM2_H_
28041 +
28042 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
28043 +
28044 +static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
28045 + dma_addr_t dma, u32 len, u16 offset)
28046 +{
28047 + dpaa2_sg_set_addr(qm_sg_ptr, dma);
28048 + dpaa2_sg_set_format(qm_sg_ptr, dpaa2_sg_single);
28049 + dpaa2_sg_set_final(qm_sg_ptr, false);
28050 + dpaa2_sg_set_len(qm_sg_ptr, len);
28051 + dpaa2_sg_set_bpid(qm_sg_ptr, 0);
28052 + dpaa2_sg_set_offset(qm_sg_ptr, offset);
28053 +}
28054 +
28055 +/*
28056 + * convert scatterlist to h/w link table format
28057 + * but does not set the final bit; instead, returns the last entry
28058 + */
28059 +static inline struct dpaa2_sg_entry *
28060 +sg_to_qm_sg(struct scatterlist *sg, int sg_count,
28061 + struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
28062 +{
28063 + while (sg_count && sg) {
28064 + dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
28065 + sg_dma_len(sg), offset);
28066 + qm_sg_ptr++;
28067 + sg = sg_next(sg);
28068 + sg_count--;
28069 + }
28070 + return qm_sg_ptr - 1;
28071 +}
28072 +
28073 +/*
28074 + * convert scatterlist to h/w link table format
28075 + * scatterlist must have been previously dma mapped
28076 + */
28077 +static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
28078 + struct dpaa2_sg_entry *qm_sg_ptr,
28079 + u16 offset)
28080 +{
28081 + qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
28082 + dpaa2_sg_set_final(qm_sg_ptr, true);
28083 +}
28084 +
28085 +#endif /* _SG_SW_QM2_H_ */
28086 --- a/drivers/crypto/caam/sg_sw_sec4.h
28087 +++ b/drivers/crypto/caam/sg_sw_sec4.h
28088 @@ -5,9 +5,19 @@
28089 *
28090 */
28091
28092 +#ifndef _SG_SW_SEC4_H_
28093 +#define _SG_SW_SEC4_H_
28094 +
28095 +#include "ctrl.h"
28096 #include "regs.h"
28097 +#include "sg_sw_qm2.h"
28098 +#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
28099
28100 -struct sec4_sg_entry;
28101 +struct sec4_sg_entry {
28102 + u64 ptr;
28103 + u32 len;
28104 + u32 bpid_offset;
28105 +};
28106
28107 /*
28108 * convert single dma address to h/w link table format
28109 @@ -15,9 +25,15 @@ struct sec4_sg_entry;
28110 static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
28111 dma_addr_t dma, u32 len, u16 offset)
28112 {
28113 - sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
28114 - sec4_sg_ptr->len = cpu_to_caam32(len);
28115 - sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset & SEC4_SG_OFFSET_MASK);
28116 + if (caam_dpaa2) {
28117 + dma_to_qm_sg_one((struct dpaa2_sg_entry *)sec4_sg_ptr, dma, len,
28118 + offset);
28119 + } else {
28120 + sec4_sg_ptr->ptr = cpu_to_caam_dma64(dma);
28121 + sec4_sg_ptr->len = cpu_to_caam32(len);
28122 + sec4_sg_ptr->bpid_offset = cpu_to_caam32(offset &
28123 + SEC4_SG_OFFSET_MASK);
28124 + }
28125 #ifdef DEBUG
28126 print_hex_dump(KERN_ERR, "sec4_sg_ptr@: ",
28127 DUMP_PREFIX_ADDRESS, 16, 4, sec4_sg_ptr,
28128 @@ -43,6 +59,14 @@ sg_to_sec4_sg(struct scatterlist *sg, in
28129 return sec4_sg_ptr - 1;
28130 }
28131
28132 +static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
28133 +{
28134 + if (caam_dpaa2)
28135 + dpaa2_sg_set_final((struct dpaa2_sg_entry *)sec4_sg_ptr, true);
28136 + else
28137 + sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
28138 +}
28139 +
28140 /*
28141 * convert scatterlist to h/w link table format
28142 * scatterlist must have been previously dma mapped
28143 @@ -52,31 +76,7 @@ static inline void sg_to_sec4_sg_last(st
28144 u16 offset)
28145 {
28146 sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
28147 - sec4_sg_ptr->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
28148 -}
28149 -
28150 -static inline struct sec4_sg_entry *sg_to_sec4_sg_len(
28151 - struct scatterlist *sg, unsigned int total,
28152 - struct sec4_sg_entry *sec4_sg_ptr)
28153 -{
28154 - do {
28155 - unsigned int len = min(sg_dma_len(sg), total);
28156 -
28157 - dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), len, 0);
28158 - sec4_sg_ptr++;
28159 - sg = sg_next(sg);
28160 - total -= len;
28161 - } while (total);
28162 - return sec4_sg_ptr - 1;
28163 + sg_to_sec4_set_last(sec4_sg_ptr);
28164 }
28165
28166 -/* derive number of elements in scatterlist, but return 0 for 1 */
28167 -static inline int sg_count(struct scatterlist *sg_list, int nbytes)
28168 -{
28169 - int sg_nents = sg_nents_for_len(sg_list, nbytes);
28170 -
28171 - if (likely(sg_nents == 1))
28172 - return 0;
28173 -
28174 - return sg_nents;
28175 -}
28176 +#endif /* _SG_SW_SEC4_H_ */
28177 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c
28178 +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
28179 @@ -516,7 +516,7 @@ err:
28180
28181 /**
28182 * rsi_disconnect() - This function performs the reverse of the probe function,
28183 - * it deintialize the driver structure.
28184 + * it deinitializes the driver structure.
28185 * @pfunction: Pointer to the USB interface structure.
28186 *
28187 * Return: None.
28188 --- a/drivers/staging/wilc1000/linux_wlan.c
28189 +++ b/drivers/staging/wilc1000/linux_wlan.c
28190 @@ -211,7 +211,7 @@ static void deinit_irq(struct net_device
28191 vif = netdev_priv(dev);
28192 wilc = vif->wilc;
28193
28194 - /* Deintialize IRQ */
28195 + /* Deinitialize IRQ */
28196 if (wilc->dev_irq_num) {
28197 free_irq(wilc->dev_irq_num, wilc);
28198 gpio_free(wilc->gpio);
28199 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
28200 +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
28201 @@ -2359,7 +2359,7 @@ int wilc_deinit_host_int(struct net_devi
28202 del_timer_sync(&wilc_during_ip_timer);
28203
28204 if (s32Error)
28205 - netdev_err(net, "Error while deintializing host interface\n");
28206 + netdev_err(net, "Error while deinitializing host interface\n");
28207
28208 return s32Error;
28209 }
28210 --- /dev/null
28211 +++ b/include/crypto/acompress.h
28212 @@ -0,0 +1,269 @@
28213 +/*
28214 + * Asynchronous Compression operations
28215 + *
28216 + * Copyright (c) 2016, Intel Corporation
28217 + * Authors: Weigang Li <weigang.li@intel.com>
28218 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
28219 + *
28220 + * This program is free software; you can redistribute it and/or modify it
28221 + * under the terms of the GNU General Public License as published by the Free
28222 + * Software Foundation; either version 2 of the License, or (at your option)
28223 + * any later version.
28224 + *
28225 + */
28226 +#ifndef _CRYPTO_ACOMP_H
28227 +#define _CRYPTO_ACOMP_H
28228 +#include <linux/crypto.h>
28229 +
28230 +#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001
28231 +
28232 +/**
28233 + * struct acomp_req - asynchronous (de)compression request
28234 + *
28235 + * @base: Common attributes for asynchronous crypto requests
28236 + * @src: Source Data
28237 + * @dst: Destination data
28238 + * @slen: Size of the input buffer
28239 + * @dlen: Size of the output buffer and number of bytes produced
28240 + * @flags: Internal flags
28241 + * @__ctx: Start of private context data
28242 + */
28243 +struct acomp_req {
28244 + struct crypto_async_request base;
28245 + struct scatterlist *src;
28246 + struct scatterlist *dst;
28247 + unsigned int slen;
28248 + unsigned int dlen;
28249 + u32 flags;
28250 + void *__ctx[] CRYPTO_MINALIGN_ATTR;
28251 +};
28252 +
28253 +/**
28254 + * struct crypto_acomp - user-instantiated objects which encapsulate
28255 + * algorithms and core processing logic
28256 + *
28257 + * @compress: Function performs a compress operation
28258 + * @decompress: Function performs a de-compress operation
28259 + * @dst_free: Frees destination buffer if allocated inside the
28260 + * algorithm
28261 + * @reqsize: Context size for (de)compression requests
28262 + * @base: Common crypto API algorithm data structure
28263 + */
28264 +struct crypto_acomp {
28265 + int (*compress)(struct acomp_req *req);
28266 + int (*decompress)(struct acomp_req *req);
28267 + void (*dst_free)(struct scatterlist *dst);
28268 + unsigned int reqsize;
28269 + struct crypto_tfm base;
28270 +};
28271 +
28272 +/**
28273 + * struct acomp_alg - asynchronous compression algorithm
28274 + *
28275 + * @compress: Function performs a compress operation
28276 + * @decompress: Function performs a de-compress operation
28277 + * @dst_free: Frees destination buffer if allocated inside the algorithm
28278 + * @init: Initialize the cryptographic transformation object.
28279 + * This function is used to initialize the cryptographic
28280 + * transformation object. This function is called only once at
28281 + * the instantiation time, right after the transformation context
28282 + * was allocated. In case the cryptographic hardware has some
28283 + * special requirements which need to be handled by software, this
28284 + * function shall check for the precise requirement of the
28285 + * transformation and put any software fallbacks in place.
28286 + * @exit: Deinitialize the cryptographic transformation object. This is a
28287 + * counterpart to @init, used to remove various changes set in
28288 + * @init.
28289 + *
28290 + * @reqsize: Context size for (de)compression requests
28291 + * @base: Common crypto API algorithm data structure
28292 + */
28293 +struct acomp_alg {
28294 + int (*compress)(struct acomp_req *req);
28295 + int (*decompress)(struct acomp_req *req);
28296 + void (*dst_free)(struct scatterlist *dst);
28297 + int (*init)(struct crypto_acomp *tfm);
28298 + void (*exit)(struct crypto_acomp *tfm);
28299 + unsigned int reqsize;
28300 + struct crypto_alg base;
28301 +};
28302 +
28303 +/**
28304 + * DOC: Asynchronous Compression API
28305 + *
28306 + * The Asynchronous Compression API is used with the algorithms of type
28307 + * CRYPTO_ALG_TYPE_ACOMPRESS (listed as type "acomp" in /proc/crypto)
28308 + */
28309 +
28310 +/**
28311 + * crypto_alloc_acomp() -- allocate ACOMPRESS tfm handle
28312 + * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
28313 + * compression algorithm e.g. "deflate"
28314 + * @type: specifies the type of the algorithm
28315 + * @mask: specifies the mask for the algorithm
28316 + *
28317 + * Allocate a handle for a compression algorithm. The returned struct
28318 + * crypto_acomp is the handle that is required for any subsequent
28319 + * API invocation for the compression operations.
28320 + *
28321 + * Return: allocated handle in case of success; IS_ERR() is true in case
28322 + * of an error, PTR_ERR() returns the error code.
28323 + */
28324 +struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
28325 + u32 mask);
28326 +
28327 +static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
28328 +{
28329 + return &tfm->base;
28330 +}
28331 +
28332 +static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
28333 +{
28334 + return container_of(alg, struct acomp_alg, base);
28335 +}
28336 +
28337 +static inline struct crypto_acomp *__crypto_acomp_tfm(struct crypto_tfm *tfm)
28338 +{
28339 + return container_of(tfm, struct crypto_acomp, base);
28340 +}
28341 +
28342 +static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
28343 +{
28344 + return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
28345 +}
28346 +
28347 +static inline unsigned int crypto_acomp_reqsize(struct crypto_acomp *tfm)
28348 +{
28349 + return tfm->reqsize;
28350 +}
28351 +
28352 +static inline void acomp_request_set_tfm(struct acomp_req *req,
28353 + struct crypto_acomp *tfm)
28354 +{
28355 + req->base.tfm = crypto_acomp_tfm(tfm);
28356 +}
28357 +
28358 +static inline struct crypto_acomp *crypto_acomp_reqtfm(struct acomp_req *req)
28359 +{
28360 + return __crypto_acomp_tfm(req->base.tfm);
28361 +}
28362 +
28363 +/**
28364 + * crypto_free_acomp() -- free ACOMPRESS tfm handle
28365 + *
28366 + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
28367 + */
28368 +static inline void crypto_free_acomp(struct crypto_acomp *tfm)
28369 +{
28370 + crypto_destroy_tfm(tfm, crypto_acomp_tfm(tfm));
28371 +}
28372 +
28373 +static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask)
28374 +{
28375 + type &= ~CRYPTO_ALG_TYPE_MASK;
28376 + type |= CRYPTO_ALG_TYPE_ACOMPRESS;
28377 + mask |= CRYPTO_ALG_TYPE_MASK;
28378 +
28379 + return crypto_has_alg(alg_name, type, mask);
28380 +}
28381 +
28382 +/**
28383 + * acomp_request_alloc() -- allocates asynchronous (de)compression request
28384 + *
28385 + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp()
28386 + *
28387 + * Return: allocated handle in case of success or NULL in case of an error
28388 + */
28389 +struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm);
28390 +
28391 +/**
28392 + * acomp_request_free() -- zeroize and free asynchronous (de)compression
28393 + * request as well as the output buffer if allocated
28394 + * inside the algorithm
28395 + *
28396 + * @req: request to free
28397 + */
28398 +void acomp_request_free(struct acomp_req *req);
28399 +
28400 +/**
28401 + * acomp_request_set_callback() -- Sets an asynchronous callback
28402 + *
28403 + * Callback will be called when an asynchronous operation on a given
28404 + * request is finished.
28405 + *
28406 + * @req: request that the callback will be set for
28407 + * @flgs: specify for instance if the operation may backlog
28408 + * @cmpl: callback which will be called
28409 + * @data: private data used by the caller
28410 + */
28411 +static inline void acomp_request_set_callback(struct acomp_req *req,
28412 + u32 flgs,
28413 + crypto_completion_t cmpl,
28414 + void *data)
28415 +{
28416 + req->base.complete = cmpl;
28417 + req->base.data = data;
28418 + req->base.flags = flgs;
28419 +}
28420 +
28421 +/**
28422 + * acomp_request_set_params() -- Sets request parameters
28423 + *
28424 + * Sets parameters required by an acomp operation
28425 + *
28426 + * @req: asynchronous compress request
28427 + * @src: pointer to input buffer scatterlist
28428 + * @dst: pointer to output buffer scatterlist. If this is NULL, the
28429 + * acomp layer will allocate the output memory
28430 + * @slen: size of the input buffer
28431 + * @dlen: size of the output buffer. If dst is NULL, this can be used by
28432 + * the user to specify the maximum amount of memory to allocate
28433 + */
28434 +static inline void acomp_request_set_params(struct acomp_req *req,
28435 + struct scatterlist *src,
28436 + struct scatterlist *dst,
28437 + unsigned int slen,
28438 + unsigned int dlen)
28439 +{
28440 + req->src = src;
28441 + req->dst = dst;
28442 + req->slen = slen;
28443 + req->dlen = dlen;
28444 +
28445 + if (!req->dst)
28446 + req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
28447 +}
28448 +
28449 +/**
28450 + * crypto_acomp_compress() -- Invoke asynchronous compress operation
28451 + *
28452 + * Function invokes the asynchronous compress operation
28453 + *
28454 + * @req: asynchronous compress request
28455 + *
28456 + * Return: zero on success; error code in case of error
28457 + */
28458 +static inline int crypto_acomp_compress(struct acomp_req *req)
28459 +{
28460 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
28461 +
28462 + return tfm->compress(req);
28463 +}
28464 +
28465 +/**
28466 + * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
28467 + *
28468 + * Function invokes the asynchronous decompress operation
28469 + *
28470 + * @req: asynchronous compress request
28471 + *
28472 + * Return: zero on success; error code in case of error
28473 + */
28474 +static inline int crypto_acomp_decompress(struct acomp_req *req)
28475 +{
28476 + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
28477 +
28478 + return tfm->decompress(req);
28479 +}
28480 +
28481 +#endif
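
A hedged, minimal sketch of the call flow documented above, not part of the patch: allocate a tfm and a request, set the parameters and callback, then invoke the compress operation. The "deflate" algorithm name, the my_compress_oneshot() name and the synchronous-only error handling are assumptions for illustration.

/* Illustrative sketch only -- not part of the patch */
#include <crypto/acompress.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int my_compress_oneshot(struct scatterlist *src, unsigned int slen,
			       struct scatterlist *dst, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	int ret;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	acomp_request_set_params(req, src, dst, slen, dlen);
	/* no callback set: only synchronous completion is handled here */
	acomp_request_set_callback(req, 0, NULL, NULL);

	ret = crypto_acomp_compress(req);
	/* a real caller would handle -EINPROGRESS/-EBUSY via the callback */

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return ret;
}
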
28482 --- /dev/null
28483 +++ b/include/crypto/internal/acompress.h
28484 @@ -0,0 +1,81 @@
28485 +/*
28486 + * Asynchronous Compression operations
28487 + *
28488 + * Copyright (c) 2016, Intel Corporation
28489 + * Authors: Weigang Li <weigang.li@intel.com>
28490 + * Giovanni Cabiddu <giovanni.cabiddu@intel.com>
28491 + *
28492 + * This program is free software; you can redistribute it and/or modify it
28493 + * under the terms of the GNU General Public License as published by the Free
28494 + * Software Foundation; either version 2 of the License, or (at your option)
28495 + * any later version.
28496 + *
28497 + */
28498 +#ifndef _CRYPTO_ACOMP_INT_H
28499 +#define _CRYPTO_ACOMP_INT_H
28500 +#include <crypto/acompress.h>
28501 +
28502 +/*
28503 + * Transform internal helpers.
28504 + */
28505 +static inline void *acomp_request_ctx(struct acomp_req *req)
28506 +{
28507 + return req->__ctx;
28508 +}
28509 +
28510 +static inline void *acomp_tfm_ctx(struct crypto_acomp *tfm)
28511 +{
28512 + return tfm->base.__crt_ctx;
28513 +}
28514 +
28515 +static inline void acomp_request_complete(struct acomp_req *req,
28516 + int err)
28517 +{
28518 + req->base.complete(&req->base, err);
28519 +}
28520 +
28521 +static inline const char *acomp_alg_name(struct crypto_acomp *tfm)
28522 +{
28523 + return crypto_acomp_tfm(tfm)->__crt_alg->cra_name;
28524 +}
28525 +
28526 +static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm)
28527 +{
28528 + struct acomp_req *req;
28529 +
28530 + req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL);
28531 + if (likely(req))
28532 + acomp_request_set_tfm(req, tfm);
28533 + return req;
28534 +}
28535 +
28536 +static inline void __acomp_request_free(struct acomp_req *req)
28537 +{
28538 + kzfree(req);
28539 +}
28540 +
28541 +/**
28542 + * crypto_register_acomp() -- Register asynchronous compression algorithm
28543 + *
28544 + * Function registers an implementation of an asynchronous
28545 + * compression algorithm
28546 + *
28547 + * @alg: algorithm definition
28548 + *
28549 + * Return: zero on success; error code in case of error
28550 + */
28551 +int crypto_register_acomp(struct acomp_alg *alg);
28552 +
28553 +/**
28554 + * crypto_unregister_acomp() -- Unregister asynchronous compression algorithm
28555 + *
28556 + * Function unregisters an implementation of an asynchronous
28557 + * compression algorithm
28558 + *
28559 + * @alg: algorithm definition
28560 + *
28561 + * Return: zero on success; error code in case of error
28562 + */
28563 +int crypto_unregister_acomp(struct acomp_alg *alg);
28564 +
28565 +#endif
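
A hedged sketch of what crypto_register_acomp() expects, not part of the patch: a minimal acomp_alg wired up as a module. All "my_*" identifiers and the "mycomp" cra_* strings are invented for illustration; the compress/decompress bodies are stubs.

/* Illustrative sketch only -- not part of the patch */
#include <crypto/internal/acompress.h>
#include <linux/module.h>

static int my_acomp_compress(struct acomp_req *req)
{
	/* consume req->src/req->slen, produce req->dst and update req->dlen */
	return -EOPNOTSUPP;
}

static int my_acomp_decompress(struct acomp_req *req)
{
	return -EOPNOTSUPP;
}

static struct acomp_alg my_acomp = {
	.compress	= my_acomp_compress,
	.decompress	= my_acomp_decompress,
	.reqsize	= 0,
	.base		= {
		.cra_name	 = "mycomp",
		.cra_driver_name = "mycomp-generic",
		.cra_priority	 = 100,
		.cra_module	 = THIS_MODULE,
	},
};

static int __init my_acomp_init(void)
{
	return crypto_register_acomp(&my_acomp);
}

static void __exit my_acomp_exit(void)
{
	crypto_unregister_acomp(&my_acomp);
}

module_init(my_acomp_init);
module_exit(my_acomp_exit);
MODULE_LICENSE("GPL");
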
28566 --- /dev/null
28567 +++ b/include/crypto/internal/scompress.h
28568 @@ -0,0 +1,136 @@
28569 +/*
28570 + * Synchronous Compression operations
28571 + *
28572 + * Copyright 2015 LG Electronics Inc.
28573 + * Copyright (c) 2016, Intel Corporation
28574 + * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
28575 + *
28576 + * This program is free software; you can redistribute it and/or modify it
28577 + * under the terms of the GNU General Public License as published by the Free
28578 + * Software Foundation; either version 2 of the License, or (at your option)
28579 + * any later version.
28580 + *
28581 + */
28582 +#ifndef _CRYPTO_SCOMP_INT_H
28583 +#define _CRYPTO_SCOMP_INT_H
28584 +#include <linux/crypto.h>
28585 +
28586 +#define SCOMP_SCRATCH_SIZE 131072
28587 +
28588 +struct crypto_scomp {
28589 + struct crypto_tfm base;
28590 +};
28591 +
28592 +/**
28593 + * struct scomp_alg - synchronous compression algorithm
28594 + *
28595 + * @alloc_ctx: Function allocates algorithm specific context
28596 + * @free_ctx: Function frees context allocated with alloc_ctx
28597 + * @compress: Function performs a compress operation
28598 + * @decompress: Function performs a de-compress operation
28599 + * @init: Initialize the cryptographic transformation object.
28600 + * This function is used to initialize the cryptographic
28601 + * transformation object. This function is called only once at
28602 + * the instantiation time, right after the transformation context
28603 + * was allocated. In case the cryptographic hardware has some
28604 + * special requirements which need to be handled by software, this
28605 + * function shall check for the precise requirement of the
28606 + * transformation and put any software fallbacks in place.
28607 + * @exit: Deinitialize the cryptographic transformation object. This is a
28608 + * counterpart to @init, used to remove various changes set in
28609 + * @init.
28610 + * @base: Common crypto API algorithm data structure
28611 + */
28612 +struct scomp_alg {
28613 + void *(*alloc_ctx)(struct crypto_scomp *tfm);
28614 + void (*free_ctx)(struct crypto_scomp *tfm, void *ctx);
28615 + int (*compress)(struct crypto_scomp *tfm, const u8 *src,
28616 + unsigned int slen, u8 *dst, unsigned int *dlen,
28617 + void *ctx);
28618 + int (*decompress)(struct crypto_scomp *tfm, const u8 *src,
28619 + unsigned int slen, u8 *dst, unsigned int *dlen,
28620 + void *ctx);
28621 + struct crypto_alg base;
28622 +};
28623 +
28624 +static inline struct scomp_alg *__crypto_scomp_alg(struct crypto_alg *alg)
28625 +{
28626 + return container_of(alg, struct scomp_alg, base);
28627 +}
28628 +
28629 +static inline struct crypto_scomp *__crypto_scomp_tfm(struct crypto_tfm *tfm)
28630 +{
28631 + return container_of(tfm, struct crypto_scomp, base);
28632 +}
28633 +
28634 +static inline struct crypto_tfm *crypto_scomp_tfm(struct crypto_scomp *tfm)
28635 +{
28636 + return &tfm->base;
28637 +}
28638 +
28639 +static inline void crypto_free_scomp(struct crypto_scomp *tfm)
28640 +{
28641 + crypto_destroy_tfm(tfm, crypto_scomp_tfm(tfm));
28642 +}
28643 +
28644 +static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm)
28645 +{
28646 + return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg);
28647 +}
28648 +
28649 +static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm)
28650 +{
28651 + return crypto_scomp_alg(tfm)->alloc_ctx(tfm);
28652 +}
28653 +
28654 +static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm,
28655 + void *ctx)
28656 +{
28657 + return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx);
28658 +}
28659 +
28660 +static inline int crypto_scomp_compress(struct crypto_scomp *tfm,
28661 + const u8 *src, unsigned int slen,
28662 + u8 *dst, unsigned int *dlen, void *ctx)
28663 +{
28664 + return crypto_scomp_alg(tfm)->compress(tfm, src, slen, dst, dlen, ctx);
28665 +}
28666 +
28667 +static inline int crypto_scomp_decompress(struct crypto_scomp *tfm,
28668 + const u8 *src, unsigned int slen,
28669 + u8 *dst, unsigned int *dlen,
28670 + void *ctx)
28671 +{
28672 + return crypto_scomp_alg(tfm)->decompress(tfm, src, slen, dst, dlen,
28673 + ctx);
28674 +}
28675 +
28676 +int crypto_init_scomp_ops_async(struct crypto_tfm *tfm);
28677 +struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req);
28678 +void crypto_acomp_scomp_free_ctx(struct acomp_req *req);
28679 +
28680 +/**
28681 + * crypto_register_scomp() -- Register synchronous compression algorithm
28682 + *
28683 + * Function registers an implementation of a synchronous
28684 + * compression algorithm
28685 + *
28686 + * @alg: algorithm definition
28687 + *
28688 + * Return: zero on success; error code in case of error
28689 + */
28690 +int crypto_register_scomp(struct scomp_alg *alg);
28691 +
28692 +/**
28693 + * crypto_unregister_scomp() -- Unregister synchronous compression algorithm
28694 + *
28695 + * Function unregisters an implementation of a synchronous
28696 + * compression algorithm
28697 + *
28698 + * @alg: algorithm definition
28699 + *
28700 + * Return: zero on success; error code in case of error
28701 + */
28702 +int crypto_unregister_scomp(struct scomp_alg *alg);
28703 +
28704 +#endif
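
For contrast with the asynchronous interface, a hedged sketch of the synchronous, context-based shape that crypto_register_scomp() expects, not part of the patch: per-request scratch memory comes from alloc_ctx()/free_ctx(), while compress()/decompress() work on flat buffers. The scratch size and the "my_*"/"myscomp" names are assumptions; the operation bodies are stubs.

/* Illustrative sketch only -- not part of the patch */
#include <crypto/internal/scompress.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>

#define MY_SCOMP_SCRATCH	4096	/* arbitrary example size */

static void *my_scomp_alloc_ctx(struct crypto_scomp *tfm)
{
	void *ctx = kzalloc(MY_SCOMP_SCRATCH, GFP_KERNEL);

	return ctx ? ctx : ERR_PTR(-ENOMEM);
}

static void my_scomp_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
	kfree(ctx);
}

static int my_scomp_compress(struct crypto_scomp *tfm, const u8 *src,
			     unsigned int slen, u8 *dst, unsigned int *dlen,
			     void *ctx)
{
	/* compress slen bytes from src into dst and update *dlen */
	return -EOPNOTSUPP;
}

static int my_scomp_decompress(struct crypto_scomp *tfm, const u8 *src,
			       unsigned int slen, u8 *dst, unsigned int *dlen,
			       void *ctx)
{
	return -EOPNOTSUPP;
}

static struct scomp_alg my_scomp = {
	.alloc_ctx	= my_scomp_alloc_ctx,
	.free_ctx	= my_scomp_free_ctx,
	.compress	= my_scomp_compress,
	.decompress	= my_scomp_decompress,
	.base		= {
		.cra_name	 = "myscomp",
		.cra_driver_name = "myscomp-generic",
		.cra_module	 = THIS_MODULE,
	},
};

/* registered with crypto_register_scomp(&my_scomp) at module init time */
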
28705 --- a/include/linux/crypto.h
28706 +++ b/include/linux/crypto.h
28707 @@ -50,6 +50,8 @@
28708 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
28709 #define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
28710 #define CRYPTO_ALG_TYPE_KPP 0x00000008
28711 +#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
28712 +#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
28713 #define CRYPTO_ALG_TYPE_RNG 0x0000000c
28714 #define CRYPTO_ALG_TYPE_AKCIPHER 0x0000000d
28715 #define CRYPTO_ALG_TYPE_DIGEST 0x0000000e
28716 @@ -60,6 +62,7 @@
28717 #define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
28718 #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
28719 #define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
28720 +#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
28721
28722 #define CRYPTO_ALG_LARVAL 0x00000010
28723 #define CRYPTO_ALG_DEAD 0x00000020
28724 --- a/include/uapi/linux/cryptouser.h
28725 +++ b/include/uapi/linux/cryptouser.h
28726 @@ -46,6 +46,7 @@ enum crypto_attr_type_t {
28727 CRYPTOCFGA_REPORT_CIPHER, /* struct crypto_report_cipher */
28728 CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
28729 CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
28730 + CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
28731 __CRYPTOCFGA_MAX
28732
28733 #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
28734 @@ -112,5 +113,9 @@ struct crypto_report_kpp {
28735 char type[CRYPTO_MAX_NAME];
28736 };
28737
28738 +struct crypto_report_acomp {
28739 + char type[CRYPTO_MAX_NAME];
28740 +};
28741 +
28742 #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \
28743 sizeof(struct crypto_report_blkcipher))
28744 --- a/scripts/spelling.txt
28745 +++ b/scripts/spelling.txt
28746 @@ -305,6 +305,9 @@ defintion||definition
28747 defintions||definitions
28748 defualt||default
28749 defult||default
28750 +deintializing||deinitializing
28751 +deintialize||deinitialize
28752 +deintialized||deinitialized
28753 deivce||device
28754 delared||declared
28755 delare||declare
28756 --- a/sound/soc/amd/acp-pcm-dma.c
28757 +++ b/sound/soc/amd/acp-pcm-dma.c
28758 @@ -506,7 +506,7 @@ static int acp_init(void __iomem *acp_mm
28759 return 0;
28760 }
28761
28762 -/* Deintialize ACP */
28763 +/* Deinitialize ACP */
28764 static int acp_deinit(void __iomem *acp_mmio)
28765 {
28766 u32 val;