From f441873642eebf20566c18d2966a8cd4b433ec1c Mon Sep 17 00:00:00 2001
From: Ard Biesheuvel <ardb@kernel.org>
Date: Tue, 5 Nov 2019 14:28:17 +0100
Subject: [PATCH] crypto: qce - switch to skcipher API

Commit 7a7ffe65c8c5 ("crypto: skcipher - Add top-level skcipher interface")
dated 20 August 2015 introduced the new skcipher API, which is supposed to
replace both blkcipher and ablkcipher. While all consumers of the API have
been converted long ago, some producers of the ablkcipher API remain, forcing
us to keep the ablkcipher support routines alive, along with the matching
code to expose [a]blkciphers via the skcipher API.

So switch this driver to the skcipher API, allowing us to finally drop the
blkcipher code in the near future.
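
As a rough guide to the shape of the conversion, the registration path
changes as sketched below (illustrative fragments assembled from the hunks
that follow, not compilable on their own):

    /* before: ablkcipher entry points hang off struct crypto_alg */
    struct crypto_alg *alg = &tmpl->alg.crypto;

    alg->cra_type = &crypto_ablkcipher_type;
    alg->cra_ablkcipher.setkey  = qce_ablkcipher_setkey;
    alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
    alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
    ret = crypto_register_alg(alg);

    /* after: a dedicated struct skcipher_alg registered via the skcipher API */
    struct skcipher_alg *alg = &tmpl->alg.skcipher;

    alg->setkey  = qce_skcipher_setkey;
    alg->encrypt = qce_skcipher_encrypt;
    alg->decrypt = qce_skcipher_decrypt;
    alg->init    = qce_skcipher_init;
    alg->exit    = qce_skcipher_exit;
    ret = crypto_register_skcipher(alg);

Note that the request context size is now set with
crypto_skcipher_set_reqsize() from the ->init() hook instead of writing
tfm->crt_ablkcipher.reqsize directly.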

Reviewed-by: Stanimir Varbanov <stanimir.varbanov@linaro.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Backported-to-4.19-by: Eneas U de Queiroz <cotequeiroz@gmail.com>
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -4,4 +4,4 @@ qcrypto-objs := core.o \
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -45,12 +45,12 @@ struct qce_cipher_reqctx {
unsigned int cryptlen;
-static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
+static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm)
- struct crypto_alg *alg = tfm->__crt_alg;
- return container_of(alg, struct qce_alg_template, alg.crypto);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ return container_of(alg, struct qce_alg_template, alg.skcipher);
-extern const struct qce_algo_ops ablkcipher_ops;
+extern const struct qce_algo_ops skcipher_ops;
#endif /* _CIPHER_H_ */
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -304,13 +304,13 @@ go_proc:
-static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
u32 totallen, u32 offset)
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
struct qce_device *qce = tmpl->qce;
__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
@@ -389,8 +389,8 @@ int qce_start(struct crypto_async_reques
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- return qce_setup_regs_ablkcipher(async_req, totallen, offset);
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ return qce_setup_regs_skcipher(async_req, totallen, offset);
case CRYPTO_ALG_TYPE_AHASH:
return qce_setup_regs_ahash(async_req, totallen, offset);
--- a/drivers/crypto/qce/common.h
+++ b/drivers/crypto/qce/common.h
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
+#include <crypto/internal/skcipher.h>
/* key size in bytes */
#define QCE_SHA_HMAC_KEY_SIZE 64
@@ -79,7 +80,7 @@ struct qce_alg_template {
unsigned long alg_flags;
- struct crypto_alg crypto;
+ struct skcipher_alg skcipher;
struct ahash_alg ahash;
struct qce_device *qce;
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
#define QCE_QUEUE_LENGTH 1
static const struct qce_algo_ops *qce_ops[] = {
--- a/drivers/crypto/qce/ablkcipher.c
-// SPDX-License-Identifier: GPL-2.0-only
- * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <crypto/internal/skcipher.h>
-static LIST_HEAD(ablkcipher_algs);
-static void qce_ablkcipher_done(void *data)
- struct crypto_async_request *async_req = data;
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
- struct qce_device *qce = tmpl->qce;
- enum dma_data_direction dir_src, dir_dst;
- diff_dst = (req->src != req->dst) ? true : false;
- dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
- dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
- error = qce_dma_terminate_all(&qce->dma);
- dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
- dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
- dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- sg_free_table(&rctx->dst_tbl);
- error = qce_check_status(qce, &status);
- dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
- qce->async_req_done(tmpl->qce, error);
-qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
- struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
- struct qce_device *qce = tmpl->qce;
- enum dma_data_direction dir_src, dir_dst;
- struct scatterlist *sg;
- rctx->iv = req->info;
- rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- rctx->cryptlen = req->nbytes;
- diff_dst = (req->src != req->dst) ? true : false;
- dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
- dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
- rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
- rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
- rctx->dst_nents = rctx->src_nents;
- if (rctx->src_nents < 0) {
- dev_err(qce->dev, "Invalid numbers of src SG.\n");
- return rctx->src_nents;
- if (rctx->dst_nents < 0) {
- dev_err(qce->dev, "Invalid numbers of dst SG.\n");
- return -rctx->dst_nents;
- rctx->dst_nents += 1;
- gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
- sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
- sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
- sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
- rctx->dst_sg = rctx->dst_tbl.sgl;
- ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
- goto error_unmap_dst;
- rctx->src_sg = req->src;
- rctx->src_sg = rctx->dst_sg;
- ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
- rctx->dst_sg, rctx->dst_nents,
- qce_ablkcipher_done, async_req);
- goto error_unmap_src;
- qce_dma_issue_pending(&qce->dma);
- ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
- goto error_terminate;
- qce_dma_terminate_all(&qce->dma);
- dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
- dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
- sg_free_table(&rctx->dst_tbl);
-static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
- unsigned int keylen)
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!key || !keylen)
- case AES_KEYSIZE_128:
- case AES_KEYSIZE_256:
- ctx->enc_keylen = keylen;
- memcpy(ctx->enc_key, key, keylen);
- ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
- ctx->enc_keylen = keylen;
-static int qce_des_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
- unsigned int keylen)
- struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
- err = verify_ablkcipher_des_key(ablk, key);
- ctx->enc_keylen = keylen;
- memcpy(ctx->enc_key, key, keylen);
-static int qce_des3_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
- unsigned int keylen)
- struct qce_cipher_ctx *ctx = crypto_ablkcipher_ctx(ablk);
- err = verify_ablkcipher_des3_key(ablk, key);
- ctx->enc_keylen = keylen;
- memcpy(ctx->enc_key, key, keylen);
-static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
- struct crypto_tfm *tfm =
- crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
- struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
- rctx->flags = tmpl->alg_flags;
- rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
- if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
- ctx->enc_keylen != AES_KEYSIZE_256) {
- SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_sync_tfm(subreq, ctx->fallback);
- skcipher_request_set_callback(subreq, req->base.flags,
- skcipher_request_set_crypt(subreq, req->src, req->dst,
- req->nbytes, req->info);
- ret = encrypt ? crypto_skcipher_encrypt(subreq) :
- crypto_skcipher_decrypt(subreq);
- skcipher_request_zero(subreq);
- return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
-static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
- return qce_ablkcipher_crypt(req, 1);
-static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
- return qce_ablkcipher_crypt(req, 0);
-static int qce_ablkcipher_init(struct crypto_tfm *tfm)
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- memset(ctx, 0, sizeof(*ctx));
- tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
- ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
- 0, CRYPTO_ALG_NEED_FALLBACK);
- return PTR_ERR_OR_ZERO(ctx->fallback);
-static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
- struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_sync_skcipher(ctx->fallback);
-struct qce_ablkcipher_def {
- unsigned long flags;
- const char *drv_name;
- unsigned int blocksize;
- unsigned int ivsize;
- unsigned int min_keysize;
- unsigned int max_keysize;
-static const struct qce_ablkcipher_def ablkcipher_def[] = {
- .flags = QCE_ALG_AES | QCE_MODE_ECB,
- .name = "ecb(aes)",
- .drv_name = "ecb-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .flags = QCE_ALG_AES | QCE_MODE_CBC,
- .name = "cbc(aes)",
- .drv_name = "cbc-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .flags = QCE_ALG_AES | QCE_MODE_CTR,
- .name = "ctr(aes)",
- .drv_name = "ctr-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .flags = QCE_ALG_AES | QCE_MODE_XTS,
- .name = "xts(aes)",
- .drv_name = "xts-aes-qce",
- .blocksize = AES_BLOCK_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .flags = QCE_ALG_DES | QCE_MODE_ECB,
- .name = "ecb(des)",
- .drv_name = "ecb-des-qce",
- .blocksize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .flags = QCE_ALG_DES | QCE_MODE_CBC,
- .name = "cbc(des)",
- .drv_name = "cbc-des-qce",
- .blocksize = DES_BLOCK_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .flags = QCE_ALG_3DES | QCE_MODE_ECB,
- .name = "ecb(des3_ede)",
- .drv_name = "ecb-3des-qce",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .flags = QCE_ALG_3DES | QCE_MODE_CBC,
- .name = "cbc(des3_ede)",
- .drv_name = "cbc-3des-qce",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
-static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
- struct qce_device *qce)
- struct qce_alg_template *tmpl;
- struct crypto_alg *alg;
- tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
- alg = &tmpl->alg.crypto;
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- alg->cra_blocksize = def->blocksize;
- alg->cra_ablkcipher.ivsize = def->ivsize;
- alg->cra_ablkcipher.min_keysize = def->min_keysize;
- alg->cra_ablkcipher.max_keysize = def->max_keysize;
- alg->cra_ablkcipher.setkey = IS_3DES(def->flags) ? qce_des3_setkey :
- IS_DES(def->flags) ? qce_des_setkey :
- qce_ablkcipher_setkey;
- alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
- alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
- alg->cra_priority = 300;
- alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY;
- alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
- alg->cra_alignmask = 0;
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_module = THIS_MODULE;
- alg->cra_init = qce_ablkcipher_init;
- alg->cra_exit = qce_ablkcipher_exit;
- INIT_LIST_HEAD(&tmpl->entry);
- tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
- tmpl->alg_flags = def->flags;
- ret = crypto_register_alg(alg);
- dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
- list_add_tail(&tmpl->entry, &ablkcipher_algs);
- dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
-static void qce_ablkcipher_unregister(struct qce_device *qce)
- struct qce_alg_template *tmpl, *n;
- list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
- crypto_unregister_alg(&tmpl->alg.crypto);
- list_del(&tmpl->entry);
-static int qce_ablkcipher_register(struct qce_device *qce)
- for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
- ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
- qce_ablkcipher_unregister(qce);
-const struct qce_algo_ops ablkcipher_ops = {
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .register_algs = qce_ablkcipher_register,
- .unregister_algs = qce_ablkcipher_unregister,
- .async_req_handle = qce_ablkcipher_async_req_handle,
+++ b/drivers/crypto/qce/skcipher.c
+// SPDX-License-Identifier: GPL-2.0-only
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+static LIST_HEAD(skcipher_algs);
+static void qce_skcipher_done(void *data)
+ struct crypto_async_request *async_req = data;
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ enum dma_data_direction dir_src, dir_dst;
+ diff_dst = (req->src != req->dst) ? true : false;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+ dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+ error = qce_dma_terminate_all(&qce->dma);
+ dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
+ dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+ sg_free_table(&rctx->dst_tbl);
+ error = qce_check_status(qce, &status);
+ dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
+ qce->async_req_done(tmpl->qce, error);
+qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
+ struct qce_device *qce = tmpl->qce;
+ enum dma_data_direction dir_src, dir_dst;
+ struct scatterlist *sg;
+ rctx->iv = req->iv;
+ rctx->ivsize = crypto_skcipher_ivsize(skcipher);
+ rctx->cryptlen = req->cryptlen;
+ diff_dst = (req->src != req->dst) ? true : false;
+ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+ dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+ rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
+ rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+ rctx->dst_nents = rctx->src_nents;
+ if (rctx->src_nents < 0) {
+ dev_err(qce->dev, "Invalid numbers of src SG.\n");
+ return rctx->src_nents;
+ if (rctx->dst_nents < 0) {
+ dev_err(qce->dev, "Invalid numbers of dst SG.\n");
+ return -rctx->dst_nents;
+ rctx->dst_nents += 1;
+ gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
+ sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+ sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+ sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+ rctx->dst_sg = rctx->dst_tbl.sgl;
+ ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+ ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+ goto error_unmap_dst;
+ rctx->src_sg = req->src;
+ rctx->src_sg = rctx->dst_sg;
+ ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
+ rctx->dst_sg, rctx->dst_nents,
+ qce_skcipher_done, async_req);
+ goto error_unmap_src;
+ qce_dma_issue_pending(&qce->dma);
+ ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+ goto error_terminate;
+ qce_dma_terminate_all(&qce->dma);
+ dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
+ dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
+ sg_free_table(&rctx->dst_tbl);
+static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
+ unsigned int keylen)
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
+ struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ if (!key || !keylen)
+ case AES_KEYSIZE_128:
+ case AES_KEYSIZE_256:
+ ctx->enc_keylen = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
+ ctx->enc_keylen = keylen;
+static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
+ unsigned int keylen)
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+ err = verify_skcipher_des_key(ablk, key);
+ ctx->enc_keylen = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
+ unsigned int keylen)
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
+ err = verify_skcipher_des3_key(ablk, key);
+ ctx->enc_keylen = keylen;
+ memcpy(ctx->enc_key, key, keylen);
+static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
+ struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+ rctx->flags = tmpl->alg_flags;
+ rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+ if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
+ ctx->enc_keylen != AES_KEYSIZE_256) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
+ skcipher_request_set_callback(subreq, req->base.flags,
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ ret = encrypt ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ skcipher_request_zero(subreq);
+ return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
+static int qce_skcipher_encrypt(struct skcipher_request *req)
+ return qce_skcipher_crypt(req, 1);
+static int qce_skcipher_decrypt(struct skcipher_request *req)
+ return qce_skcipher_crypt(req, 0);
+static int qce_skcipher_init(struct crypto_skcipher *tfm)
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ memset(ctx, 0, sizeof(*ctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
+ ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
+ return PTR_ERR_OR_ZERO(ctx->fallback);
+static void qce_skcipher_exit(struct crypto_skcipher *tfm)
+ struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
+ crypto_free_sync_skcipher(ctx->fallback);
+struct qce_skcipher_def {
+ unsigned long flags;
+ const char *drv_name;
+ unsigned int blocksize;
+ unsigned int ivsize;
+ unsigned int min_keysize;
+ unsigned int max_keysize;
+static const struct qce_skcipher_def skcipher_def[] = {
+ .flags = QCE_ALG_AES | QCE_MODE_ECB,
+ .name = "ecb(aes)",
+ .drv_name = "ecb-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .flags = QCE_ALG_AES | QCE_MODE_CBC,
+ .name = "cbc(aes)",
+ .drv_name = "cbc-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .flags = QCE_ALG_AES | QCE_MODE_CTR,
+ .name = "ctr(aes)",
+ .drv_name = "ctr-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .flags = QCE_ALG_AES | QCE_MODE_XTS,
+ .name = "xts(aes)",
+ .drv_name = "xts-aes-qce",
+ .blocksize = AES_BLOCK_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .flags = QCE_ALG_DES | QCE_MODE_ECB,
+ .name = "ecb(des)",
+ .drv_name = "ecb-des-qce",
+ .blocksize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .flags = QCE_ALG_DES | QCE_MODE_CBC,
+ .name = "cbc(des)",
+ .drv_name = "cbc-des-qce",
+ .blocksize = DES_BLOCK_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .flags = QCE_ALG_3DES | QCE_MODE_ECB,
+ .name = "ecb(des3_ede)",
+ .drv_name = "ecb-3des-qce",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .flags = QCE_ALG_3DES | QCE_MODE_CBC,
+ .name = "cbc(des3_ede)",
+ .drv_name = "cbc-3des-qce",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
+ struct qce_device *qce)
+ struct qce_alg_template *tmpl;
+ struct skcipher_alg *alg;
+ tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+ alg = &tmpl->alg.skcipher;
+ snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+ snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ alg->base.cra_blocksize = def->blocksize;
+ alg->ivsize = def->ivsize;
+ alg->min_keysize = def->min_keysize;
+ alg->max_keysize = def->max_keysize;
+ alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
+ IS_DES(def->flags) ? qce_des_setkey :
+ qce_skcipher_setkey;
+ alg->encrypt = qce_skcipher_encrypt;
+ alg->decrypt = qce_skcipher_decrypt;
+ alg->base.cra_priority = 300;
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK |
+ CRYPTO_ALG_KERN_DRIVER_ONLY;
+ alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
+ alg->base.cra_alignmask = 0;
+ alg->base.cra_module = THIS_MODULE;
+ alg->init = qce_skcipher_init;
+ alg->exit = qce_skcipher_exit;
+ INIT_LIST_HEAD(&tmpl->entry);
+ tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
+ tmpl->alg_flags = def->flags;
+ ret = crypto_register_skcipher(alg);
+ dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
+ list_add_tail(&tmpl->entry, &skcipher_algs);
+ dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
+static void qce_skcipher_unregister(struct qce_device *qce)
+ struct qce_alg_template *tmpl, *n;
+ list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
+ crypto_unregister_skcipher(&tmpl->alg.skcipher);
+ list_del(&tmpl->entry);
+static int qce_skcipher_register(struct qce_device *qce)
+ for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
+ ret = qce_skcipher_register_one(&skcipher_def[i], qce);
+ qce_skcipher_unregister(qce);
+const struct qce_algo_ops skcipher_ops = {
+ .type = CRYPTO_ALG_TYPE_SKCIPHER,
+ .register_algs = qce_skcipher_register,
+ .unregister_algs = qce_skcipher_unregister,
+ .async_req_handle = qce_skcipher_async_req_handle,