1 From f441873642eebf20566c18d2966a8cd4b433ec1c Mon Sep 17 00:00:00 2001
2 From: Ard Biesheuvel <ardb@kernel.org>
3 Date: Tue, 5 Nov 2019 14:28:17 +0100
4 Subject: [PATCH] crypto: qce - switch to skcipher API
6 Commit 7a7ffe65c8c5 ("crypto: skcipher - Add top-level skcipher interface")
7 dated 20 August 2015 introduced the new skcipher API which is supposed to
8 replace both blkcipher and ablkcipher. While all consumers of the API have
9 been converted long ago, some producers of the ablkcipher remain, forcing
10 us to keep the ablkcipher support routines alive, along with the matching
11 code to expose [a]blkciphers via the skcipher API.
13 So switch this driver to the skcipher API, allowing us to finally drop the
14 blkcipher code in the near future.
16 Reviewed-by: Stanimir Varbanov <stanimir.varbanov@linaro.org>
17 Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
18 Backported-to-4.19-by: Eneas U de Queiroz <cotequeiroz@gmail.com>
20 --- a/drivers/crypto/qce/Makefile
21 +++ b/drivers/crypto/qce/Makefile
22 @@ -4,4 +4,4 @@ qcrypto-objs := core.o \
28 --- a/drivers/crypto/qce/cipher.h
29 +++ b/drivers/crypto/qce/cipher.h
30 @@ -53,12 +53,12 @@ struct qce_cipher_reqctx {
31 unsigned int cryptlen;
34 -static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
35 +static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_skcipher *tfm)
37 - struct crypto_alg *alg = tfm->__crt_alg;
38 - return container_of(alg, struct qce_alg_template, alg.crypto);
39 + struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
40 + return container_of(alg, struct qce_alg_template, alg.skcipher);
43 -extern const struct qce_algo_ops ablkcipher_ops;
44 +extern const struct qce_algo_ops skcipher_ops;
46 #endif /* _CIPHER_H_ */
47 --- a/drivers/crypto/qce/common.c
48 +++ b/drivers/crypto/qce/common.c
49 @@ -312,13 +312,13 @@ go_proc:
53 -static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
54 +static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
55 u32 totallen, u32 offset)
57 - struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
58 - struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
59 + struct skcipher_request *req = skcipher_request_cast(async_req);
60 + struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
61 struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
62 - struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
63 + struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
64 struct qce_device *qce = tmpl->qce;
65 __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
66 __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
67 @@ -397,8 +397,8 @@ int qce_start(struct crypto_async_reques
71 - case CRYPTO_ALG_TYPE_ABLKCIPHER:
72 - return qce_setup_regs_ablkcipher(async_req, totallen, offset);
73 + case CRYPTO_ALG_TYPE_SKCIPHER:
74 + return qce_setup_regs_skcipher(async_req, totallen, offset);
75 case CRYPTO_ALG_TYPE_AHASH:
76 return qce_setup_regs_ahash(async_req, totallen, offset);
78 --- a/drivers/crypto/qce/common.h
79 +++ b/drivers/crypto/qce/common.h
81 #include <linux/types.h>
82 #include <crypto/aes.h>
83 #include <crypto/hash.h>
84 +#include <crypto/internal/skcipher.h>
86 /* key size in bytes */
87 #define QCE_SHA_HMAC_KEY_SIZE 64
88 @@ -87,7 +88,7 @@ struct qce_alg_template {
89 unsigned long alg_flags;
92 - struct crypto_alg crypto;
93 + struct skcipher_alg skcipher;
94 struct ahash_alg ahash;
96 struct qce_device *qce;
97 --- a/drivers/crypto/qce/core.c
98 +++ b/drivers/crypto/qce/core.c
100 #define QCE_QUEUE_LENGTH 1
102 static const struct qce_algo_ops *qce_ops[] = {
108 --- a/drivers/crypto/qce/ablkcipher.c
112 - * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
114 - * This program is free software; you can redistribute it and/or modify
115 - * it under the terms of the GNU General Public License version 2 and
116 - * only version 2 as published by the Free Software Foundation.
118 - * This program is distributed in the hope that it will be useful,
119 - * but WITHOUT ANY WARRANTY; without even the implied warranty of
120 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
121 - * GNU General Public License for more details.
124 -#include <linux/device.h>
125 -#include <linux/interrupt.h>
126 -#include <linux/types.h>
127 -#include <crypto/aes.h>
128 -#include <crypto/des.h>
129 -#include <crypto/internal/skcipher.h>
133 -static LIST_HEAD(ablkcipher_algs);
135 -static void qce_ablkcipher_done(void *data)
137 - struct crypto_async_request *async_req = data;
138 - struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
139 - struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
140 - struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
141 - struct qce_device *qce = tmpl->qce;
142 - enum dma_data_direction dir_src, dir_dst;
147 - diff_dst = (req->src != req->dst) ? true : false;
148 - dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
149 - dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
151 - error = qce_dma_terminate_all(&qce->dma);
153 - dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
157 - dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
158 - dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
160 - sg_free_table(&rctx->dst_tbl);
162 - error = qce_check_status(qce, &status);
164 - dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
166 - qce->async_req_done(tmpl->qce, error);
170 -qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
172 - struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
173 - struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
174 - struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
175 - struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
176 - struct qce_device *qce = tmpl->qce;
177 - enum dma_data_direction dir_src, dir_dst;
178 - struct scatterlist *sg;
183 - rctx->iv = req->info;
184 - rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
185 - rctx->cryptlen = req->nbytes;
187 - diff_dst = (req->src != req->dst) ? true : false;
188 - dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
189 - dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
191 - rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
193 - rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
195 - rctx->dst_nents = rctx->src_nents;
196 - if (rctx->src_nents < 0) {
197 - dev_err(qce->dev, "Invalid numbers of src SG.\n");
198 - return rctx->src_nents;
200 - if (rctx->dst_nents < 0) {
201 - dev_err(qce->dev, "Invalid numbers of dst SG.\n");
202 - return -rctx->dst_nents;
205 - rctx->dst_nents += 1;
207 - gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
208 - GFP_KERNEL : GFP_ATOMIC;
210 - ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
214 - sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
216 - sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
222 - sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
229 - rctx->dst_sg = rctx->dst_tbl.sgl;
231 - ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
236 - ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
238 - goto error_unmap_dst;
239 - rctx->src_sg = req->src;
241 - rctx->src_sg = rctx->dst_sg;
244 - ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
245 - rctx->dst_sg, rctx->dst_nents,
246 - qce_ablkcipher_done, async_req);
248 - goto error_unmap_src;
250 - qce_dma_issue_pending(&qce->dma);
252 - ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
254 - goto error_terminate;
259 - qce_dma_terminate_all(&qce->dma);
262 - dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
264 - dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
266 - sg_free_table(&rctx->dst_tbl);
270 -static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
271 - unsigned int keylen)
273 - struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
274 - struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
275 - unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
278 - if (!key || !keylen)
281 - if (IS_AES(flags)) {
283 - case AES_KEYSIZE_128:
284 - case AES_KEYSIZE_256:
289 - } else if (IS_DES(flags)) {
290 - u32 tmp[DES_EXPKEY_WORDS];
292 - ret = des_ekey(tmp, key);
293 - if (!ret && crypto_ablkcipher_get_flags(ablk) &
294 - CRYPTO_TFM_REQ_WEAK_KEY)
298 - ctx->enc_keylen = keylen;
299 - memcpy(ctx->enc_key, key, keylen);
302 - ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
304 - ctx->enc_keylen = keylen;
307 - crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
311 -static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
313 - struct crypto_tfm *tfm =
314 - crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
315 - struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
316 - struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
317 - struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
320 - rctx->flags = tmpl->alg_flags;
321 - rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
323 - if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
324 - ctx->enc_keylen != AES_KEYSIZE_256) {
325 - SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
327 - skcipher_request_set_tfm(subreq, ctx->fallback);
328 - skcipher_request_set_callback(subreq, req->base.flags,
330 - skcipher_request_set_crypt(subreq, req->src, req->dst,
331 - req->nbytes, req->info);
332 - ret = encrypt ? crypto_skcipher_encrypt(subreq) :
333 - crypto_skcipher_decrypt(subreq);
334 - skcipher_request_zero(subreq);
338 - return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
341 -static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
343 - return qce_ablkcipher_crypt(req, 1);
346 -static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
348 - return qce_ablkcipher_crypt(req, 0);
351 -static int qce_ablkcipher_init(struct crypto_tfm *tfm)
353 - struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
355 - memset(ctx, 0, sizeof(*ctx));
356 - tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
358 - ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
360 - CRYPTO_ALG_NEED_FALLBACK);
361 - return PTR_ERR_OR_ZERO(ctx->fallback);
364 -static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
366 - struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
368 - crypto_free_skcipher(ctx->fallback);
371 -struct qce_ablkcipher_def {
372 - unsigned long flags;
374 - const char *drv_name;
375 - unsigned int blocksize;
376 - unsigned int ivsize;
377 - unsigned int min_keysize;
378 - unsigned int max_keysize;
381 -static const struct qce_ablkcipher_def ablkcipher_def[] = {
383 - .flags = QCE_ALG_AES | QCE_MODE_ECB,
384 - .name = "ecb(aes)",
385 - .drv_name = "ecb-aes-qce",
386 - .blocksize = AES_BLOCK_SIZE,
387 - .ivsize = AES_BLOCK_SIZE,
388 - .min_keysize = AES_MIN_KEY_SIZE,
389 - .max_keysize = AES_MAX_KEY_SIZE,
392 - .flags = QCE_ALG_AES | QCE_MODE_CBC,
393 - .name = "cbc(aes)",
394 - .drv_name = "cbc-aes-qce",
395 - .blocksize = AES_BLOCK_SIZE,
396 - .ivsize = AES_BLOCK_SIZE,
397 - .min_keysize = AES_MIN_KEY_SIZE,
398 - .max_keysize = AES_MAX_KEY_SIZE,
401 - .flags = QCE_ALG_AES | QCE_MODE_CTR,
402 - .name = "ctr(aes)",
403 - .drv_name = "ctr-aes-qce",
404 - .blocksize = AES_BLOCK_SIZE,
405 - .ivsize = AES_BLOCK_SIZE,
406 - .min_keysize = AES_MIN_KEY_SIZE,
407 - .max_keysize = AES_MAX_KEY_SIZE,
410 - .flags = QCE_ALG_AES | QCE_MODE_XTS,
411 - .name = "xts(aes)",
412 - .drv_name = "xts-aes-qce",
413 - .blocksize = AES_BLOCK_SIZE,
414 - .ivsize = AES_BLOCK_SIZE,
415 - .min_keysize = AES_MIN_KEY_SIZE,
416 - .max_keysize = AES_MAX_KEY_SIZE,
419 - .flags = QCE_ALG_DES | QCE_MODE_ECB,
420 - .name = "ecb(des)",
421 - .drv_name = "ecb-des-qce",
422 - .blocksize = DES_BLOCK_SIZE,
424 - .min_keysize = DES_KEY_SIZE,
425 - .max_keysize = DES_KEY_SIZE,
428 - .flags = QCE_ALG_DES | QCE_MODE_CBC,
429 - .name = "cbc(des)",
430 - .drv_name = "cbc-des-qce",
431 - .blocksize = DES_BLOCK_SIZE,
432 - .ivsize = DES_BLOCK_SIZE,
433 - .min_keysize = DES_KEY_SIZE,
434 - .max_keysize = DES_KEY_SIZE,
437 - .flags = QCE_ALG_3DES | QCE_MODE_ECB,
438 - .name = "ecb(des3_ede)",
439 - .drv_name = "ecb-3des-qce",
440 - .blocksize = DES3_EDE_BLOCK_SIZE,
442 - .min_keysize = DES3_EDE_KEY_SIZE,
443 - .max_keysize = DES3_EDE_KEY_SIZE,
446 - .flags = QCE_ALG_3DES | QCE_MODE_CBC,
447 - .name = "cbc(des3_ede)",
448 - .drv_name = "cbc-3des-qce",
449 - .blocksize = DES3_EDE_BLOCK_SIZE,
450 - .ivsize = DES3_EDE_BLOCK_SIZE,
451 - .min_keysize = DES3_EDE_KEY_SIZE,
452 - .max_keysize = DES3_EDE_KEY_SIZE,
456 -static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
457 - struct qce_device *qce)
459 - struct qce_alg_template *tmpl;
460 - struct crypto_alg *alg;
463 - tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
467 - alg = &tmpl->alg.crypto;
469 - snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
470 - snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
473 - alg->cra_blocksize = def->blocksize;
474 - alg->cra_ablkcipher.ivsize = def->ivsize;
475 - alg->cra_ablkcipher.min_keysize = def->min_keysize;
476 - alg->cra_ablkcipher.max_keysize = def->max_keysize;
477 - alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
478 - alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
479 - alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
481 - alg->cra_priority = 300;
482 - alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
483 - CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_KERN_DRIVER_ONLY;
484 - alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
485 - alg->cra_alignmask = 0;
486 - alg->cra_type = &crypto_ablkcipher_type;
487 - alg->cra_module = THIS_MODULE;
488 - alg->cra_init = qce_ablkcipher_init;
489 - alg->cra_exit = qce_ablkcipher_exit;
490 - INIT_LIST_HEAD(&alg->cra_list);
492 - INIT_LIST_HEAD(&tmpl->entry);
493 - tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
494 - tmpl->alg_flags = def->flags;
497 - ret = crypto_register_alg(alg);
500 - dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
504 - list_add_tail(&tmpl->entry, &ablkcipher_algs);
505 - dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
509 -static void qce_ablkcipher_unregister(struct qce_device *qce)
511 - struct qce_alg_template *tmpl, *n;
513 - list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
514 - crypto_unregister_alg(&tmpl->alg.crypto);
515 - list_del(&tmpl->entry);
520 -static int qce_ablkcipher_register(struct qce_device *qce)
524 - for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
525 - ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
532 - qce_ablkcipher_unregister(qce);
536 -const struct qce_algo_ops ablkcipher_ops = {
537 - .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
538 - .register_algs = qce_ablkcipher_register,
539 - .unregister_algs = qce_ablkcipher_unregister,
540 - .async_req_handle = qce_ablkcipher_async_req_handle,
543 +++ b/drivers/crypto/qce/skcipher.c
546 + * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
548 + * This program is free software; you can redistribute it and/or modify
549 + * it under the terms of the GNU General Public License version 2 and
550 + * only version 2 as published by the Free Software Foundation.
552 + * This program is distributed in the hope that it will be useful,
553 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
554 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
555 + * GNU General Public License for more details.
558 +#include <linux/device.h>
559 +#include <linux/interrupt.h>
560 +#include <linux/types.h>
561 +#include <crypto/aes.h>
562 +#include <crypto/des.h>
563 +#include <crypto/internal/skcipher.h>
567 +static LIST_HEAD(skcipher_algs);
569 +static void qce_skcipher_done(void *data)
571 + struct crypto_async_request *async_req = data;
572 + struct skcipher_request *req = skcipher_request_cast(async_req);
573 + struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
574 + struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
575 + struct qce_device *qce = tmpl->qce;
576 + enum dma_data_direction dir_src, dir_dst;
581 + diff_dst = (req->src != req->dst) ? true : false;
582 + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
583 + dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
585 + error = qce_dma_terminate_all(&qce->dma);
587 + dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
591 + dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
592 + dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
594 + sg_free_table(&rctx->dst_tbl);
596 + error = qce_check_status(qce, &status);
598 + dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
600 + qce->async_req_done(tmpl->qce, error);
604 +qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
606 + struct skcipher_request *req = skcipher_request_cast(async_req);
607 + struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
608 + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
609 + struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
610 + struct qce_device *qce = tmpl->qce;
611 + enum dma_data_direction dir_src, dir_dst;
612 + struct scatterlist *sg;
617 + rctx->iv = req->iv;
618 + rctx->ivsize = crypto_skcipher_ivsize(skcipher);
619 + rctx->cryptlen = req->cryptlen;
621 + diff_dst = (req->src != req->dst) ? true : false;
622 + dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
623 + dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
625 + rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
627 + rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
629 + rctx->dst_nents = rctx->src_nents;
630 + if (rctx->src_nents < 0) {
631 + dev_err(qce->dev, "Invalid numbers of src SG.\n");
632 + return rctx->src_nents;
634 + if (rctx->dst_nents < 0) {
635 + dev_err(qce->dev, "Invalid numbers of dst SG.\n");
636 + return -rctx->dst_nents;
639 + rctx->dst_nents += 1;
641 + gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
642 + GFP_KERNEL : GFP_ATOMIC;
644 + ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
648 + sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
650 + sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
656 + sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
663 + rctx->dst_sg = rctx->dst_tbl.sgl;
665 + ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
670 + ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
672 + goto error_unmap_dst;
673 + rctx->src_sg = req->src;
675 + rctx->src_sg = rctx->dst_sg;
678 + ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
679 + rctx->dst_sg, rctx->dst_nents,
680 + qce_skcipher_done, async_req);
682 + goto error_unmap_src;
684 + qce_dma_issue_pending(&qce->dma);
686 + ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
688 + goto error_terminate;
693 + qce_dma_terminate_all(&qce->dma);
696 + dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
698 + dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
700 + sg_free_table(&rctx->dst_tbl);
704 +static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
705 + unsigned int keylen)
707 + struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
708 + unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
711 + if (!key || !keylen)
714 + if (IS_AES(flags)) {
716 + case AES_KEYSIZE_128:
717 + case AES_KEYSIZE_256:
722 + } else if (IS_DES(flags)) {
723 + u32 tmp[DES_EXPKEY_WORDS];
725 + ret = des_ekey(tmp, key);
726 + if (!ret && crypto_skcipher_get_flags(ablk) &
727 + CRYPTO_TFM_REQ_WEAK_KEY)
731 + ctx->enc_keylen = keylen;
732 + memcpy(ctx->enc_key, key, keylen);
735 + ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
737 + ctx->enc_keylen = keylen;
740 + crypto_skcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
744 +static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
746 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
747 + struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
748 + struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
749 + struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
752 + rctx->flags = tmpl->alg_flags;
753 + rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
755 + if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
756 + ctx->enc_keylen != AES_KEYSIZE_256) {
757 + SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
759 + skcipher_request_set_tfm(subreq, ctx->fallback);
760 + skcipher_request_set_callback(subreq, req->base.flags,
762 + skcipher_request_set_crypt(subreq, req->src, req->dst,
763 + req->cryptlen, req->iv);
764 + ret = encrypt ? crypto_skcipher_encrypt(subreq) :
765 + crypto_skcipher_decrypt(subreq);
766 + skcipher_request_zero(subreq);
770 + return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
773 +static int qce_skcipher_encrypt(struct skcipher_request *req)
775 + return qce_skcipher_crypt(req, 1);
778 +static int qce_skcipher_decrypt(struct skcipher_request *req)
780 + return qce_skcipher_crypt(req, 0);
783 +static int qce_skcipher_init(struct crypto_skcipher *tfm)
785 + struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
787 + memset(ctx, 0, sizeof(*ctx));
788 + crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx));
790 + ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
791 + 0, CRYPTO_ALG_ASYNC |
792 + CRYPTO_ALG_NEED_FALLBACK);
793 + return PTR_ERR_OR_ZERO(ctx->fallback);
796 +static void qce_skcipher_exit(struct crypto_skcipher *tfm)
798 + struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
800 + crypto_free_skcipher(ctx->fallback);
803 +struct qce_skcipher_def {
804 + unsigned long flags;
806 + const char *drv_name;
807 + unsigned int blocksize;
808 + unsigned int ivsize;
809 + unsigned int min_keysize;
810 + unsigned int max_keysize;
813 +static const struct qce_skcipher_def skcipher_def[] = {
815 + .flags = QCE_ALG_AES | QCE_MODE_ECB,
816 + .name = "ecb(aes)",
817 + .drv_name = "ecb-aes-qce",
818 + .blocksize = AES_BLOCK_SIZE,
819 + .ivsize = AES_BLOCK_SIZE,
820 + .min_keysize = AES_MIN_KEY_SIZE,
821 + .max_keysize = AES_MAX_KEY_SIZE,
824 + .flags = QCE_ALG_AES | QCE_MODE_CBC,
825 + .name = "cbc(aes)",
826 + .drv_name = "cbc-aes-qce",
827 + .blocksize = AES_BLOCK_SIZE,
828 + .ivsize = AES_BLOCK_SIZE,
829 + .min_keysize = AES_MIN_KEY_SIZE,
830 + .max_keysize = AES_MAX_KEY_SIZE,
833 + .flags = QCE_ALG_AES | QCE_MODE_CTR,
834 + .name = "ctr(aes)",
835 + .drv_name = "ctr-aes-qce",
836 + .blocksize = AES_BLOCK_SIZE,
837 + .ivsize = AES_BLOCK_SIZE,
838 + .min_keysize = AES_MIN_KEY_SIZE,
839 + .max_keysize = AES_MAX_KEY_SIZE,
842 + .flags = QCE_ALG_AES | QCE_MODE_XTS,
843 + .name = "xts(aes)",
844 + .drv_name = "xts-aes-qce",
845 + .blocksize = AES_BLOCK_SIZE,
846 + .ivsize = AES_BLOCK_SIZE,
847 + .min_keysize = AES_MIN_KEY_SIZE,
848 + .max_keysize = AES_MAX_KEY_SIZE,
851 + .flags = QCE_ALG_DES | QCE_MODE_ECB,
852 + .name = "ecb(des)",
853 + .drv_name = "ecb-des-qce",
854 + .blocksize = DES_BLOCK_SIZE,
856 + .min_keysize = DES_KEY_SIZE,
857 + .max_keysize = DES_KEY_SIZE,
860 + .flags = QCE_ALG_DES | QCE_MODE_CBC,
861 + .name = "cbc(des)",
862 + .drv_name = "cbc-des-qce",
863 + .blocksize = DES_BLOCK_SIZE,
864 + .ivsize = DES_BLOCK_SIZE,
865 + .min_keysize = DES_KEY_SIZE,
866 + .max_keysize = DES_KEY_SIZE,
869 + .flags = QCE_ALG_3DES | QCE_MODE_ECB,
870 + .name = "ecb(des3_ede)",
871 + .drv_name = "ecb-3des-qce",
872 + .blocksize = DES3_EDE_BLOCK_SIZE,
874 + .min_keysize = DES3_EDE_KEY_SIZE,
875 + .max_keysize = DES3_EDE_KEY_SIZE,
878 + .flags = QCE_ALG_3DES | QCE_MODE_CBC,
879 + .name = "cbc(des3_ede)",
880 + .drv_name = "cbc-3des-qce",
881 + .blocksize = DES3_EDE_BLOCK_SIZE,
882 + .ivsize = DES3_EDE_BLOCK_SIZE,
883 + .min_keysize = DES3_EDE_KEY_SIZE,
884 + .max_keysize = DES3_EDE_KEY_SIZE,
888 +static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
889 + struct qce_device *qce)
891 + struct qce_alg_template *tmpl;
892 + struct skcipher_alg *alg;
895 + tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
899 + alg = &tmpl->alg.skcipher;
901 + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
902 + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
905 + alg->base.cra_blocksize = def->blocksize;
906 + alg->ivsize = def->ivsize;
907 + alg->min_keysize = def->min_keysize;
908 + alg->max_keysize = def->max_keysize;
909 + alg->setkey = qce_skcipher_setkey;
910 + alg->encrypt = qce_skcipher_encrypt;
911 + alg->decrypt = qce_skcipher_decrypt;
913 + alg->base.cra_priority = 300;
914 + alg->base.cra_flags = CRYPTO_ALG_ASYNC |
915 + CRYPTO_ALG_NEED_FALLBACK |
916 + CRYPTO_ALG_KERN_DRIVER_ONLY;
917 + alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
918 + alg->base.cra_alignmask = 0;
919 + alg->base.cra_module = THIS_MODULE;
921 + alg->init = qce_skcipher_init;
922 + alg->exit = qce_skcipher_exit;
924 + INIT_LIST_HEAD(&tmpl->entry);
925 + tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
926 + tmpl->alg_flags = def->flags;
929 + ret = crypto_register_skcipher(alg);
932 + dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
936 + list_add_tail(&tmpl->entry, &skcipher_algs);
937 + dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
941 +static void qce_skcipher_unregister(struct qce_device *qce)
943 + struct qce_alg_template *tmpl, *n;
945 + list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
946 + crypto_unregister_skcipher(&tmpl->alg.skcipher);
947 + list_del(&tmpl->entry);
952 +static int qce_skcipher_register(struct qce_device *qce)
956 + for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
957 + ret = qce_skcipher_register_one(&skcipher_def[i], qce);
964 + qce_skcipher_unregister(qce);
968 +const struct qce_algo_ops skcipher_ops = {
969 + .type = CRYPTO_ALG_TYPE_SKCIPHER,
970 + .register_algs = qce_skcipher_register,
971 + .unregister_algs = qce_skcipher_unregister,
972 + .async_req_handle = qce_skcipher_async_req_handle,