1 From 62134842498927a0fcc19798a615340a7a6a9e62 Mon Sep 17 00:00:00 2001
2 From: Eneas U de Queiroz <cotequeiroz@gmail.com>
3 Date: Mon, 28 Oct 2019 15:17:19 -0300
4 Subject: [PATCH] crypto: qce - allow building only hashes/ciphers
6 Signed-off-by: Eneas U de Queiroz <cotequeiroz@gmail.com>
8 --- a/drivers/crypto/Kconfig
9 +++ b/drivers/crypto/Kconfig
10 @@ -573,6 +573,14 @@ config CRYPTO_DEV_QCE
11 tristate "Qualcomm crypto engine accelerator"
12 depends on ARCH_QCOM || COMPILE_TEST
15 + This driver supports Qualcomm crypto engine accelerator
16 + hardware. To compile this driver as a module, choose M here. The
17 + module will be called qcrypto.
19 +config CRYPTO_DEV_QCE_SKCIPHER
21 + depends on CRYPTO_DEV_QCE
25 @@ -580,10 +588,57 @@ config CRYPTO_DEV_QCE
28 select CRYPTO_BLKCIPHER
30 +config CRYPTO_DEV_QCE_SHA
32 + depends on CRYPTO_DEV_QCE
35 + prompt "Algorithms enabled for QCE acceleration"
36 + default CRYPTO_DEV_QCE_ENABLE_ALL
37 + depends on CRYPTO_DEV_QCE
39 - This driver supports Qualcomm crypto engine accelerator
40 - hardware. To compile this driver as a module, choose M here. The
41 - module will be called qcrypto.
42 + This option allows choosing whether to build support for all algorithms
43 + (default), hashes-only, or skciphers-only.
45 + The QCE engine does not appear to scale as well as the CPU to handle
46 + multiple crypto requests. While the ipq40xx chips have 4-core CPUs, the
47 + QCE handles only 2 requests in parallel.
49 + IPsec throughput seems to improve when disabling either family of
50 + algorithms, sharing the load with the CPU. Enabling skciphers-only
51 + appears to work best.
53 + config CRYPTO_DEV_QCE_ENABLE_ALL
54 + bool "All supported algorithms"
55 + select CRYPTO_DEV_QCE_SKCIPHER
56 + select CRYPTO_DEV_QCE_SHA
58 + Enable all supported algorithms:
59 + - AES (CBC, CTR, ECB, XTS)
63 + - SHA256, HMAC-SHA256
65 + config CRYPTO_DEV_QCE_ENABLE_SKCIPHER
66 + bool "Symmetric-key ciphers only"
67 + select CRYPTO_DEV_QCE_SKCIPHER
69 + Enable symmetric-key ciphers only:
70 + - AES (CBC, CTR, ECB, XTS)
74 + config CRYPTO_DEV_QCE_ENABLE_SHA
75 + bool "Hash/HMAC only"
76 + select CRYPTO_DEV_QCE_SHA
78 + Enable hashes/HMAC algorithms only:
80 + - SHA256, HMAC-SHA256
84 config CRYPTO_DEV_QCE_SW_MAX_LEN
85 int "Default maximum request size to use software for AES"
86 --- a/drivers/crypto/qce/Makefile
87 +++ b/drivers/crypto/qce/Makefile
89 obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
90 qcrypto-objs := core.o \
97 +qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
98 +qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
99 --- a/drivers/crypto/qce/common.c
100 +++ b/drivers/crypto/qce/common.c
101 @@ -51,52 +51,56 @@ qce_clear_array(struct qce_device *qce,
102 qce_write(qce, offset + i * sizeof(u32), 0);
105 -static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
106 +static u32 qce_config_reg(struct qce_device *qce, int little)
109 + u32 beats = (qce->burst_size >> 3) - 1;
110 + u32 pipe_pair = qce->pipe_pair_id;
113 - if (IS_AES(flags)) {
114 - if (aes_key_size == AES_KEYSIZE_128)
115 - cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
116 - else if (aes_key_size == AES_KEYSIZE_256)
117 - cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
119 + config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
120 + config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
121 + BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
122 + config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
123 + config &= ~HIGH_SPD_EN_N_SHIFT;
126 - cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
127 - else if (IS_DES(flags) || IS_3DES(flags))
128 - cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
130 + config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
133 - cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
137 - if (IS_3DES(flags))
138 - cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
139 +void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
145 - switch (flags & QCE_MODE_MASK) {
147 - cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
150 - cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
153 - cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
156 - cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
159 - cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
160 - cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
164 + n = len / sizeof(u32);
165 + for (; n > 0; n--) {
166 + *d = cpu_to_be32p((const __u32 *) s);
167 + s += sizeof(__u32);
173 +static void qce_setup_config(struct qce_device *qce)
177 + /* get big endianness */
178 + config = qce_config_reg(qce, 0);
181 + qce_write(qce, REG_STATUS, 0);
182 + qce_write(qce, REG_CONFIG, config);
185 +static inline void qce_crypto_go(struct qce_device *qce)
187 + qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
190 +#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
191 static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
194 @@ -143,88 +147,6 @@ static u32 qce_auth_cfg(unsigned long fl
198 -static u32 qce_config_reg(struct qce_device *qce, int little)
200 - u32 beats = (qce->burst_size >> 3) - 1;
201 - u32 pipe_pair = qce->pipe_pair_id;
204 - config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
205 - config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
206 - BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
207 - config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
208 - config &= ~HIGH_SPD_EN_N_SHIFT;
211 - config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
216 -void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
222 - n = len / sizeof(u32);
223 - for (; n > 0; n--) {
224 - *d = cpu_to_be32p((const __u32 *) s);
225 - s += sizeof(__u32);
230 -static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
232 - u8 swap[QCE_AES_IV_LENGTH];
235 - if (ivsize > QCE_AES_IV_LENGTH)
238 - memset(swap, 0, QCE_AES_IV_LENGTH);
240 - for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
241 - i < QCE_AES_IV_LENGTH; i++, j--)
244 - qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
247 -static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
248 - unsigned int enckeylen, unsigned int cryptlen)
250 - u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
251 - unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
252 - unsigned int xtsdusize;
254 - qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
256 - qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
258 - /* xts du size 512B */
259 - xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
260 - qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
263 -static void qce_setup_config(struct qce_device *qce)
267 - /* get big endianness */
268 - config = qce_config_reg(qce, 0);
271 - qce_write(qce, REG_STATUS, 0);
272 - qce_write(qce, REG_CONFIG, config);
275 -static inline void qce_crypto_go(struct qce_device *qce)
277 - qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
280 static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
281 u32 totallen, u32 offset)
283 @@ -309,6 +231,87 @@ go_proc:
289 +#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
290 +static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
294 + if (IS_AES(flags)) {
295 + if (aes_key_size == AES_KEYSIZE_128)
296 + cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
297 + else if (aes_key_size == AES_KEYSIZE_256)
298 + cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
302 + cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
303 + else if (IS_DES(flags) || IS_3DES(flags))
304 + cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
307 + cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
309 + if (IS_3DES(flags))
310 + cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
312 + switch (flags & QCE_MODE_MASK) {
314 + cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
317 + cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
320 + cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
323 + cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
326 + cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
327 + cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
336 +static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
338 + u8 swap[QCE_AES_IV_LENGTH];
341 + if (ivsize > QCE_AES_IV_LENGTH)
344 + memset(swap, 0, QCE_AES_IV_LENGTH);
346 + for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
347 + i < QCE_AES_IV_LENGTH; i++, j--)
350 + qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
353 +static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
354 + unsigned int enckeylen, unsigned int cryptlen)
356 + u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
357 + unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
358 + unsigned int xtsdusize;
360 + qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
362 + qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
364 + /* xts du size 512B */
365 + xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
366 + qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
369 static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
370 u32 totallen, u32 offset)
371 @@ -390,15 +393,20 @@ static int qce_setup_regs_skcipher(struc
377 int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
381 +#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
382 case CRYPTO_ALG_TYPE_SKCIPHER:
383 return qce_setup_regs_skcipher(async_req, totallen, offset);
385 +#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
386 case CRYPTO_ALG_TYPE_AHASH:
387 return qce_setup_regs_ahash(async_req, totallen, offset);
392 --- a/drivers/crypto/qce/core.c
393 +++ b/drivers/crypto/qce/core.c
395 #define QCE_QUEUE_LENGTH 1
397 static const struct qce_algo_ops *qce_ops[] = {
398 +#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
401 +#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
406 static void qce_unregister_algs(struct qce_device *qce)