ipq40xx: qce - add fixes for AES ciphers
[openwrt/staging/jogo.git] target/linux/ipq40xx/patches-4.19/051-crypto-qce-allow-building-only-hashes-ciphers.patch
From 62134842498927a0fcc19798a615340a7a6a9e62 Mon Sep 17 00:00:00 2001
From: Eneas U de Queiroz <cotequeiroz@gmail.com>
Date: Mon, 28 Oct 2019 15:17:19 -0300
Subject: [PATCH] crypto: qce - allow building only hashes/ciphers

Signed-off-by: Eneas U de Queiroz <cotequeiroz@gmail.com>

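This allows building the qce driver with support for only hashes, or
only skciphers, selected through a new Kconfig choice.  The rationale
is given in the help text added below: the QCE engine handles only 2
requests in parallel, and IPsec throughput seems to improve when one
family of algorithms is left to the CPU.

As an illustration, a skciphers-only build would use a .config
fragment along these lines (hypothetical example based on the symbols
this patch introduces):

    CONFIG_CRYPTO_DEV_QCE=m
    # Choice: build only the symmetric-key cipher support
    CONFIG_CRYPTO_DEV_QCE_ENABLE_SKCIPHER=y
    CONFIG_CRYPTO_DEV_QCE_SKCIPHER=y
    # CONFIG_CRYPTO_DEV_QCE_SHA is not set

The algorithms actually registered can be checked at runtime in
/proc/crypto, where the qce implementations show up with driver names
such as cbc-aes-qce.
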
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -573,6 +573,14 @@ config CRYPTO_DEV_QCE
 	tristate "Qualcomm crypto engine accelerator"
 	depends on ARCH_QCOM || COMPILE_TEST
 	depends on HAS_IOMEM
+	help
+	  This driver supports Qualcomm crypto engine accelerator
+	  hardware. To compile this driver as a module, choose M here. The
+	  module will be called qcrypto.
+
+config CRYPTO_DEV_QCE_SKCIPHER
+	bool
+	depends on CRYPTO_DEV_QCE
 	select CRYPTO_AES
 	select CRYPTO_DES
 	select CRYPTO_ECB
@@ -580,10 +588,57 @@ config CRYPTO_DEV_QCE
 	select CRYPTO_XTS
 	select CRYPTO_CTR
 	select CRYPTO_BLKCIPHER
+
+config CRYPTO_DEV_QCE_SHA
+	bool
+	depends on CRYPTO_DEV_QCE
+
+choice
+	prompt "Algorithms enabled for QCE acceleration"
+	default CRYPTO_DEV_QCE_ENABLE_ALL
+	depends on CRYPTO_DEV_QCE
 	help
-	  This driver supports Qualcomm crypto engine accelerator
-	  hardware. To compile this driver as a module, choose M here. The
-	  module will be called qcrypto.
+	  This option allows you to choose whether to build support for all
+	  algorithms (default), hashes-only, or skciphers-only.
+
+	  The QCE engine does not appear to scale as well as the CPU to handle
+	  multiple crypto requests.  While the ipq40xx chips have 4-core CPUs, the
+	  QCE handles only 2 requests in parallel.
+
+	  IPsec throughput seems to improve when disabling either family of
+	  algorithms, sharing the load with the CPU.  Enabling skciphers-only
+	  appears to work best.
+
+	config CRYPTO_DEV_QCE_ENABLE_ALL
+		bool "All supported algorithms"
+		select CRYPTO_DEV_QCE_SKCIPHER
+		select CRYPTO_DEV_QCE_SHA
+		help
+		  Enable all supported algorithms:
+			- AES (CBC, CTR, ECB, XTS)
+			- 3DES (CBC, ECB)
+			- DES (CBC, ECB)
+			- SHA1, HMAC-SHA1
+			- SHA256, HMAC-SHA256
+
+	config CRYPTO_DEV_QCE_ENABLE_SKCIPHER
+		bool "Symmetric-key ciphers only"
+		select CRYPTO_DEV_QCE_SKCIPHER
+		help
+		  Enable symmetric-key ciphers only:
+			- AES (CBC, CTR, ECB, XTS)
+			- 3DES (ECB, CBC)
+			- DES (ECB, CBC)
+
+	config CRYPTO_DEV_QCE_ENABLE_SHA
+		bool "Hash/HMAC only"
+		select CRYPTO_DEV_QCE_SHA
+		help
+		  Enable hashes/HMAC algorithms only:
+			- SHA1, HMAC-SHA1
+			- SHA256, HMAC-SHA256
+
+endchoice
 
 config CRYPTO_DEV_QCE_SW_MAX_LEN
 	int "Default maximum request size to use software for AES"
--- a/drivers/crypto/qce/Makefile
+++ b/drivers/crypto/qce/Makefile
@@ -2,6 +2,7 @@
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
 qcrypto-objs := core.o \
 		common.o \
-		dma.o \
-		sha.o \
-		skcipher.o
+		dma.o
+
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SHA) += sha.o
+qcrypto-$(CONFIG_CRYPTO_DEV_QCE_SKCIPHER) += skcipher.o
--- a/drivers/crypto/qce/common.c
+++ b/drivers/crypto/qce/common.c
@@ -51,52 +51,56 @@ qce_clear_array(struct qce_device *qce,
 		qce_write(qce, offset + i * sizeof(u32), 0);
 }
 
-static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+static u32 qce_config_reg(struct qce_device *qce, int little)
 {
-	u32 cfg = 0;
+	u32 beats = (qce->burst_size >> 3) - 1;
+	u32 pipe_pair = qce->pipe_pair_id;
+	u32 config;
 
-	if (IS_AES(flags)) {
-		if (aes_key_size == AES_KEYSIZE_128)
-			cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
-		else if (aes_key_size == AES_KEYSIZE_256)
-			cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
-	}
+	config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+	config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+		  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+	config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+	config &= ~HIGH_SPD_EN_N_SHIFT;
 
-	if (IS_AES(flags))
-		cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
-	else if (IS_DES(flags) || IS_3DES(flags))
-		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+	if (little)
+		config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
 
-	if (IS_DES(flags))
-		cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+	return config;
+}
 
-	if (IS_3DES(flags))
-		cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+	__be32 *d = dst;
+	const u8 *s = src;
+	unsigned int n;
 
-	switch (flags & QCE_MODE_MASK) {
-	case QCE_MODE_ECB:
-		cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
-		break;
-	case QCE_MODE_CBC:
-		cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
-		break;
-	case QCE_MODE_CTR:
-		cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
-		break;
-	case QCE_MODE_XTS:
-		cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
-		break;
-	case QCE_MODE_CCM:
-		cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
-		cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
-		break;
-	default:
-		return ~0;
+	n = len / sizeof(u32);
+	for (; n > 0; n--) {
+		*d = cpu_to_be32p((const __u32 *) s);
+		s += sizeof(__u32);
+		d++;
 	}
+}
 
-	return cfg;
+static void qce_setup_config(struct qce_device *qce)
+{
+	u32 config;
+
+	/* get big endianness */
+	config = qce_config_reg(qce, 0);
+
+	/* clear status */
+	qce_write(qce, REG_STATUS, 0);
+	qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+	qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
 }
 
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
 static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
 {
 	u32 cfg = 0;
@@ -143,88 +147,6 @@ static u32 qce_auth_cfg(unsigned long fl
 	return cfg;
 }
 
-static u32 qce_config_reg(struct qce_device *qce, int little)
-{
-	u32 beats = (qce->burst_size >> 3) - 1;
-	u32 pipe_pair = qce->pipe_pair_id;
-	u32 config;
-
-	config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
-	config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
-		  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
-	config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
-	config &= ~HIGH_SPD_EN_N_SHIFT;
-
-	if (little)
-		config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
-
-	return config;
-}
-
-void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
-{
-	__be32 *d = dst;
-	const u8 *s = src;
-	unsigned int n;
-
-	n = len / sizeof(u32);
-	for (; n > 0; n--) {
-		*d = cpu_to_be32p((const __u32 *) s);
-		s += sizeof(__u32);
-		d++;
-	}
-}
-
-static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
-{
-	u8 swap[QCE_AES_IV_LENGTH];
-	u32 i, j;
-
-	if (ivsize > QCE_AES_IV_LENGTH)
-		return;
-
-	memset(swap, 0, QCE_AES_IV_LENGTH);
-
-	for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
-	     i < QCE_AES_IV_LENGTH; i++, j--)
-		swap[i] = src[j];
-
-	qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
-}
-
-static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
-		       unsigned int enckeylen, unsigned int cryptlen)
-{
-	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
-	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
-	unsigned int xtsdusize;
-
-	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
-			       enckeylen / 2);
-	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
-
-	/* xts du size 512B */
-	xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
-	qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
-}
-
-static void qce_setup_config(struct qce_device *qce)
-{
-	u32 config;
-
-	/* get big endianness */
-	config = qce_config_reg(qce, 0);
-
-	/* clear status */
-	qce_write(qce, REG_STATUS, 0);
-	qce_write(qce, REG_CONFIG, config);
-}
-
-static inline void qce_crypto_go(struct qce_device *qce)
-{
-	qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
-}
-
 static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
 				u32 totallen, u32 offset)
 {
@@ -309,6 +231,87 @@ go_proc:
 
 	return 0;
 }
+#endif
+
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+	u32 cfg = 0;
+
+	if (IS_AES(flags)) {
+		if (aes_key_size == AES_KEYSIZE_128)
+			cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+		else if (aes_key_size == AES_KEYSIZE_256)
+			cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+	}
+
+	if (IS_AES(flags))
+		cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+	else if (IS_DES(flags) || IS_3DES(flags))
+		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+	if (IS_DES(flags))
+		cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+	if (IS_3DES(flags))
+		cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+	switch (flags & QCE_MODE_MASK) {
+	case QCE_MODE_ECB:
+		cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_CBC:
+		cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_CTR:
+		cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_XTS:
+		cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+		break;
+	case QCE_MODE_CCM:
+		cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+		cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+		break;
+	default:
+		return ~0;
+	}
+
+	return cfg;
+}
+
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+	u8 swap[QCE_AES_IV_LENGTH];
+	u32 i, j;
+
+	if (ivsize > QCE_AES_IV_LENGTH)
+		return;
+
+	memset(swap, 0, QCE_AES_IV_LENGTH);
+
+	for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+	     i < QCE_AES_IV_LENGTH; i++, j--)
+		swap[i] = src[j];
+
+	qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+		       unsigned int enckeylen, unsigned int cryptlen)
+{
+	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+	unsigned int xtsdusize;
+
+	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+			       enckeylen / 2);
+	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+	/* xts du size 512B */
+	xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+	qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
 
 static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
 				   u32 totallen, u32 offset)
@@ -390,15 +393,20 @@ static int qce_setup_regs_skcipher(struc
 
 	return 0;
 }
+#endif
 
 int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
 	      u32 offset)
 {
 	switch (type) {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
 	case CRYPTO_ALG_TYPE_SKCIPHER:
 		return qce_setup_regs_skcipher(async_req, totallen, offset);
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
 	case CRYPTO_ALG_TYPE_AHASH:
 		return qce_setup_regs_ahash(async_req, totallen, offset);
+#endif
 	default:
 		return -EINVAL;
 	}
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -30,8 +30,12 @@
 #define QCE_QUEUE_LENGTH 1
 
 static const struct qce_algo_ops *qce_ops[] = {
+#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
 	&skcipher_ops,
+#endif
+#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
 	&ahash_ops,
+#endif
 };
 
 static void qce_unregister_algs(struct qce_device *qce)