[ubicom32]: move new files out from platform support patch
target/linux/ubicom32/files/arch/ubicom32/crypto/aes_ubicom32.c
/*
 * arch/ubicom32/crypto/aes_ubicom32.c
 *	Ubicom32 implementation of the AES Cipher Algorithm.
 *
 * (C) Copyright 2009, Ubicom, Inc.
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port.  If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 *	arch/m68knommu
 *	arch/blackfin
 *	arch/parisc
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include "crypto_ubicom32.h"
#include <asm/linkage.h>

struct ubicom32_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	u32 ctrl;
	int key_len;
};

static inline void aes_hw_set_key(const u8 *key, u8 key_len)
{
	/*
	 * switch case has more overhead than 4 move.4 instructions, so just copy 256 bits
	 */
	SEC_SET_KEY_256(key);
}

static inline void aes_hw_set_iv(const u8 *iv)
{
	SEC_SET_IV_4W(iv);
}

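/*
 * aes_hw_cipher()
 *	Run one 16-byte block through the security engine: load the input,
 *	kick off the operation by writing the register at offset 0x40, then
 *	busy-wait on the status bit before reading back the result.
 */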
static inline void aes_hw_cipher(u8 *out, const u8 *in)
{
	SEC_SET_INPUT_4W(in);

	asm volatile (
	"	; start AES by writing 0x40(SECURITY_BASE)	\n\t"
	"	move.4 0x40(%0), #0x01				\n\t"
	"	pipe_flush 0					\n\t"
	"							\n\t"
	"	; wait for the module to calculate the output	\n\t"
	"	btst 0x04(%0), #0				\n\t"
	"	jmpne.f .-4					\n\t"
		:
		: "a" (SEC_BASE)
		: "cc"
	);

	SEC_GET_OUTPUT_4W(out);
}

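/*
 * aes_set_key()
 *	Cache the key in the transform context and record the key-size
 *	control bits; the hardware itself is only programmed when a cipher
 *	operation actually runs.
 */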
static int __ocm_text aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
				  unsigned int key_len)
{
	struct ubicom32_aes_ctx *uctx = crypto_tfm_ctx(tfm);

	uctx->key_len = key_len;
	memcpy(uctx->key, in_key, key_len);

	/*
	 * leave out HASH_ALG (none = 0), CBC (no = 0), DIR (unknown) yet
	 */
	switch (uctx->key_len) {
	case 16:
		uctx->ctrl = SEC_KEY_128_BITS | SEC_ALG_AES;
		break;
	case 24:
		uctx->ctrl = SEC_KEY_192_BITS | SEC_ALG_AES;
		break;
	case 32:
		uctx->ctrl = SEC_KEY_256_BITS | SEC_ALG_AES;
		break;
	default:
		/* reject anything other than 128/192/256-bit keys */
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	return 0;
}

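/*
 * aes_cipher()
 *	Encrypt or decrypt a single block, holding the global hardware
 *	crypto lock around the control/key setup and the operation itself.
 */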
static inline void aes_cipher(struct crypto_tfm *tfm, u8 *out, const u8 *in, u32 extra_flags)
{
	const struct ubicom32_aes_ctx *uctx = crypto_tfm_ctx(tfm);

	hw_crypto_lock();
	hw_crypto_check();
	hw_crypto_set_ctrl(uctx->ctrl | extra_flags);

	aes_hw_set_key(uctx->key, uctx->key_len);
	aes_hw_cipher(out, in);

	hw_crypto_unlock();
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	aes_cipher(tfm, out, in, SEC_DIR_ENCRYPT);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	aes_cipher(tfm, out, in, SEC_DIR_DECRYPT);
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-ubicom32",
	.cra_priority		= CRYPTO_UBICOM32_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct ubicom32_aes_ctx),
	.cra_alignmask		= CRYPTO_UBICOM32_ALIGNMENT - 1,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
		}
	}
};

static void __ocm_text ecb_aes_crypt_loop(u8 *out, u8 *in, unsigned int n)
{
	while (likely(n)) {
		aes_hw_cipher(out, in);
		out += AES_BLOCK_SIZE;
		in += AES_BLOCK_SIZE;
		n -= AES_BLOCK_SIZE;
	}
}

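/*
 * ecb_aes_crypt()
 *	Walk the scatterlists and push every complete AES block through the
 *	hardware.  Control and key are programmed once, before the walk.
 */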
static int __ocm_text ecb_aes_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
				    struct scatterlist *src, unsigned int nbytes, u32 extra_flags)
{
	const struct ubicom32_aes_ctx *uctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int ret;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	ret = blkcipher_walk_virt(desc, &walk);
	if (ret) {
		return ret;
	}

	hw_crypto_lock();
	hw_crypto_check();

	hw_crypto_set_ctrl(uctx->ctrl | extra_flags);
	aes_hw_set_key(uctx->key, uctx->key_len);

	while (likely((nbytes = walk.nbytes))) {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		u8 *out = walk.dst.virt.addr;
		u8 *in = walk.src.virt.addr;

		/* finish n/16 blocks */
		ecb_aes_crypt_loop(out, in, n);

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, &walk, nbytes);
	}

	hw_crypto_unlock();
	return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	return ecb_aes_crypt(desc, dst, src, nbytes, SEC_DIR_ENCRYPT);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	return ecb_aes_crypt(desc, dst, src, nbytes, SEC_DIR_DECRYPT);
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-ubicom32",
	.cra_priority		= CRYPTO_UBICOM32_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct ubicom32_aes_ctx),
	.cra_alignmask		= CRYPTO_UBICOM32_ALIGNMENT - 1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= ecb_aes_encrypt,
			.decrypt	= ecb_aes_decrypt,
		}
	}
};

#if CRYPTO_UBICOM32_LOOP_ASM
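/*
 * cbc_aes_encrypt_loop()
 *	Hand-written assembly version of the CBC encrypt loop: while the
 *	engine works on one block the next input block is already loaded,
 *	and the final ciphertext block is copied back as the new IV.
 */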
void __ocm_text cbc_aes_encrypt_loop(u8 *out, u8 *in, u8 *iv, unsigned int n)
{
	asm volatile (
	"; set init. iv 4w				\n\t"
	"	move.4 0x50(%0), 0x0(%3)		\n\t"
	"	move.4 0x54(%0), 0x4(%3)		\n\t"
	"	move.4 0x58(%0), 0x8(%3)		\n\t"
	"	move.4 0x5c(%0), 0xc(%3)		\n\t"
	"						\n\t"
	"; we know n > 0, so we can always		\n\t"
	"; load the first block				\n\t"
	"; set input 4w					\n\t"
	"	move.4 0x30(%0), 0x0(%2)		\n\t"
	"	move.4 0x34(%0), 0x4(%2)		\n\t"
	"	move.4 0x38(%0), 0x8(%2)		\n\t"
	"	move.4 0x3c(%0), 0xc(%2)		\n\t"
	"						\n\t"
	"; kickoff hw					\n\t"
	"	move.4 0x40(%0), %2			\n\t"
	"						\n\t"
	"; update n & flush				\n\t"
	"	add.4 %4, #-16, %4			\n\t"
	"	pipe_flush 0				\n\t"
	"						\n\t"
	"; while (n): work on 2nd block			\n\t"
	"1:	lsl.4 d15, %4, #0x0			\n\t"
	"	jmpeq.f 5f				\n\t"
	"						\n\t"
	"; set input 4w (2nd)				\n\t"
	"	move.4 0x30(%0), 0x10(%2)		\n\t"
	"	move.4 0x34(%0), 0x14(%2)		\n\t"
	"	move.4 0x38(%0), 0x18(%2)		\n\t"
	"	move.4 0x3c(%0), 0x1c(%2)		\n\t"
	"						\n\t"
	"; update n/in asap while waiting		\n\t"
	"	add.4 %4, #-16, %4			\n\t"
	"	move.4 d15, 16(%2)++			\n\t"
	"						\n\t"
	"; wait for the previous output			\n\t"
	"	btst 0x04(%0), #0			\n\t"
	"	jmpne.f -4				\n\t"
	"						\n\t"
	"; read previous output				\n\t"
	"	move.4 0x0(%1), 0x50(%0)		\n\t"
	"	move.4 0x4(%1), 0x54(%0)		\n\t"
	"	move.4 0x8(%1), 0x58(%0)		\n\t"
	"	move.4 0xc(%1), 0x5c(%0)		\n\t"
	"						\n\t"
	"; kick off hw for 2nd input			\n\t"
	"	move.4 0x40(%0), %2			\n\t"
	"						\n\t"
	"; update out asap				\n\t"
	"	move.4 d15, 16(%1)++			\n\t"
	"						\n\t"
	"; go back to loop				\n\t"
	"	jmpt 1b					\n\t"
	"						\n\t"
	"; wait for last output				\n\t"
	"5:	btst 0x04(%0), #0			\n\t"
	"	jmpne.f -4				\n\t"
	"						\n\t"
	"; read last output				\n\t"
	"	move.4 0x0(%1), 0x50(%0)		\n\t"
	"	move.4 0x4(%1), 0x54(%0)		\n\t"
	"	move.4 0x8(%1), 0x58(%0)		\n\t"
	"	move.4 0xc(%1), 0x5c(%0)		\n\t"
	"						\n\t"
	"; copy out iv					\n\t"
	"	move.4 0x0(%3), 0x50(%0)		\n\t"
	"	move.4 0x4(%3), 0x54(%0)		\n\t"
	"	move.4 0x8(%3), 0x58(%0)		\n\t"
	"	move.4 0xc(%3), 0x5c(%0)		\n\t"
	"						\n\t"
		:
		: "a" (SEC_BASE), "a" (out), "a" (in), "a" (iv), "d" (n)
		: "d15", "cc"
	);
}

#else

static void __ocm_text cbc_aes_encrypt_loop(u8 *out, u8 *in, u8 *iv, unsigned int n)
{
	aes_hw_set_iv(iv);
	while (likely(n)) {
		aes_hw_cipher(out, in);
		out += AES_BLOCK_SIZE;
		in += AES_BLOCK_SIZE;
		n -= AES_BLOCK_SIZE;
	}
	SEC_COPY_4W(iv, out - AES_BLOCK_SIZE);
}

#endif

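/*
 * cbc_aes_decrypt_loop()
 *	For CBC decryption the IV of each block is the previous ciphertext
 *	block, so the input block is saved into iv before being run through
 *	the engine.
 */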
static void __ocm_text cbc_aes_decrypt_loop(u8 *out, u8 *in, u8 *iv, unsigned int n)
{
	while (likely(n)) {
		aes_hw_set_iv(iv);
		SEC_COPY_4W(iv, in);
		aes_hw_cipher(out, in);
		out += AES_BLOCK_SIZE;
		in += AES_BLOCK_SIZE;
		n -= AES_BLOCK_SIZE;
	}
}

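/*
 * cbc_aes_crypt()
 *	Common CBC walk: program control and key once, then process the
 *	complete blocks of each walk chunk with the direction-specific loop.
 */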
static int __ocm_text cbc_aes_crypt(struct blkcipher_desc *desc,
				    struct scatterlist *dst, struct scatterlist *src,
				    unsigned int nbytes, u32 extra_flags)
{
	struct ubicom32_aes_ctx *uctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int ret;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	ret = blkcipher_walk_virt(desc, &walk);
	if (unlikely(ret)) {
		return ret;
	}

	hw_crypto_lock();
	hw_crypto_check();

	hw_crypto_set_ctrl(uctx->ctrl | extra_flags);
	aes_hw_set_key(uctx->key, uctx->key_len);

	while (likely((nbytes = walk.nbytes))) {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
		if (likely(n)) {
			u8 *out = walk.dst.virt.addr;
			u8 *in = walk.src.virt.addr;

			if (extra_flags & SEC_DIR_ENCRYPT) {
				cbc_aes_encrypt_loop(out, in, walk.iv, n);
			} else {
				cbc_aes_decrypt_loop(out, in, walk.iv, n);
			}
		}

		nbytes &= AES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, &walk, nbytes);
	}
	hw_crypto_unlock();

	return ret;
}

static int __ocm_text cbc_aes_encrypt(struct blkcipher_desc *desc,
				      struct scatterlist *dst, struct scatterlist *src,
				      unsigned int nbytes)
{
	return cbc_aes_crypt(desc, dst, src, nbytes, SEC_DIR_ENCRYPT | SEC_CBC_SET);
}

static int __ocm_text cbc_aes_decrypt(struct blkcipher_desc *desc,
				      struct scatterlist *dst, struct scatterlist *src,
				      unsigned int nbytes)
{
	return cbc_aes_crypt(desc, dst, src, nbytes, SEC_DIR_DECRYPT | SEC_CBC_SET);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-ubicom32",
	.cra_priority		= CRYPTO_UBICOM32_COMPOSITE_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct ubicom32_aes_ctx),
	.cra_alignmask		= CRYPTO_UBICOM32_ALIGNMENT - 1,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= aes_set_key,
			.encrypt	= cbc_aes_encrypt,
			.decrypt	= cbc_aes_decrypt,
		}
	}
};

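/*
 * aes_init()
 *	Initialize the hardware crypto support and register the plain AES
 *	cipher plus the ecb(aes) and cbc(aes) block cipher modes, unwinding
 *	the registrations in reverse order on failure.
 */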
static int __init aes_init(void)
{
	int ret;

	hw_crypto_init();

	ret = crypto_register_alg(&aes_alg);
	if (ret)
		goto aes_err;

	ret = crypto_register_alg(&ecb_aes_alg);
	if (ret)
		goto ecb_aes_err;

	ret = crypto_register_alg(&cbc_aes_alg);
	if (ret)
		goto cbc_aes_err;

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	goto out;
}

static void __exit aes_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(aes_init);
module_exit(aes_fini);

MODULE_ALIAS("aes");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");