[openwrt/openwrt.git] package/kernel/lantiq/ltq-deu/src/ifxmips_aes.c
/******************************************************************************
**
** FILE NAME    : ifxmips_aes.c
** PROJECT      : IFX UEIP
** MODULES      : DEU Module
**
** DATE         : September 8, 2009
** AUTHOR       : Mohammad Firdaus
** DESCRIPTION  : Data Encryption Unit Driver for AES Algorithm
** COPYRIGHT    : Copyright (c) 2009
**                Infineon Technologies AG
**                Am Campeon 1-12, 85579 Neubiberg, Germany
**
**    This program is free software; you can redistribute it and/or modify
**    it under the terms of the GNU General Public License as published by
**    the Free Software Foundation; either version 2 of the License, or
**    (at your option) any later version.
**
** HISTORY
** $Date        $Author             $Comment
** 08,Sept 2009 Mohammad Firdaus    Initial UEIP release
*******************************************************************************/
/*!
 \defgroup IFX_DEU IFX_DEU_DRIVERS
 \ingroup API
 \brief ifx DEU driver module
*/

/*!
  \file ifxmips_aes.c
  \ingroup IFX_DEU
  \brief AES Encryption Driver main file
*/

/*!
 \defgroup IFX_AES_FUNCTIONS IFX_AES_FUNCTIONS
 \ingroup IFX_DEU
 \brief IFX AES driver Functions
*/


/* Project Header Files */
#if defined(CONFIG_MODVERSIONS)
#define MODVERSIONS
#include <linux/modversions.h>
#endif

#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/gcm.h>
#include <crypto/gf128mul.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "ifxmips_deu.h"

#if defined(CONFIG_DANUBE)
#include "ifxmips_deu_danube.h"
extern int ifx_danube_pre_1_4;
#elif defined(CONFIG_AR9)
#include "ifxmips_deu_ar9.h"
#elif defined(CONFIG_VR9) || defined(CONFIG_AR10)
#include "ifxmips_deu_vr9.h"
#else
#error "Unknown platform"
#endif

/* DMA related header and variables */

spinlock_t aes_lock;
#define CRTCL_SECT_INIT        spin_lock_init(&aes_lock)
#define CRTCL_SECT_START       spin_lock_irqsave(&aes_lock, flag)
#define CRTCL_SECT_END         spin_unlock_irqrestore(&aes_lock, flag)
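
/*
 * Usage sketch (illustrative only, not part of the original driver):
 * CRTCL_SECT_START/CRTCL_SECT_END expand to spin_lock_irqsave()/
 * spin_unlock_irqrestore(), so every caller must provide a local
 * 'unsigned long flag' for the macros to use:
 */
#if 0
static void example_critical_section(void)
{
    unsigned long flag;     /* consumed by the macros below */

    CRTCL_SECT_START;       /* spin_lock_irqsave(&aes_lock, flag) */
    /* ... program AES hardware registers ... */
    CRTCL_SECT_END;         /* spin_unlock_irqrestore(&aes_lock, flag) */
}
#endif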

/* Definition of constants */
#define AES_START                       IFX_AES_CON
#define AES_MIN_KEY_SIZE                16
#define AES_MAX_KEY_SIZE                32
#define AES_BLOCK_SIZE                  16
#define AES_BLOCK_WORDS                 4
#define CTR_RFC3686_NONCE_SIZE          4
#define CTR_RFC3686_IV_SIZE             8
#define CTR_RFC3686_MIN_KEY_SIZE        (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)
#define CTR_RFC3686_MAX_KEY_SIZE        (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)
#define AES_CBCMAC_DBN_TEMP_SIZE        128

#ifdef CRYPTO_DEBUG
extern char debug_level;
#define DPRINTF(level, format, args...) if (level < debug_level) printk(KERN_INFO "[%s %s %d]: " format, __FILE__, __func__, __LINE__, ##args);
#else
#define DPRINTF(level, format, args...)
#endif /* CRYPTO_DEBUG */

/* Function declarations */
int aes_chip_init(void);
u32 endian_swap(u32 input);
u32 input_swap(u32 input);
u32* memory_alignment(const u8 *arg, u32 *buff_alloc, int in_out, int nbytes);
void aes_dma_memory_copy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes);
void des_dma_memory_copy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes);
int aes_memory_allocate(int value);
int des_memory_allocate(int value);
void memory_release(u32 *addr);


extern void ifx_deu_aes (void *ctx_arg, uint8_t *out_arg, const uint8_t *in_arg,
        uint8_t *iv_arg, size_t nbytes, int encdec, int mode);
/* End of function declarations */

struct aes_ctx {
    int key_length;
    u8 buf[AES_MAX_KEY_SIZE];
    u8 tweakkey[AES_MAX_KEY_SIZE];
    u8 nonce[CTR_RFC3686_NONCE_SIZE];
    u8 lastbuffer[4 * XTS_BLOCK_SIZE];
    int use_tweak;
    u32 byte_count;
    u32 dbn;
    int started;
    u32 (*temp)[AES_BLOCK_WORDS];
    u8 block[AES_BLOCK_SIZE];
    u8 hash[AES_BLOCK_SIZE];
    struct gf128mul_4k *gf128;
};

extern int disable_deudma;
extern int disable_multiblock;

/*! \fn int aes_set_key (struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief sets the AES keys
 *  \param tfm linux crypto algo transform
 *  \param in_key input key
 *  \param key_len key lengths of 16, 24 and 32 bytes supported
 *  \return -EINVAL - bad key length, 0 - SUCCESS
*/
int aes_set_key (struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(tfm);

    //printk("set_key in %s\n", __FILE__);

    //aes_chip_init();

    if (key_len != 16 && key_len != 24 && key_len != 32) {
        return -EINVAL;
    }

    ctx->key_length = key_len;
    ctx->use_tweak = 0;
    DPRINTF(0, "ctx @%p, key_len %d, ctx->key_length %d\n", ctx, key_len, ctx->key_length);
    memcpy ((u8 *) (ctx->buf), in_key, key_len);

    return 0;
}
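
/*
 * Usage sketch (illustrative only, not part of the original driver):
 * once the "aes" cipher below is registered, a kernel user reaches this
 * setkey path through the generic single-block cipher API, roughly:
 */
#if 0
static int example_aes_cipher_user(const u8 key[16], const u8 in[16], u8 out[16])
{
    struct crypto_cipher *cipher;
    int err;

    cipher = crypto_alloc_cipher("aes", 0, 0);      /* may select ifxdeu-aes */
    if (IS_ERR(cipher))
        return PTR_ERR(cipher);

    err = crypto_cipher_setkey(cipher, key, 16);    /* ends up in aes_set_key() */
    if (!err)
        crypto_cipher_encrypt_one(cipher, out, in);

    crypto_free_cipher(cipher);
    return err;
}
#endif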


/*! \fn int aes_set_key_skcipher (struct crypto_skcipher *tfm, const uint8_t *in_key, unsigned int key_len)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief sets the AES keys for skcipher
 *  \param tfm linux crypto skcipher
 *  \param in_key input key
 *  \param key_len key lengths of 16, 24 and 32 bytes supported
 *  \return -EINVAL - bad key length, 0 - SUCCESS
*/
int aes_set_key_skcipher (struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len)
{
    return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
}


/*! \fn void aes_set_key_hw (void *ctx_arg)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief sets the AES key to the hardware, requires spinlock to be held by caller
 *  \param ctx_arg crypto algo context
 *  \return
*/
void aes_set_key_hw (void *ctx_arg)
{
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
    struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
    u8 *in_key = ctx->buf;
    int key_len = ctx->key_length;
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */

    if (ctx->use_tweak) in_key = ctx->tweakkey;

    /* 128, 192 or 256 bit key length */
    aes->controlr.K = key_len / 8 - 2;
    if (key_len == 128 / 8) {
        aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
        aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
        aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
        aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
    }
    else if (key_len == 192 / 8) {
        aes->K5R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
        aes->K4R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
        aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
        aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
        aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 4));
        aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 5));
    }
    else if (key_len == 256 / 8) {
        aes->K7R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
        aes->K6R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
        aes->K5R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
        aes->K4R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
        aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 4));
        aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 5));
        aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 6));
        aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 7));
    }
    else {
        printk (KERN_ERR "[%s %s %d]: Invalid key_len : %d\n", __FILE__, __func__, __LINE__, key_len);
        return; //-EINVAL;
    }

    /* let HW pre-process DEcryption key in any case (even if
       ENcryption is used). Key Valid (KV) bit is then only
       checked in decryption routine! */
    aes->controlr.PNK = 1;

}


/*! \fn void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, size_t nbytes, int encdec, int mode)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief main interface to AES hardware
 *  \param ctx_arg crypto algo context
 *  \param out_arg output bytestream
 *  \param in_arg input bytestream
 *  \param iv_arg initialization vector
 *  \param nbytes length of bytestream
 *  \param encdec 1 for encrypt; 0 for decrypt
 *  \param mode operation mode such as ecb, cbc, ctr
 *
*/
void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
        u8 *iv_arg, size_t nbytes, int encdec, int mode)

{
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
    //struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
    unsigned long flag;
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    int i = 0;
    int byte_cnt = nbytes;

    CRTCL_SECT_START;

    aes_set_key_hw (ctx_arg);

    aes->controlr.E_D = !encdec;    //encryption
    aes->controlr.O = mode; //0 ECB 1 CBC 2 OFB 3 CFB 4 CTR

    //aes->controlr.F = 128; //default; only for CFB and OFB modes; change only for customer-specific apps
    if (mode > 0) {
        aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
        aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
        aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
        aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));
    }


    i = 0;
    while (byte_cnt >= 16) {

        aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 0));
        aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 1));
        aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 2));
        aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 3));    /* start crypto */

        while (aes->controlr.BUS) {
            // this will not take long
        }

        *((volatile u32 *) out_arg + (i * 4) + 0) = aes->OD3R;
        *((volatile u32 *) out_arg + (i * 4) + 1) = aes->OD2R;
        *((volatile u32 *) out_arg + (i * 4) + 2) = aes->OD1R;
        *((volatile u32 *) out_arg + (i * 4) + 3) = aes->OD0R;

        i++;
        byte_cnt -= 16;
    }

    /* To handle all non-aligned bytes (not aligned to 16B size) */
    if (byte_cnt) {
        u8 temparea[16] = {0,};

        memcpy(temparea, ((u32 *) in_arg + (i * 4)), byte_cnt);

        aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) temparea + 0));
        aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) temparea + 1));
        aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) temparea + 2));
        aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) temparea + 3));    /* start crypto */

        while (aes->controlr.BUS) {
        }

        *((volatile u32 *) temparea + 0) = aes->OD3R;
        *((volatile u32 *) temparea + 1) = aes->OD2R;
        *((volatile u32 *) temparea + 2) = aes->OD1R;
        *((volatile u32 *) temparea + 3) = aes->OD0R;

        memcpy(((u32 *) out_arg + (i * 4)), temparea, byte_cnt);
    }

    //tc.chen : copy iv_arg back
    if (mode > 0) {
        *((u32 *) iv_arg) = DEU_ENDIAN_SWAP(aes->IV3R);
        *((u32 *) iv_arg + 1) = DEU_ENDIAN_SWAP(aes->IV2R);
        *((u32 *) iv_arg + 2) = DEU_ENDIAN_SWAP(aes->IV1R);
        *((u32 *) iv_arg + 3) = DEU_ENDIAN_SWAP(aes->IV0R);
    }

    CRTCL_SECT_END;
}

/*!
 * \fn int ctr_rfc3686_aes_set_key (struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief sets RFC3686 key
 * \param tfm linux crypto algo transform
 * \param in_key input key
 * \param key_len key lengths of 20, 28 and 36 bytes supported; the last 4 bytes are the nonce
 * \return 0 - SUCCESS
 *         -EINVAL - bad key length
*/
int ctr_rfc3686_aes_set_key (struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(tfm);

    //printk("ctr_rfc3686_aes_set_key in %s\n", __FILE__);

    memcpy(ctx->nonce, in_key + (key_len - CTR_RFC3686_NONCE_SIZE),
           CTR_RFC3686_NONCE_SIZE);

    key_len -= CTR_RFC3686_NONCE_SIZE; // remove 4 bytes of nonce

    if (key_len != 16 && key_len != 24 && key_len != 32) {
        return -EINVAL;
    }

    ctx->key_length = key_len;
    ctx->use_tweak = 0;

    memcpy ((u8 *) (ctx->buf), in_key, key_len);

    return 0;
}
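
/*
 * Key-layout sketch (illustrative only, not part of the original driver):
 * an RFC 3686 key blob is the raw AES key followed by the 4-byte nonce,
 * so a 20-byte blob carries an AES-128 key:
 */
#if 0
static int example_rfc3686_key_layout(struct crypto_tfm *tfm)
{
    static const u8 key_blob[16 + CTR_RFC3686_NONCE_SIZE] = {
        /* bytes  0..15: AES-128 key */
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
        /* bytes 16..19: per-key nonce placed in the counter block */
        0xde, 0xad, 0xbe, 0xef,
    };

    return ctr_rfc3686_aes_set_key(tfm, key_blob, sizeof(key_blob));
}
#endif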

/*!
 * \fn int ctr_rfc3686_aes_set_key_skcipher (struct crypto_skcipher *tfm, const uint8_t *in_key, unsigned int key_len)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief sets RFC3686 key for skcipher
 * \param tfm linux crypto skcipher
 * \param in_key input key
 * \param key_len key lengths of 20, 28 and 36 bytes supported; the last 4 bytes are the nonce
 * \return 0 - SUCCESS
 *         -EINVAL - bad key length
*/
int ctr_rfc3686_aes_set_key_skcipher (struct crypto_skcipher *tfm, const uint8_t *in_key, unsigned int key_len)
{
    return ctr_rfc3686_aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
}

/*! \fn void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief main interface with deu hardware in DMA mode
 *  \param ctx_arg crypto algo context
 *  \param out_arg output bytestream
 *  \param in_arg input bytestream
 *  \param iv_arg initialization vector
 *  \param nbytes length of bytestream
 *  \param encdec 1 for encrypt; 0 for decrypt
 *  \param mode operation mode such as ecb, cbc, ctr
*/


//definitions from linux/include/crypto.h:
//#define CRYPTO_TFM_MODE_ECB       0x00000001
//#define CRYPTO_TFM_MODE_CBC       0x00000002
//#define CRYPTO_TFM_MODE_CFB       0x00000004
//#define CRYPTO_TFM_MODE_CTR       0x00000008
//#define CRYPTO_TFM_MODE_OFB       0x00000010 // not even defined
//but hardware definition: 0 ECB 1 CBC 2 OFB 3 CFB 4 CTR

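/*
 * Illustrative only (these names are not part of the original driver):
 * the hardware mode encoding used by the ifx_deu_aes_*() wrappers below
 * could be written as an enum instead of bare integers:
 */
#if 0
enum ifx_deu_aes_hw_mode {
    IFX_DEU_AES_MODE_ECB = 0,   /* ifx_deu_aes_ecb() */
    IFX_DEU_AES_MODE_CBC = 1,   /* ifx_deu_aes_cbc() */
    IFX_DEU_AES_MODE_OFB = 2,   /* ifx_deu_aes_ofb() */
    IFX_DEU_AES_MODE_CFB = 3,   /* ifx_deu_aes_cfb() */
    IFX_DEU_AES_MODE_CTR = 4,   /* ifx_deu_aes_ctr() */
};
#endif
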
/*! \fn void ifx_deu_aes_ecb (void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief sets AES hardware to ECB mode
 *  \param ctx crypto algo context
 *  \param dst output bytestream
 *  \param src input bytestream
 *  \param iv initialization vector
 *  \param nbytes length of bytestream
 *  \param encdec 1 for encrypt; 0 for decrypt
 *  \param inplace not used
*/
void ifx_deu_aes_ecb (void *ctx, uint8_t *dst, const uint8_t *src,
        uint8_t *iv, size_t nbytes, int encdec, int inplace)
{
    ifx_deu_aes (ctx, dst, src, NULL, nbytes, encdec, 0);
}

/*! \fn void ifx_deu_aes_cbc (void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief sets AES hardware to CBC mode
 *  \param ctx crypto algo context
 *  \param dst output bytestream
 *  \param src input bytestream
 *  \param iv initialization vector
 *  \param nbytes length of bytestream
 *  \param encdec 1 for encrypt; 0 for decrypt
 *  \param inplace not used
*/
void ifx_deu_aes_cbc (void *ctx, uint8_t *dst, const uint8_t *src,
        uint8_t *iv, size_t nbytes, int encdec, int inplace)
{
    ifx_deu_aes (ctx, dst, src, iv, nbytes, encdec, 1);
}

/*! \fn void ifx_deu_aes_ofb (void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief sets AES hardware to OFB mode
 *  \param ctx crypto algo context
 *  \param dst output bytestream
 *  \param src input bytestream
 *  \param iv initialization vector
 *  \param nbytes length of bytestream
 *  \param encdec 1 for encrypt; 0 for decrypt
 *  \param inplace not used
*/
void ifx_deu_aes_ofb (void *ctx, uint8_t *dst, const uint8_t *src,
        uint8_t *iv, size_t nbytes, int encdec, int inplace)
{
    ifx_deu_aes (ctx, dst, src, iv, nbytes, encdec, 2);
}

/*! \fn void ifx_deu_aes_cfb (void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief sets AES hardware to CFB mode
 *  \param ctx crypto algo context
 *  \param dst output bytestream
 *  \param src input bytestream
 *  \param iv initialization vector
 *  \param nbytes length of bytestream
 *  \param encdec 1 for encrypt; 0 for decrypt
 *  \param inplace not used
*/
void ifx_deu_aes_cfb (void *ctx, uint8_t *dst, const uint8_t *src,
        uint8_t *iv, size_t nbytes, int encdec, int inplace)
{
    ifx_deu_aes (ctx, dst, src, iv, nbytes, encdec, 3);
}

/*! \fn void ifx_deu_aes_ctr (void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief sets AES hardware to CTR mode
 *  \param ctx crypto algo context
 *  \param dst output bytestream
 *  \param src input bytestream
 *  \param iv initialization vector
 *  \param nbytes length of bytestream
 *  \param encdec 1 for encrypt; 0 for decrypt
 *  \param inplace not used
*/
void ifx_deu_aes_ctr (void *ctx, uint8_t *dst, const uint8_t *src,
        uint8_t *iv, size_t nbytes, int encdec, int inplace)
{
    ifx_deu_aes (ctx, dst, src, iv, nbytes, encdec, 4);
}

/*! \fn void aes_encrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief encrypt AES_BLOCK_SIZE of data
 *  \param tfm linux crypto algo transform
 *  \param out output bytestream
 *  \param in input bytestream
*/
void aes_encrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
    ifx_deu_aes (ctx, out, in, NULL, AES_BLOCK_SIZE,
            CRYPTO_DIR_ENCRYPT, 0);
}

/*! \fn void aes_decrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief decrypt AES_BLOCK_SIZE of data
 *  \param tfm linux crypto algo transform
 *  \param out output bytestream
 *  \param in input bytestream
*/
void aes_decrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
    ifx_deu_aes (ctx, out, in, NULL, AES_BLOCK_SIZE,
            CRYPTO_DIR_DECRYPT, 0);
}

/*
 * \brief AES function mappings
*/
struct crypto_alg ifxdeu_aes_alg = {
    .cra_name               = "aes",
    .cra_driver_name        = "ifxdeu-aes",
    .cra_priority           = 300,
    .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .cra_blocksize          = AES_BLOCK_SIZE,
    .cra_ctxsize            = sizeof(struct aes_ctx),
    .cra_module             = THIS_MODULE,
    .cra_list               = LIST_HEAD_INIT(ifxdeu_aes_alg.cra_list),
    .cra_u                  = {
        .cipher = {
            .cia_min_keysize    = AES_MIN_KEY_SIZE,
            .cia_max_keysize    = AES_MAX_KEY_SIZE,
            .cia_setkey         = aes_set_key,
            .cia_encrypt        = aes_encrypt,
            .cia_decrypt        = aes_decrypt,
        }
    }
};
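
/*
 * Registration sketch (illustrative only, not part of the original
 * driver): the algorithm objects in this file are registered by the DEU
 * module init code elsewhere, along these lines:
 */
#if 0
static int __init example_register_algs(void)
{
    int err;

    err = crypto_register_alg(&ifxdeu_aes_alg);          /* single-block "aes" */
    if (err)
        return err;

    err = crypto_register_skcipher(&ifxdeu_ecb_aes_alg); /* "ecb(aes)" */
    if (err)
        crypto_unregister_alg(&ifxdeu_aes_alg);

    return err;
}
#endif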

/*! \fn int ecb_aes_encrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief ECB AES encrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int ecb_aes_encrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    int err;
    unsigned int enc_bytes, nbytes;

    err = skcipher_walk_virt(&walk, req, false);

    while ((nbytes = enc_bytes = walk.nbytes)) {
        enc_bytes -= (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                NULL, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = skcipher_walk_done(&walk, nbytes);
    }

    return err;
}

/*! \fn int ecb_aes_decrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief ECB AES decrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int ecb_aes_decrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    int err;
    unsigned int dec_bytes, nbytes;

    err = skcipher_walk_virt(&walk, req, false);

    while ((nbytes = dec_bytes = walk.nbytes)) {
        dec_bytes -= (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                NULL, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = skcipher_walk_done(&walk, nbytes);
    }

    return err;
}

/*
 * \brief AES function mappings
*/
struct skcipher_alg ifxdeu_ecb_aes_alg = {
    .base.cra_name          = "ecb(aes)",
    .base.cra_driver_name   = "ifxdeu-ecb(aes)",
    .base.cra_priority      = 400,
    .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize     = AES_BLOCK_SIZE,
    .base.cra_ctxsize       = sizeof(struct aes_ctx),
    .base.cra_module        = THIS_MODULE,
    .base.cra_list          = LIST_HEAD_INIT(ifxdeu_ecb_aes_alg.base.cra_list),
    .min_keysize            = AES_MIN_KEY_SIZE,
    .max_keysize            = AES_MAX_KEY_SIZE,
    .setkey                 = aes_set_key_skcipher,
    .encrypt                = ecb_aes_encrypt,
    .decrypt                = ecb_aes_decrypt,
};
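
/*
 * Usage sketch (illustrative only, not part of the original driver):
 * a kernel user would exercise ecb_aes_encrypt() through the generic
 * skcipher API, roughly like this (error handling trimmed):
 */
#if 0
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_ecb_encrypt(u8 *buf, unsigned int len, const u8 key[16])
{
    struct crypto_skcipher *tfm;
    struct skcipher_request *req;
    struct scatterlist sg;
    int err;

    tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);  /* may pick ifxdeu-ecb(aes) */
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    err = crypto_skcipher_setkey(tfm, key, 16);
    req = skcipher_request_alloc(tfm, GFP_KERNEL);
    sg_init_one(&sg, buf, len);
    skcipher_request_set_crypt(req, &sg, &sg, len, NULL); /* ECB: no IV */

    if (!err)
        err = crypto_skcipher_encrypt(req);

    skcipher_request_free(req);
    crypto_free_skcipher(tfm);
    return err;
}
#endif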

/*! \fn int cbc_aes_encrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief CBC AES encrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int cbc_aes_encrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    int err;
    unsigned int enc_bytes, nbytes;

    err = skcipher_walk_virt(&walk, req, false);

    while ((nbytes = enc_bytes = walk.nbytes)) {
        u8 *iv = walk.iv;
        enc_bytes -= (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = skcipher_walk_done(&walk, nbytes);
    }

    return err;
}

/*! \fn int cbc_aes_decrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief CBC AES decrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int cbc_aes_decrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    int err;
    unsigned int dec_bytes, nbytes;

    err = skcipher_walk_virt(&walk, req, false);

    while ((nbytes = dec_bytes = walk.nbytes)) {
        u8 *iv = walk.iv;
        dec_bytes -= (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = skcipher_walk_done(&walk, nbytes);
    }

    return err;
}

/*
 * \brief AES function mappings
*/
struct skcipher_alg ifxdeu_cbc_aes_alg = {
    .base.cra_name          = "cbc(aes)",
    .base.cra_driver_name   = "ifxdeu-cbc(aes)",
    .base.cra_priority      = 400,
    .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize     = AES_BLOCK_SIZE,
    .base.cra_ctxsize       = sizeof(struct aes_ctx),
    .base.cra_module        = THIS_MODULE,
    .base.cra_list          = LIST_HEAD_INIT(ifxdeu_cbc_aes_alg.base.cra_list),
    .min_keysize            = AES_MIN_KEY_SIZE,
    .max_keysize            = AES_MAX_KEY_SIZE,
    .ivsize                 = AES_BLOCK_SIZE,
    .setkey                 = aes_set_key_skcipher,
    .encrypt                = cbc_aes_encrypt,
    .decrypt                = cbc_aes_decrypt,
};

/*! \fn void ifx_deu_aes_xts (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, size_t nbytes, int encdec)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief main interface to AES hardware for XTS impl
 *  \param ctx_arg crypto algo context
 *  \param out_arg output bytestream
 *  \param in_arg input bytestream
 *  \param iv_arg initialization vector
 *  \param nbytes length of bytestream
 *  \param encdec 1 for encrypt; 0 for decrypt
 *
*/
void ifx_deu_aes_xts (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
        u8 *iv_arg, size_t nbytes, int encdec)
{
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
    //struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
    unsigned long flag;
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    u8 oldiv[16];
    int i = 0;
    int byte_cnt = nbytes;

    CRTCL_SECT_START;

    aes_set_key_hw (ctx_arg);

    aes->controlr.E_D = !encdec;    //encryption
    aes->controlr.O = 1; //0 ECB 1 CBC 2 OFB 3 CFB 4 CTR - CBC mode for xts

    i = 0;
    while (byte_cnt >= 16) {

        if (!encdec) {
            if (((byte_cnt % 16) > 0) && (byte_cnt < (2*XTS_BLOCK_SIZE))) {
                memcpy(oldiv, iv_arg, 16);
                gf128mul_x_ble((le128 *)iv_arg, (le128 *)iv_arg);
            }
            u128_xor((u128 *)((u32 *) in_arg + (i * 4) + 0), (u128 *)((u32 *) in_arg + (i * 4) + 0), (u128 *)iv_arg);
        }

        aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
        aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
        aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
        aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));

        aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 0));
        aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 1));
        aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 2));
        aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 3));    /* start crypto */

        while (aes->controlr.BUS) {
            // this will not take long
        }

        *((volatile u32 *) out_arg + (i * 4) + 0) = aes->OD3R;
        *((volatile u32 *) out_arg + (i * 4) + 1) = aes->OD2R;
        *((volatile u32 *) out_arg + (i * 4) + 2) = aes->OD1R;
        *((volatile u32 *) out_arg + (i * 4) + 3) = aes->OD0R;

        if (encdec) {
            u128_xor((u128 *)((volatile u32 *) out_arg + (i * 4) + 0), (u128 *)((volatile u32 *) out_arg + (i * 4) + 0), (u128 *)iv_arg);
        }
        gf128mul_x_ble((le128 *)iv_arg, (le128 *)iv_arg);
        i++;
        byte_cnt -= 16;
    }

    if (byte_cnt) {
        u8 state[XTS_BLOCK_SIZE] = {0,};

        if (!encdec) memcpy(iv_arg, oldiv, 16);

        aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
        aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
        aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
        aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));

        memcpy(state, ((u32 *) in_arg + (i * 4) + 0), byte_cnt);
        memcpy((state + byte_cnt), (out_arg + ((i - 1) * 16) + byte_cnt), (XTS_BLOCK_SIZE - byte_cnt));
        if (!encdec) {
            u128_xor((u128 *)state, (u128 *)state, (u128 *)iv_arg);
        }

        aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) state + 0));
        aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) state + 1));
        aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) state + 2));
        aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) state + 3));    /* start crypto */

        memcpy(((u32 *) out_arg + (i * 4) + 0), ((u32 *) out_arg + ((i - 1) * 4) + 0), byte_cnt);

        while (aes->controlr.BUS) {
            // this will not take long
        }

        *((volatile u32 *) out_arg + ((i-1) * 4) + 0) = aes->OD3R;
        *((volatile u32 *) out_arg + ((i-1) * 4) + 1) = aes->OD2R;
        *((volatile u32 *) out_arg + ((i-1) * 4) + 2) = aes->OD1R;
        *((volatile u32 *) out_arg + ((i-1) * 4) + 3) = aes->OD0R;

        if (encdec) {
            u128_xor((u128 *)((volatile u32 *) out_arg + ((i-1) * 4) + 0), (u128 *)((volatile u32 *) out_arg + ((i-1) * 4) + 0), (u128 *)iv_arg);
        }
    }

    CRTCL_SECT_END;
}

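/*
 * Background sketch (illustrative only, not part of the original driver):
 * the tweak handling above follows the standard XTS recipe -- block j
 * uses tweak T_j, and T_{j+1} = T_j * x in GF(2^128) (little-endian
 * convention), which is exactly what gf128mul_x_ble() computes in place
 * on iv_arg:
 */
#if 0
static void example_xts_tweak_step(le128 *tweak)
{
    /* advance the tweak to cover the next 16-byte data block */
    gf128mul_x_ble(tweak, tweak);
}
#endif
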
/*! \fn int xts_aes_encrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief XTS AES encrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int xts_aes_encrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    int err;
    unsigned int enc_bytes, nbytes, processed;

    if (req->cryptlen < XTS_BLOCK_SIZE)
        return -EINVAL;

    err = skcipher_walk_virt(&walk, req, false);

    ctx->use_tweak = 1;
    aes_encrypt(req->base.tfm, walk.iv, walk.iv);
    ctx->use_tweak = 0;
    processed = 0;

    while ((nbytes = walk.nbytes) && (walk.nbytes >= (XTS_BLOCK_SIZE * 2)) ) {
        u8 *iv = walk.iv;
        if (nbytes == walk.total) {
            enc_bytes = nbytes;
        } else {
            enc_bytes = nbytes & ~(XTS_BLOCK_SIZE - 1);
            if ((req->cryptlen - processed - enc_bytes) < (XTS_BLOCK_SIZE)) {
                if (enc_bytes > (2 * XTS_BLOCK_SIZE)) {
                    enc_bytes -= XTS_BLOCK_SIZE;
                } else {
                    break;
                }
            }
        }
        ifx_deu_aes_xts(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                iv, enc_bytes, CRYPTO_DIR_ENCRYPT);
        err = skcipher_walk_done(&walk, nbytes - enc_bytes);
        processed += enc_bytes;
    }

    if ((walk.nbytes)) {
        u8 *iv = walk.iv;
        nbytes = req->cryptlen - processed;
        scatterwalk_map_and_copy(ctx->lastbuffer, req->src, (req->cryptlen - nbytes), nbytes, 0);
        ifx_deu_aes_xts(ctx, ctx->lastbuffer, ctx->lastbuffer,
                iv, nbytes, CRYPTO_DIR_ENCRYPT);
        scatterwalk_map_and_copy(ctx->lastbuffer, req->dst, (req->cryptlen - nbytes), nbytes, 1);
        skcipher_request_complete(req, 0);
    }

    return err;
}

/*! \fn int xts_aes_decrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief XTS AES decrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int xts_aes_decrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    int err;
    unsigned int dec_bytes, nbytes, processed;

    if (req->cryptlen < XTS_BLOCK_SIZE)
        return -EINVAL;

    err = skcipher_walk_virt(&walk, req, false);

    ctx->use_tweak = 1;
    aes_encrypt(req->base.tfm, walk.iv, walk.iv);
    ctx->use_tweak = 0;
    processed = 0;

    while ((nbytes = walk.nbytes) && (walk.nbytes >= (XTS_BLOCK_SIZE * 2))) {
        u8 *iv = walk.iv;
        if (nbytes == walk.total) {
            dec_bytes = nbytes;
        } else {
            dec_bytes = nbytes & ~(XTS_BLOCK_SIZE - 1);
            if ((req->cryptlen - processed - dec_bytes) < (XTS_BLOCK_SIZE)) {
                if (dec_bytes > (2 * XTS_BLOCK_SIZE)) {
                    dec_bytes -= XTS_BLOCK_SIZE;
                } else {
                    break;
                }
            }
        }
        ifx_deu_aes_xts(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                iv, dec_bytes, CRYPTO_DIR_DECRYPT);
        err = skcipher_walk_done(&walk, nbytes - dec_bytes);
        processed += dec_bytes;
    }

    if ((walk.nbytes)) {
        u8 *iv = walk.iv;
        nbytes = req->cryptlen - processed;
        scatterwalk_map_and_copy(ctx->lastbuffer, req->src, (req->cryptlen - nbytes), nbytes, 0);
        ifx_deu_aes_xts(ctx, ctx->lastbuffer, ctx->lastbuffer,
                iv, nbytes, CRYPTO_DIR_DECRYPT);
        scatterwalk_map_and_copy(ctx->lastbuffer, req->dst, (req->cryptlen - nbytes), nbytes, 1);
        skcipher_request_complete(req, 0);
    }

    return err;
}

/*! \fn int xts_aes_set_key_skcipher (struct crypto_skcipher *tfm, const uint8_t *in_key, unsigned int key_len)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief sets the AES keys for XTS
 *  \param tfm linux crypto skcipher
 *  \param in_key input key
 *  \param key_len combined key lengths of 32, 48 and 64 bytes supported (two equal halves: data key and tweak key)
 *  \return -EINVAL - bad key length, 0 - SUCCESS
*/
int xts_aes_set_key_skcipher (struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(crypto_skcipher_tfm(tfm));
    unsigned int keylen = (key_len / 2);

    if (key_len % 2) return -EINVAL;

    if (keylen != 16 && keylen != 24 && keylen != 32) {
        return -EINVAL;
    }

    ctx->key_length = keylen;
    ctx->use_tweak = 0;
    DPRINTF(0, "ctx @%p, key_len %d, ctx->key_length %d\n", ctx, key_len, ctx->key_length);
    memcpy ((u8 *) (ctx->buf), in_key, keylen);
    memcpy ((u8 *) (ctx->tweakkey), in_key + keylen, keylen);

    return 0;
}
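
/*
 * Note (illustrative only, not part of the original driver): the generic
 * helper xts_verify_key() from <crypto/xts.h> performs the same length
 * validation and additionally rejects identical key halves in FIPS mode:
 */
#if 0
static int example_xts_setkey_check(struct crypto_skcipher *tfm,
                                    const u8 *in_key, unsigned int key_len)
{
    return xts_verify_key(tfm, in_key, key_len);
}
#endif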

/*
 * \brief AES function mappings
*/
struct skcipher_alg ifxdeu_xts_aes_alg = {
    .base.cra_name          = "xts(aes)",
    .base.cra_driver_name   = "ifxdeu-xts(aes)",
    .base.cra_priority      = 400,
    .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize     = XTS_BLOCK_SIZE,
    .base.cra_ctxsize       = sizeof(struct aes_ctx),
    .base.cra_module        = THIS_MODULE,
    .base.cra_list          = LIST_HEAD_INIT(ifxdeu_xts_aes_alg.base.cra_list),
    .min_keysize            = AES_MIN_KEY_SIZE * 2,
    .max_keysize            = AES_MAX_KEY_SIZE * 2,
    .ivsize                 = XTS_BLOCK_SIZE,
    .walksize               = 2 * XTS_BLOCK_SIZE,
    .setkey                 = xts_aes_set_key_skcipher,
    .encrypt                = xts_aes_encrypt,
    .decrypt                = xts_aes_decrypt,
};

/*! \fn int ofb_aes_encrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief OFB AES encrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int ofb_aes_encrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    int err;
    unsigned int enc_bytes, nbytes;

    err = skcipher_walk_virt(&walk, req, false);

    while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
        enc_bytes -= (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_ofb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                walk.iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = skcipher_walk_done(&walk, nbytes);
    }

    /* to handle remaining bytes < AES_BLOCK_SIZE */
    if (walk.nbytes) {
        ifx_deu_aes_ofb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                walk.iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
        err = skcipher_walk_done(&walk, 0);
    }

    return err;
}

/*! \fn int ofb_aes_decrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief OFB AES decrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int ofb_aes_decrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    int err;
    unsigned int dec_bytes, nbytes;

    err = skcipher_walk_virt(&walk, req, false);

    while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
        dec_bytes -= (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_ofb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                walk.iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = skcipher_walk_done(&walk, nbytes);
    }

    /* to handle remaining bytes < AES_BLOCK_SIZE */
    if (walk.nbytes) {
        ifx_deu_aes_ofb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                walk.iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
        err = skcipher_walk_done(&walk, 0);
    }

    return err;
}

/*
 * \brief AES function mappings
*/
struct skcipher_alg ifxdeu_ofb_aes_alg = {
    .base.cra_name          = "ofb(aes)",
    .base.cra_driver_name   = "ifxdeu-ofb(aes)",
    .base.cra_priority      = 400,
    .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize     = 1,
    .base.cra_ctxsize       = sizeof(struct aes_ctx),
    .base.cra_module        = THIS_MODULE,
    .base.cra_list          = LIST_HEAD_INIT(ifxdeu_ofb_aes_alg.base.cra_list),
    .min_keysize            = AES_MIN_KEY_SIZE,
    .max_keysize            = AES_MAX_KEY_SIZE,
    .ivsize                 = AES_BLOCK_SIZE,
    .chunksize              = AES_BLOCK_SIZE,
    .walksize               = AES_BLOCK_SIZE,
    .setkey                 = aes_set_key_skcipher,
    .encrypt                = ofb_aes_encrypt,
    .decrypt                = ofb_aes_decrypt,
};

/*! \fn int cfb_aes_encrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief CFB AES encrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int cfb_aes_encrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    int err;
    unsigned int enc_bytes, nbytes;

    err = skcipher_walk_virt(&walk, req, false);

    while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
        enc_bytes -= (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_cfb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                walk.iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = skcipher_walk_done(&walk, nbytes);
    }

    /* to handle remaining bytes < AES_BLOCK_SIZE */
    if (walk.nbytes) {
        ifx_deu_aes_cfb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                walk.iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
        err = skcipher_walk_done(&walk, 0);
    }

    return err;
}

/*! \fn int cfb_aes_decrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief CFB AES decrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int cfb_aes_decrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    int err;
    unsigned int dec_bytes, nbytes;

    err = skcipher_walk_virt(&walk, req, false);

    while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
        dec_bytes -= (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_cfb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                walk.iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = skcipher_walk_done(&walk, nbytes);
    }

    /* to handle remaining bytes < AES_BLOCK_SIZE */
    if (walk.nbytes) {
        ifx_deu_aes_cfb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                walk.iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
        err = skcipher_walk_done(&walk, 0);
    }

    return err;
}

/*
 * \brief AES function mappings
*/
struct skcipher_alg ifxdeu_cfb_aes_alg = {
    .base.cra_name          = "cfb(aes)",
    .base.cra_driver_name   = "ifxdeu-cfb(aes)",
    .base.cra_priority      = 400,
    .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize     = 1,
    .base.cra_ctxsize       = sizeof(struct aes_ctx),
    .base.cra_module        = THIS_MODULE,
    .base.cra_list          = LIST_HEAD_INIT(ifxdeu_cfb_aes_alg.base.cra_list),
    .min_keysize            = AES_MIN_KEY_SIZE,
    .max_keysize            = AES_MAX_KEY_SIZE,
    .ivsize                 = AES_BLOCK_SIZE,
    .chunksize              = AES_BLOCK_SIZE,
    .walksize               = AES_BLOCK_SIZE,
    .setkey                 = aes_set_key_skcipher,
    .encrypt                = cfb_aes_encrypt,
    .decrypt                = cfb_aes_decrypt,
};

/*! \fn int ctr_basic_aes_encrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief Counter mode AES encrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int ctr_basic_aes_encrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    int err;
    unsigned int enc_bytes, nbytes;

    err = skcipher_walk_virt(&walk, req, false);

    while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
        enc_bytes -= (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                walk.iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = skcipher_walk_done(&walk, nbytes);
    }

    /* to handle remaining bytes < AES_BLOCK_SIZE */
    if (walk.nbytes) {
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                walk.iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
        err = skcipher_walk_done(&walk, 0);
    }

    return err;
}

/*! \fn int ctr_basic_aes_decrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief Counter mode AES decrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int ctr_basic_aes_decrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    int err;
    unsigned int dec_bytes, nbytes;

    err = skcipher_walk_virt(&walk, req, false);

    while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
        dec_bytes -= (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                walk.iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = skcipher_walk_done(&walk, nbytes);
    }

    /* to handle remaining bytes < AES_BLOCK_SIZE */
    if (walk.nbytes) {
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                walk.iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
        err = skcipher_walk_done(&walk, 0);
    }

    return err;
}

/*
 * \brief AES function mappings
*/
struct skcipher_alg ifxdeu_ctr_basic_aes_alg = {
    .base.cra_name          = "ctr(aes)",
    .base.cra_driver_name   = "ifxdeu-ctr(aes)",
    .base.cra_priority      = 400,
    .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize     = 1,
    .base.cra_ctxsize       = sizeof(struct aes_ctx),
    .base.cra_module        = THIS_MODULE,
    .base.cra_list          = LIST_HEAD_INIT(ifxdeu_ctr_basic_aes_alg.base.cra_list),
    .min_keysize            = AES_MIN_KEY_SIZE,
    .max_keysize            = AES_MAX_KEY_SIZE,
    .ivsize                 = AES_BLOCK_SIZE,
    .walksize               = AES_BLOCK_SIZE,
    .setkey                 = aes_set_key_skcipher,
    .encrypt                = ctr_basic_aes_encrypt,
    .decrypt                = ctr_basic_aes_decrypt,
};

/*! \fn int ctr_rfc3686_aes_encrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief Counter mode AES (rfc3686) encrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int ctr_rfc3686_aes_encrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    unsigned int nbytes, enc_bytes;
    int err;
    u8 rfc3686_iv[16];

    err = skcipher_walk_virt(&walk, req, false);
    nbytes = walk.nbytes;

    /* set up counter block */
    memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
    memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, walk.iv, CTR_RFC3686_IV_SIZE);

    /* initialize counter portion of counter block */
    *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
        cpu_to_be32(1);

    while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
        enc_bytes -= (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                rfc3686_iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = skcipher_walk_done(&walk, nbytes);
    }

    /* to handle remaining bytes < AES_BLOCK_SIZE */
    if (walk.nbytes) {
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                rfc3686_iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
        err = skcipher_walk_done(&walk, 0);
    }

    return err;
}

/*! \fn int ctr_rfc3686_aes_decrypt(struct skcipher_request *req)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief Counter mode AES (rfc3686) decrypt using linux crypto skcipher
 *  \param req skcipher request
 *  \return err
*/
int ctr_rfc3686_aes_decrypt(struct skcipher_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    unsigned int nbytes, dec_bytes;
    int err;
    u8 rfc3686_iv[16];

    err = skcipher_walk_virt(&walk, req, false);
    nbytes = walk.nbytes;

    /* set up counter block */
    memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
    memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, walk.iv, CTR_RFC3686_IV_SIZE);

    /* initialize counter portion of counter block */
    *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
        cpu_to_be32(1);

    while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
        dec_bytes -= (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                rfc3686_iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = skcipher_walk_done(&walk, nbytes);
    }

    /* to handle remaining bytes < AES_BLOCK_SIZE */
    if (walk.nbytes) {
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                rfc3686_iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
        err = skcipher_walk_done(&walk, 0);
    }

    return err;
}

/*
 * \brief AES function mappings
*/
struct skcipher_alg ifxdeu_ctr_rfc3686_aes_alg = {
    .base.cra_name          = "rfc3686(ctr(aes))",
    .base.cra_driver_name   = "ifxdeu-ctr-rfc3686(aes)",
    .base.cra_priority      = 400,
    .base.cra_flags         = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize     = 1,
    .base.cra_ctxsize       = sizeof(struct aes_ctx),
    .base.cra_module        = THIS_MODULE,
    .base.cra_list          = LIST_HEAD_INIT(ifxdeu_ctr_rfc3686_aes_alg.base.cra_list),
    .min_keysize            = CTR_RFC3686_MIN_KEY_SIZE,
    .max_keysize            = CTR_RFC3686_MAX_KEY_SIZE,
    .ivsize                 = CTR_RFC3686_IV_SIZE,
    .walksize               = AES_BLOCK_SIZE,
    .setkey                 = ctr_rfc3686_aes_set_key_skcipher,
    .encrypt                = ctr_rfc3686_aes_encrypt,
    .decrypt                = ctr_rfc3686_aes_decrypt,
};

static int aes_cbcmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final);

/*! \fn static void aes_cbcmac_transform(struct shash_desc *desc, u8 const *in)
 * \ingroup IFX_aes_cbcmac_FUNCTIONS
 * \brief save input block to context
 * \param desc linux crypto shash descriptor
 * \param in 16-byte block of input
*/
static void aes_cbcmac_transform(struct shash_desc *desc, u8 const *in)
{
    struct aes_ctx *mctx = crypto_shash_ctx(desc->tfm);

    if ( ((mctx->dbn)+1) > AES_CBCMAC_DBN_TEMP_SIZE )
    {
        //printk("aes_cbcmac_DBN_TEMP_SIZE exceeded\n");
        aes_cbcmac_final_impl(desc, (u8 *)mctx->hash, false);
    }

    memcpy(&mctx->temp[mctx->dbn], in, 16); //dbn workaround
    mctx->dbn += 1;
}

/*! \fn int aes_cbcmac_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen)
 * \ingroup IFX_aes_cbcmac_FUNCTIONS
 * \brief sets cbcmac aes key
 * \param tfm linux crypto shash transform
 * \param key input key
 * \param keylen key length
*/
static int aes_cbcmac_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen)
{
    return aes_set_key(crypto_shash_tfm(tfm), key, keylen);
}

/*! \fn int aes_cbcmac_init(struct shash_desc *desc)
 * \ingroup IFX_aes_cbcmac_FUNCTIONS
 * \brief initialize cbcmac aes context
 * \param desc linux crypto shash descriptor
*/
static int aes_cbcmac_init(struct shash_desc *desc)
{

    struct aes_ctx *mctx = crypto_shash_ctx(desc->tfm);

    mctx->dbn = 0; //dbn workaround
    mctx->started = 0;
    mctx->byte_count = 0;
    memset(mctx->hash, 0, AES_BLOCK_SIZE);

    return 0;
}

/*! \fn int aes_cbcmac_update(struct shash_desc *desc, const u8 *data, unsigned int len)
 * \ingroup IFX_aes_cbcmac_FUNCTIONS
 * \brief on-the-fly cbcmac aes computation
 * \param desc linux crypto shash descriptor
 * \param data input data
 * \param len size of input data
*/
static int aes_cbcmac_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
    struct aes_ctx *mctx = crypto_shash_ctx(desc->tfm);
    const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x0f);

    mctx->byte_count += len;

    if (avail > len) {
        memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
               data, len);
        return 0;
    }

    memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
           data, avail);

    aes_cbcmac_transform(desc, mctx->block);
    data += avail;
    len -= avail;

    while (len >= sizeof(mctx->block)) {
        memcpy(mctx->block, data, sizeof(mctx->block));
        aes_cbcmac_transform(desc, mctx->block);
        data += sizeof(mctx->block);
        len -= sizeof(mctx->block);
    }

    memcpy(mctx->block, data, len);
    return 0;
}

/*! \fn static int aes_cbcmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final)
 * \ingroup IFX_aes_cbcmac_FUNCTIONS
 * \brief compute final or intermediate cbcmac aes value
 * \param desc linux crypto shash descriptor
 * \param out final cbcmac aes output value
 * \param hash_final finalize or intermediate processing
*/
static int aes_cbcmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final)
{
    struct aes_ctx *mctx = crypto_shash_ctx(desc->tfm);
    const unsigned int offset = mctx->byte_count & 0x0f;
    char *p = (char *)mctx->block + offset;
    volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
    unsigned long flag;
    int i = 0;
    int dbn;
    u32 *in = mctx->temp[0];

    CRTCL_SECT_START;

    aes_set_key_hw (mctx);

    aes->controlr.E_D = !CRYPTO_DIR_ENCRYPT;    //encryption
    aes->controlr.O = 1; //0 ECB 1 CBC 2 OFB 3 CFB 4 CTR

    //aes->controlr.F = 128; //default; only for CFB and OFB modes; change only for customer-specific apps

    //printk("\ndbn = %d\n", mctx->dbn);

    if (mctx->started) {
        aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) mctx->hash);
        aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) mctx->hash + 1));
        aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) mctx->hash + 2));
        aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) mctx->hash + 3));
    } else {
        mctx->started = 1;
        aes->IV3R = 0;
        aes->IV2R = 0;
        aes->IV1R = 0;
        aes->IV0R = 0;
    }

    i = 0;
    for (dbn = 0; dbn < mctx->dbn; dbn++)
    {
        aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) in + (i * 4) + 0));
        aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) in + (i * 4) + 1));
        aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) in + (i * 4) + 2));
        aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) in + (i * 4) + 3));    /* start crypto */

        while (aes->controlr.BUS) {
            // this will not take long
        }

        in += 4;
    }

    *((u32 *) mctx->hash) = DEU_ENDIAN_SWAP(aes->IV3R);
    *((u32 *) mctx->hash + 1) = DEU_ENDIAN_SWAP(aes->IV2R);
    *((u32 *) mctx->hash + 2) = DEU_ENDIAN_SWAP(aes->IV1R);
    *((u32 *) mctx->hash + 3) = DEU_ENDIAN_SWAP(aes->IV0R);

    if (hash_final && offset) {
        aes->controlr.O = 0; //0 ECB 1 CBC 2 OFB 3 CFB 4 CTR
        crypto_xor(mctx->block, mctx->hash, offset);

        memcpy(p, mctx->hash + offset, (AES_BLOCK_SIZE - offset));

        aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) mctx->block + 0));
        aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) mctx->block + 1));
        aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) mctx->block + 2));
        aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) mctx->block + 3));    /* start crypto */

        while (aes->controlr.BUS) {
            // this will not take long
        }

        *((u32 *) mctx->hash) = DEU_ENDIAN_SWAP(aes->OD3R);
        *((u32 *) mctx->hash + 1) = DEU_ENDIAN_SWAP(aes->OD2R);
        *((u32 *) mctx->hash + 2) = DEU_ENDIAN_SWAP(aes->OD1R);
        *((u32 *) mctx->hash + 3) = DEU_ENDIAN_SWAP(aes->OD0R);
    }

    CRTCL_SECT_END;

    if (hash_final) {
        memcpy(out, mctx->hash, AES_BLOCK_SIZE);
        /* reset the context after we finish with the hash */
        aes_cbcmac_init(desc);
    } else {
        mctx->dbn = 0;
    }
    return 0;
}

/*! \fn static int aes_cbcmac_final(struct shash_desc *desc, u8 *out)
 * \ingroup IFX_aes_cbcmac_FUNCTIONS
 * \brief call aes_cbcmac_final_impl with hash_final true
 * \param desc linux crypto shash descriptor
 * \param out final cbcmac aes output value
*/
static int aes_cbcmac_final(struct shash_desc *desc, u8 *out)
{
    return aes_cbcmac_final_impl(desc, out, true);
}

/*! \fn int aes_cbcmac_init_tfm(struct crypto_tfm *tfm)
 * \ingroup IFX_aes_cbcmac_FUNCTIONS
 * \brief initialize pointers in aes_ctx
 * \param tfm linux crypto shash transform
*/
static int aes_cbcmac_init_tfm(struct crypto_tfm *tfm)
{
    struct aes_ctx *mctx = crypto_tfm_ctx(tfm);
    mctx->temp = kzalloc(AES_BLOCK_SIZE * AES_CBCMAC_DBN_TEMP_SIZE, GFP_KERNEL);
    if (!mctx->temp) return -ENOMEM;

    return 0;
}

/*! \fn void aes_cbcmac_exit_tfm(struct crypto_tfm *tfm)
 * \ingroup IFX_aes_cbcmac_FUNCTIONS
 * \brief free pointers in aes_ctx
 * \param tfm linux crypto shash transform
*/
static void aes_cbcmac_exit_tfm(struct crypto_tfm *tfm)
{
    struct aes_ctx *mctx = crypto_tfm_ctx(tfm);
    kfree(mctx->temp);
}

/*
 * \brief aes_cbcmac function mappings
*/
static struct shash_alg ifxdeu_cbcmac_aes_alg = {
    .digestsize         = AES_BLOCK_SIZE,
    .init               = aes_cbcmac_init,
    .update             = aes_cbcmac_update,
    .final              = aes_cbcmac_final,
    .setkey             = aes_cbcmac_setkey,
    .descsize           = sizeof(struct aes_ctx),
    .base               = {
        .cra_name       = "cbcmac(aes)",
        .cra_driver_name= "ifxdeu-cbcmac(aes)",
        .cra_priority   = 400,
        .cra_ctxsize    = sizeof(struct aes_ctx),
        .cra_flags      = CRYPTO_ALG_TYPE_HASH | CRYPTO_ALG_KERN_DRIVER_ONLY,
        .cra_blocksize  = 1,
        .cra_module     = THIS_MODULE,
        .cra_init       = aes_cbcmac_init_tfm,
        .cra_exit       = aes_cbcmac_exit_tfm,
    }
};
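
/*
 * Usage sketch (illustrative only, not part of the original driver):
 * "cbcmac(aes)" is consumed through the shash API (it is also the
 * primitive the generic ccm(aes) template looks up), e.g.:
 */
#if 0
#include <crypto/hash.h>

static int example_cbcmac_digest(const u8 key[16], const u8 *data,
                                 unsigned int len, u8 mac[AES_BLOCK_SIZE])
{
    struct crypto_shash *tfm;
    int err;

    tfm = crypto_alloc_shash("cbcmac(aes)", 0, 0);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    err = crypto_shash_setkey(tfm, key, 16);
    if (!err) {
        SHASH_DESC_ON_STACK(desc, tfm);
        desc->tfm = tfm;
        err = crypto_shash_digest(desc, data, len, mac);
    }

    crypto_free_shash(tfm);
    return err;
}
#endif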

/*! \fn int aes_set_key_aead (struct crypto_aead *aead, const uint8_t *in_key, unsigned int key_len)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief sets the AES keys for aead gcm
 *  \param aead linux crypto aead
 *  \param in_key input key
 *  \param key_len key lengths of 16, 24 and 32 bytes supported
 *  \return -EINVAL - bad key length, 0 - SUCCESS
*/
int aes_set_key_aead (struct crypto_aead *aead, const u8 *in_key, unsigned int key_len)
{
    struct aes_ctx *ctx = crypto_aead_ctx(aead);
    int err;

    err = aes_set_key(&aead->base, in_key, key_len);
    if (err) return err;

    memset(ctx->block, 0, sizeof(ctx->block));
    memset(ctx->lastbuffer, 0, AES_BLOCK_SIZE);
    ifx_deu_aes_ctr(ctx, ctx->block, ctx->block,
            ctx->lastbuffer, AES_BLOCK_SIZE, CRYPTO_DIR_ENCRYPT, 0);
    if (ctx->gf128) gf128mul_free_4k(ctx->gf128);
    ctx->gf128 = gf128mul_init_4k_lle((be128 *)ctx->block);

    return err;
}
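
/*
 * Background note (illustrative only, not part of the original driver):
 * the block encrypted above is the GHASH subkey H = AES_K(0^128); running
 * the all-zero block through the CTR path with an all-zero counter block
 * is equivalent to one ECB encryption of the zero block:
 */
#if 0
static void example_derive_ghash_subkey(struct aes_ctx *ctx)
{
    u8 zero_ctr[AES_BLOCK_SIZE] = { 0 };
    u8 h[AES_BLOCK_SIZE] = { 0 };

    /* H = AES_K(0^128) = AES_K(zero counter) XOR zero plaintext */
    ifx_deu_aes_ctr(ctx, h, h, zero_ctr, AES_BLOCK_SIZE, CRYPTO_DIR_ENCRYPT, 0);
}
#endif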

/*! \fn int gcm_aes_setauthsize (struct crypto_aead *aead, unsigned int authsize)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief validates the authentication tag size for aead gcm
 *  \param aead linux crypto aead
 *  \param authsize requested tag size
 *  \return -EINVAL - bad authsize length, 0 - SUCCESS
*/
int gcm_aes_setauthsize (struct crypto_aead *aead, unsigned int authsize)
{
    return crypto_gcm_check_authsize(authsize);
}
1608
1609 /*! \fn int gcm_aes_encrypt(struct aead_request *req)
1610 * \ingroup IFX_AES_FUNCTIONS
1611 * \brief GCM AES encrypt using linux crypto aead
1612 * \param req aead request
1613 * \return err
1614 */
1615 int gcm_aes_encrypt(struct aead_request *req)
1616 {
1617 struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1618 struct skcipher_walk walk;
1619 struct skcipher_request request;
1620 int err;
1621 unsigned int enc_bytes, nbytes;
1622 be128 lengths;
1623 u8 iv[AES_BLOCK_SIZE];
1624
1625 lengths.a = cpu_to_be64(req->assoclen * 8);
1626 lengths.b = cpu_to_be64(req->cryptlen * 8);
1627
1628 memset(ctx->hash, 0, sizeof(ctx->hash));
1629 memset(ctx->block, 0, sizeof(ctx->block));
1630 memcpy(iv, req->iv, GCM_AES_IV_SIZE);
1631 *(__be32 *)((void *)iv + GCM_AES_IV_SIZE) = cpu_to_be32(1);
1632 ifx_deu_aes_ctr(ctx, ctx->block, ctx->block,
1633 iv, 16, CRYPTO_DIR_ENCRYPT, 0);
1634
1635 request.cryptlen = req->cryptlen + req->assoclen;
1636 request.src = req->src;
1637 request.dst = req->dst;
1638 request.base = req->base;
1639
1640 crypto_skcipher_alg(crypto_skcipher_reqtfm(&request))->walksize = AES_BLOCK_SIZE;
1641
1642 if (req->assoclen && (req->assoclen < AES_BLOCK_SIZE))
1643 crypto_skcipher_alg(crypto_skcipher_reqtfm(&request))->walksize = req->assoclen;
1644
1645 err = skcipher_walk_virt(&walk, &request, false);
1646
    /* process assoc data if available: it is copied through to dst and
     * folded into GHASH, but never encrypted */
    if (req->assoclen > 0) {
        unsigned int assoc_remain, ghashlen;

        assoc_remain = req->assoclen;
        ghashlen = min(req->assoclen, walk.nbytes);
        while ((nbytes = enc_bytes = ghashlen) && (ghashlen >= AES_BLOCK_SIZE)) {
            u8 *temp;
            if (nbytes > req->assoclen) nbytes = enc_bytes = req->assoclen;
            enc_bytes -= (nbytes % AES_BLOCK_SIZE);
            memcpy(walk.dst.virt.addr, walk.src.virt.addr, enc_bytes);
            assoc_remain -= enc_bytes;
            temp = walk.dst.virt.addr;
            while (enc_bytes > 0) {
                u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)temp);
                gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
                enc_bytes -= AES_BLOCK_SIZE;
                temp += 16;
            }
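            /* shrink the walk stride so the next chunk stops exactly at the
             * end of the associated data; restore it once assoc data is done */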
            if (assoc_remain < AES_BLOCK_SIZE) walk.stride = assoc_remain;
            if (assoc_remain == 0) walk.stride = AES_BLOCK_SIZE;
            enc_bytes = nbytes - (nbytes % AES_BLOCK_SIZE);
            err = skcipher_walk_done(&walk, (walk.nbytes - enc_bytes));
            ghashlen = min(assoc_remain, walk.nbytes);
        }

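        /* a trailing partial assoc block is zero-padded before being
         * folded into GHASH */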
        if ((enc_bytes = ghashlen)) {
            memcpy(ctx->lastbuffer, walk.src.virt.addr, enc_bytes);
            memset(ctx->lastbuffer + enc_bytes, 0, (AES_BLOCK_SIZE - enc_bytes));
            memcpy(walk.dst.virt.addr, walk.src.virt.addr, ghashlen);
            u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)ctx->lastbuffer);
            gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
            walk.stride = AES_BLOCK_SIZE;
            err = skcipher_walk_done(&walk, (walk.nbytes - ghashlen));
        }
    }

    /* encrypt full blocks with CTR, then fold each ciphertext block into GHASH */
    while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
        u8 *temp;
        enc_bytes -= (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                        iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        temp = walk.dst.virt.addr;
        while (enc_bytes) {
            u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)temp);
            gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
            enc_bytes -= AES_BLOCK_SIZE;
            temp += 16;
        }
        err = skcipher_walk_done(&walk, nbytes);
    }

    /* crypt and hash remaining bytes < AES_BLOCK_SIZE */
    if ((enc_bytes = walk.nbytes)) {
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                        iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
        memcpy(ctx->lastbuffer, walk.dst.virt.addr, enc_bytes);
        memset(ctx->lastbuffer + enc_bytes, 0, (AES_BLOCK_SIZE - enc_bytes));
        u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)ctx->lastbuffer);
        gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
        err = skcipher_walk_done(&walk, 0);
    }

    /* finalize: fold the lengths block into GHASH, mask with E_K(J0) and
     * append the resulting tag after the ciphertext */
    u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)&lengths);
    gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
    u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)ctx->block);
    scatterwalk_map_and_copy(ctx->hash, req->dst, req->cryptlen + req->assoclen, crypto_aead_authsize(crypto_aead_reqtfm(req)), 1);

    aead_request_complete(req, 0);

    return err;
}

/*! \fn int gcm_aes_decrypt(struct aead_request *req)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief GCM AES decrypt using linux crypto aead
 * \param req aead request
 * \return err
*/
int gcm_aes_decrypt(struct aead_request *req)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
    struct skcipher_walk walk;
    struct skcipher_request request;
    int err;
    unsigned int dec_bytes, nbytes, authsize;
    be128 lengths;
    u8 iv[AES_BLOCK_SIZE];

    authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));

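    /* on decrypt, req->cryptlen covers ciphertext plus tag, so only
     * cryptlen - authsize bytes of ciphertext enter the lengths block */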
    lengths.a = cpu_to_be64(req->assoclen * 8);
    lengths.b = cpu_to_be64((req->cryptlen - authsize) * 8);

    memset(ctx->hash, 0, sizeof(ctx->hash));
    memset(ctx->block, 0, sizeof(ctx->block));
    memcpy(iv, req->iv, GCM_AES_IV_SIZE);
    *(__be32 *)((void *)iv + GCM_AES_IV_SIZE) = cpu_to_be32(1);
    ifx_deu_aes_ctr(ctx, ctx->block, ctx->block,
                    iv, 16, CRYPTO_DIR_ENCRYPT, 0);

    request.cryptlen = req->cryptlen + req->assoclen - authsize;
    request.src = req->src;
    request.dst = req->dst;
    request.base = req->base;
    crypto_skcipher_alg(crypto_skcipher_reqtfm(&request))->walksize = AES_BLOCK_SIZE;

    if (req->assoclen && (req->assoclen < AES_BLOCK_SIZE))
        crypto_skcipher_alg(crypto_skcipher_reqtfm(&request))->walksize = req->assoclen;

    err = skcipher_walk_virt(&walk, &request, false);

    /* process assoc data if available: it is copied through to dst and
     * folded into GHASH, but never decrypted */
    if (req->assoclen > 0) {
        unsigned int assoc_remain, ghashlen;

        assoc_remain = req->assoclen;
        ghashlen = min(req->assoclen, walk.nbytes);
        while ((nbytes = dec_bytes = ghashlen) && (ghashlen >= AES_BLOCK_SIZE)) {
            u8 *temp;
            if (nbytes > req->assoclen) nbytes = dec_bytes = req->assoclen;
            dec_bytes -= (nbytes % AES_BLOCK_SIZE);
            memcpy(walk.dst.virt.addr, walk.src.virt.addr, dec_bytes);
            assoc_remain -= dec_bytes;
            temp = walk.dst.virt.addr;
            while (dec_bytes > 0) {
                u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)temp);
                gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
                dec_bytes -= AES_BLOCK_SIZE;
                temp += 16;
            }
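            /* shrink the walk stride so the next chunk stops exactly at the
             * end of the associated data; restore it once assoc data is done */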
            if (assoc_remain < AES_BLOCK_SIZE) walk.stride = assoc_remain;
            if (assoc_remain == 0) walk.stride = AES_BLOCK_SIZE;
            dec_bytes = nbytes - (nbytes % AES_BLOCK_SIZE);
            err = skcipher_walk_done(&walk, (walk.nbytes - dec_bytes));
            ghashlen = min(assoc_remain, walk.nbytes);
        }

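        /* a trailing partial assoc block is zero-padded before being
         * folded into GHASH */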
        if ((dec_bytes = ghashlen)) {
            memcpy(ctx->lastbuffer, walk.src.virt.addr, dec_bytes);
            memset(ctx->lastbuffer + dec_bytes, 0, (AES_BLOCK_SIZE - dec_bytes));
            memcpy(walk.dst.virt.addr, walk.src.virt.addr, ghashlen);
            u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)ctx->lastbuffer);
            gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
            walk.stride = AES_BLOCK_SIZE;
            err = skcipher_walk_done(&walk, (walk.nbytes - ghashlen));
        }
    }

    /* fold each ciphertext block into GHASH first, then decrypt it with CTR */
    while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
        u8 *temp;
        dec_bytes -= (nbytes % AES_BLOCK_SIZE);
        temp = walk.src.virt.addr;
        while (dec_bytes) {
            u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)temp);
            gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
            dec_bytes -= AES_BLOCK_SIZE;
            temp += 16;
        }
        dec_bytes = nbytes - (nbytes % AES_BLOCK_SIZE);
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                        iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
        nbytes &= AES_BLOCK_SIZE - 1;
        err = skcipher_walk_done(&walk, nbytes);
    }

    /* crypt and hash remaining bytes < AES_BLOCK_SIZE */
    if ((dec_bytes = walk.nbytes)) {
        memcpy(ctx->lastbuffer, walk.src.virt.addr, dec_bytes);
        memset(ctx->lastbuffer + dec_bytes, 0, (AES_BLOCK_SIZE - dec_bytes));
        u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)ctx->lastbuffer);
        gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
        ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                        iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
        err = skcipher_walk_done(&walk, 0);
    }

    /* finalize: fold the lengths block into GHASH and mask with E_K(J0) to
     * obtain the expected tag */
    u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)&lengths);
    gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
    u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)ctx->block);

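    /* compare the computed tag against the received one with crypto_memneq
     * so the comparison runs in constant time */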
    scatterwalk_map_and_copy(ctx->lastbuffer, req->src, req->cryptlen + req->assoclen - authsize, authsize, 0);
    err = crypto_memneq(ctx->lastbuffer, ctx->hash, authsize) ? -EBADMSG : 0;

    aead_request_complete(req, 0);

    return err;
}

/*! \fn void aes_gcm_exit_tfm(struct crypto_tfm *tfm)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief free pointers in aes_ctx
 * \param tfm linux crypto transform
*/
static void aes_gcm_exit_tfm(struct crypto_tfm *tfm)
{
    struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
    if (ctx->gf128) gf128mul_free_4k(ctx->gf128);
}

/*
 * \brief AES function mappings
 */
struct aead_alg ifxdeu_gcm_aes_alg = {
    .base.cra_name = "gcm(aes)",
    .base.cra_driver_name = "ifxdeu-gcm(aes)",
    .base.cra_priority = 400,
    .base.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_KERN_DRIVER_ONLY,
    .base.cra_blocksize = 1,
    .base.cra_ctxsize = sizeof(struct aes_ctx),
    .base.cra_module = THIS_MODULE,
    .base.cra_list = LIST_HEAD_INIT(ifxdeu_gcm_aes_alg.base.cra_list),
    .base.cra_exit = aes_gcm_exit_tfm,
    .ivsize = GCM_AES_IV_SIZE,
    .maxauthsize = AES_BLOCK_SIZE,
    .chunksize = AES_BLOCK_SIZE,
    .setkey = aes_set_key_aead,
    .encrypt = gcm_aes_encrypt,
    .decrypt = gcm_aes_decrypt,
    .setauthsize = gcm_aes_setauthsize,
};
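
/*
 * Illustrative sketch only (not part of the driver): once this aead_alg is
 * registered, kernel users reach it through the generic AEAD API, roughly:
 *
 *   struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *   crypto_aead_setkey(tfm, key, 16);
 *   crypto_aead_setauthsize(tfm, 16);
 *   ...set up an aead_request and call crypto_aead_encrypt()...
 *
 * Error handling and request setup are omitted; the calls above are the
 * standard linux/crypto AEAD interface.
 */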

/*! \fn int ifxdeu_init_aes (void)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief function to initialize AES driver
 * \return 0 on success, nonzero error code on failure
*/
int ifxdeu_init_aes (void)
{
    int ret = -ENOSYS;

    aes_chip_init();

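    /* register algorithms in order; on failure the error labels below
     * unwind every registration that already succeeded, in reverse order */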
    if ((ret = crypto_register_alg(&ifxdeu_aes_alg)))
        goto aes_err;

    if ((ret = crypto_register_skcipher(&ifxdeu_ecb_aes_alg)))
        goto ecb_aes_err;

    if ((ret = crypto_register_skcipher(&ifxdeu_cbc_aes_alg)))
        goto cbc_aes_err;

    if ((ret = crypto_register_skcipher(&ifxdeu_xts_aes_alg)))
        goto xts_aes_err;

    if ((ret = crypto_register_skcipher(&ifxdeu_ofb_aes_alg)))
        goto ofb_aes_err;

    if ((ret = crypto_register_skcipher(&ifxdeu_cfb_aes_alg)))
        goto cfb_aes_err;

    if ((ret = crypto_register_skcipher(&ifxdeu_ctr_basic_aes_alg)))
        goto ctr_basic_aes_err;

    if ((ret = crypto_register_skcipher(&ifxdeu_ctr_rfc3686_aes_alg)))
        goto ctr_rfc3686_aes_err;

    if ((ret = crypto_register_shash(&ifxdeu_cbcmac_aes_alg)))
        goto cbcmac_aes_err;

    if ((ret = crypto_register_aead(&ifxdeu_gcm_aes_alg)))
        goto gcm_aes_err;

    CRTCL_SECT_INIT;

    printk (KERN_NOTICE "IFX DEU AES initialized%s%s.\n", disable_multiblock ? "" : " (multiblock)", disable_deudma ? "" : " (DMA)");
    return ret;

gcm_aes_err:
    printk (KERN_ERR "IFX gcm_aes initialization failed!\n");
    goto cbcmac_aes_unreg;
cbcmac_aes_err:
    printk (KERN_ERR "IFX cbcmac_aes initialization failed!\n");
    goto ctr_rfc3686_aes_unreg;
ctr_rfc3686_aes_err:
    printk (KERN_ERR "IFX ctr_rfc3686_aes initialization failed!\n");
    goto ctr_basic_aes_unreg;
ctr_basic_aes_err:
    printk (KERN_ERR "IFX ctr_basic_aes initialization failed!\n");
    goto cfb_aes_unreg;
cfb_aes_err:
    printk (KERN_ERR "IFX cfb_aes initialization failed!\n");
    goto ofb_aes_unreg;
ofb_aes_err:
    printk (KERN_ERR "IFX ofb_aes initialization failed!\n");
    goto xts_aes_unreg;
xts_aes_err:
    printk (KERN_ERR "IFX xts_aes initialization failed!\n");
    goto cbc_aes_unreg;
cbc_aes_err:
    printk (KERN_ERR "IFX cbc_aes initialization failed!\n");
    goto ecb_aes_unreg;
ecb_aes_err:
    printk (KERN_ERR "IFX ecb_aes initialization failed!\n");
    goto aes_unreg;
aes_err:
    printk (KERN_ERR "IFX DEU AES initialization failed!\n");
    return ret;

/* unregister everything that was registered before the failing step,
 * in reverse order of registration */
cbcmac_aes_unreg:
    crypto_unregister_shash (&ifxdeu_cbcmac_aes_alg);
ctr_rfc3686_aes_unreg:
    crypto_unregister_skcipher (&ifxdeu_ctr_rfc3686_aes_alg);
ctr_basic_aes_unreg:
    crypto_unregister_skcipher (&ifxdeu_ctr_basic_aes_alg);
cfb_aes_unreg:
    crypto_unregister_skcipher (&ifxdeu_cfb_aes_alg);
ofb_aes_unreg:
    crypto_unregister_skcipher (&ifxdeu_ofb_aes_alg);
xts_aes_unreg:
    crypto_unregister_skcipher (&ifxdeu_xts_aes_alg);
cbc_aes_unreg:
    crypto_unregister_skcipher (&ifxdeu_cbc_aes_alg);
ecb_aes_unreg:
    crypto_unregister_skcipher (&ifxdeu_ecb_aes_alg);
aes_unreg:
    crypto_unregister_alg (&ifxdeu_aes_alg);
    return ret;
}

/*! \fn void ifxdeu_fini_aes (void)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief unregister aes driver
*/
void ifxdeu_fini_aes (void)
{
    crypto_unregister_alg (&ifxdeu_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_ecb_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_cbc_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_xts_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_ofb_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_cfb_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_ctr_basic_aes_alg);
    crypto_unregister_skcipher (&ifxdeu_ctr_rfc3686_aes_alg);
    crypto_unregister_shash (&ifxdeu_cbcmac_aes_alg);
    crypto_unregister_aead (&ifxdeu_gcm_aes_alg);
}