target/linux/lantiq/patches/270-crypto.patch
1 --- a/drivers/crypto/Kconfig
2 +++ b/drivers/crypto/Kconfig
3 @@ -243,4 +243,75 @@
4 OMAP processors have SHA1/MD5 hw accelerator. Select this if you
5 want to use the OMAP module for SHA1/MD5 algorithms.
6
7 +config CRYPTO_DEV_LANTIQ
8 + bool "Support for Lantiq crypto engine"
9 + select CRYPTO_ALGAPI
10 + default y
11 + help
12 + Enable support for the Lantiq crypto engine (DEU).
13 + If unsure, say Y.
14 +
15 +menuconfig CRYPTO_DEV_LANTIQ_DES
16 + bool "Lantiq crypto hardware for DES algorithm"
17 + depends on CRYPTO_DEV_LANTIQ
18 + select CRYPTO_BLKCIPHER
19 + default y
20 + help
21 + Use crypto hardware for DES/3DES algorithm.
22 + If unsure say N.
23 +
24 +menuconfig CRYPTO_DEV_LANTIQ_AES
25 + bool "Lantiq crypto hardware for AES algorithm"
26 + depends on CRYPTO_DEV_LANTIQ
27 + select CRYPTO_BLKCIPHER
28 + default y
29 + help
30 + Use crypto hardware for AES algorithm.
31 + If unsure say N.
32 +
33 +menuconfig CRYPTO_DEV_LANTIQ_ARC4
34 + bool "Lantiq crypto hardware for ARC4 algorithm"
35 + depends on (CRYPTO_DEV_LANTIQ && IFXMIPS_AR9)
36 + select CRYPTO_BLKCIPHER
37 + default y
38 + help
39 + Use crypto hardware for ARC4 algorithm.
40 + If unsure say N.
41 +
42 +menuconfig CRYPTO_DEV_LANTIQ_MD5
43 + bool "Lantiq crypto hardware for MD5 algorithm"
44 + depends on CRYPTO_DEV_LANTIQ
45 + select CRYPTO_BLKCIPHER
46 + default y
47 + help
48 + Use crypto hardware for MD5 algorithm.
49 + If unsure say N.
50 +
51 +menuconfig CRYPTO_DEV_LANTIQ_SHA1
52 + bool "Lantiq crypto hardware for SHA1 algorithm"
53 + depends on CRYPTO_DEV_LANTIQ
54 + select CRYPTO_BLKCIPHER
55 + default y
56 + help
57 + Use crypto hardware for SHA1 algorithm.
58 + If unsure say N.
59 +
60 +menuconfig CRYPTO_DEV_LANTIQ_SHA1_HMAC
61 + bool "Lantiq crypto hardware for SHA1_HMAC algorithm"
62 + depends on (CRYPTO_DEV_LANTIQ && IFXMIPS_AR9)
63 + select CRYPTO_BLKCIPHER
64 + default y
65 + help
66 + Use crypto hardware for SHA1_HMAC algorithm.
67 + If unsure say N.
68 +
69 +menuconfig CRYPTO_DEV_LANTIQ_MD5_HMAC
70 + bool "Lantiq crypto hardware for MD5_HMAC algorithms"
71 + depends on (CRYPTO_DEV_LANTIQ && IFXMIPS_AR9)
72 + select CRYPTO_BLKCIPHER
73 + default y
74 + help
75 + Use crypto hardware for MD5_HMAC algorithm.
76 + If unsure say N.
77 +
78 endif # CRYPTO_HW
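
For reference, a hypothetical kernel .config fragment enabling the options added in the Kconfig hunk above (symbol names are taken from that hunk; all of them also default to y, and the ARC4/HMAC variants additionally depend on IFXMIPS_AR9, so they are omitted here):

CONFIG_CRYPTO_HW=y
CONFIG_CRYPTO_DEV_LANTIQ=y
CONFIG_CRYPTO_DEV_LANTIQ_DES=y
CONFIG_CRYPTO_DEV_LANTIQ_AES=y
CONFIG_CRYPTO_DEV_LANTIQ_MD5=y
CONFIG_CRYPTO_DEV_LANTIQ_SHA1=y
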
79 --- /dev/null
80 +++ b/drivers/crypto/lantiq/Makefile
81 @@ -0,0 +1,11 @@
82 +obj-$(CONFIG_CRYPTO_DEV_LANTIQ) += deu.o
83 +obj-$(CONFIG_CRYPTO_DEV_LANTIQ) += deu_falcon.o
84 +obj-$(CONFIG_CRYPTO_DEV_LANTIQ) += deu_danube.o
85 +obj-$(CONFIG_CRYPTO_DEV_LANTIQ) += deu_ar9.o
86 +obj-$(CONFIG_CRYPTO_DEV_LANTIQ_DES) += des.o
87 +obj-$(CONFIG_CRYPTO_DEV_LANTIQ_AES) += aes.o
88 +obj-$(CONFIG_CRYPTO_DEV_LANTIQ_ARC4) += arc4.o
89 +obj-$(CONFIG_CRYPTO_DEV_LANTIQ_SHA1) += sha1.o
90 +obj-$(CONFIG_CRYPTO_DEV_LANTIQ_SHA1_HMAC) += sha1_hmac.o
91 +obj-$(CONFIG_CRYPTO_DEV_LANTIQ_MD5) += md5.o
92 +obj-$(CONFIG_CRYPTO_DEV_LANTIQ_MD5_HMAC) += md5_hmac.o
93 --- /dev/null
94 +++ b/drivers/crypto/lantiq/aes.c
95 @@ -0,0 +1,1029 @@
96 +/*
97 + * This program is free software; you can redistribute it and/or modify
98 + * it under the terms of the GNU General Public License as published by
99 + * the Free Software Foundation; either version 2 of the License, or
100 + * (at your option) any later version.
101 + *
102 + * This program is distributed in the hope that it will be useful,
103 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
104 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
105 + * GNU General Public License for more details.
106 + *
107 + * You should have received a copy of the GNU General Public License
108 + * along with this program; if not, write to the Free Software
109 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
110 + *
111 + * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
112 + * Copyright (C) 2009 Mohammad Firdaus
113 + */
114 +
115 +/**
116 + \defgroup LQ_DEU LQ_DEU_DRIVERS
117 + \ingroup API
118 + \brief Lantiq DEU driver module
119 +*/
120 +
121 +/**
122 + \file aes.c
123 + \ingroup LQ_DEU
124 + \brief AES Encryption Driver main file
125 +*/
126 +
127 +/**
128 + \defgroup LQ_AES_FUNCTIONS LQ_AES_FUNCTIONS
129 + \ingroup LQ_DEU
130 + \brief Lantiq AES driver Functions
131 +*/
132 +
133 +#include <linux/version.h>
134 +#include <linux/module.h>
135 +#include <linux/init.h>
136 +#include <linux/types.h>
137 +#include <linux/errno.h>
138 +#include <linux/crypto.h>
139 +#include <linux/interrupt.h>
140 +#include <linux/delay.h>
141 +#include <asm/byteorder.h>
142 +#include <crypto/algapi.h>
143 +#include "deu.h"
144 +
145 +#ifdef CONFIG_CRYPTO_DEV_DMA
146 +# include "deu_dma.h"
147 +#endif
148 +
149 +static spinlock_t cipher_lock;
150 +
151 +/* Definition of constants */
152 +
153 +#define AES_MIN_KEY_SIZE 16
154 +#define AES_MAX_KEY_SIZE 32
155 +#define AES_BLOCK_SIZE 16
156 +#define CTR_RFC3686_NONCE_SIZE 4
157 +#define CTR_RFC3686_IV_SIZE 8
158 +#define CTR_RFC3686_MAX_KEY_SIZE (AES_MAX_KEY_SIZE \
159 + + CTR_RFC3686_NONCE_SIZE)
160 +
161 +struct aes_ctx {
162 + int key_length;
163 + u32 buf[AES_MAX_KEY_SIZE];
164 + u8 nonce[CTR_RFC3686_NONCE_SIZE];
165 +};
166 +
167 +/** \fn int aes_set_key(struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len)
168 + * \ingroup LQ_AES_FUNCTIONS
169 + * \brief sets the AES keys
170 + * \param tfm linux crypto algo transform
171 + * \param in_key input key
172 + * \param key_len key lengths of 16, 24 and 32 bytes supported
173 + * \return -EINVAL - bad key length, 0 - SUCCESS
174 +*/
175 +static int aes_set_key(struct crypto_tfm *tfm,
176 + const u8 *in_key,
177 + unsigned int key_len)
178 +{
179 + struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
180 + u32 *flags = &tfm->crt_flags;
181 +
182 + DPRINTF(0, "ctx @%p, key_len %d\n", ctx, key_len);
183 +
184 + if (key_len != 16 && key_len != 24 && key_len != 32) {
185 + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
186 + return -EINVAL;
187 + }
188 +
189 + ctx->key_length = key_len;
190 + memcpy((u8 *)(ctx->buf), in_key, key_len);
191 +
192 + return 0;
193 +}
194 +
195 +#ifndef CONFIG_CRYPTO_DEV_DMA
196 +/** \fn void deu_aes(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, size_t nbytes, int encdec, int mode)
197 + * \ingroup LQ_AES_FUNCTIONS
198 + * \brief main interface to AES hardware
199 + * \param ctx_arg crypto algo context
200 + * \param out_arg output bytestream
201 + * \param in_arg input bytestream
202 + * \param iv_arg initialization vector
203 + * \param nbytes length of bytestream
204 + * \param encdec 1 for encrypt; 0 for decrypt
205 + * \param mode operation mode such as ecb, cbc, ctr
206 + *
207 +*/
208 +static void deu_aes(void *ctx_arg,
209 + u8 *out_arg,
210 + const u8 *in_arg,
211 + u8 *iv_arg,
212 + size_t nbytes,
213 + int encdec,
214 + int mode)
215 +#else
216 +
217 +/** \fn void deu_aes_core(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, size_t nbytes, int encdec, int mode)
218 + * \ingroup LQ_AES_FUNCTIONS
219 + * \brief main interface to AES hardware
220 + * \param ctx_arg crypto algo context
221 + * \param out_arg output bytestream
222 + * \param in_arg input bytestream
223 + * \param iv_arg initialization vector
224 + * \param nbytes length of bytestream
225 + * \param encdec 1 for encrypt; 0 for decrypt
226 + * \param mode operation mode such as ecb, cbc, ctr
227 + *
228 +*/
229 +static void deu_aes_core(void *ctx_arg,
230 + u8 *out_arg,
231 + const u8 *in_arg,
232 + u8 *iv_arg,
233 + size_t nbytes,
234 + int encdec,
235 + int mode)
236 +#endif
237 +
238 +{
239 + /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
240 + volatile struct deu_aes *aes = (volatile struct deu_aes *)AES_START;
241 + struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
242 + u32 *in_key = ctx->buf;
243 + ulong flag;
244 + /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
245 + int key_len = ctx->key_length;
246 +
247 +#ifndef CONFIG_CRYPTO_DEV_DMA
248 + int i = 0;
249 + int byte_cnt = nbytes;
250 +#else
251 + volatile struct deu_dma *dma = (struct deu_dma *)LQ_DEU_DMA_CON;
252 + struct dma_device_info *dma_device = lq_deu[0].dma_device;
253 + /* struct deu_drv_priv *deu_priv =
254 + * (struct deu_drv_priv *)dma_device->priv; */
255 + int wlen = 0;
256 + u32 *outcopy = NULL;
257 + u32 *dword_mem_aligned_in = NULL;
258 +
259 +# ifdef CONFIG_CRYPTO_DEV_POLL_DMA
260 + u32 timeout = 0;
261 + u32 *out_dma = NULL;
262 +# endif
263 +#endif
264 +
265 + DPRINTF(0, "ctx @%p, mode %d, encdec %d\n", ctx, mode, encdec);
266 +
267 + CRTCL_SECT_START;
268 +
269 + /* 128, 192 or 256 bit key length */
270 + aes->ctrl.K = key_len / 8 - 2;
271 + if (key_len == 128 / 8) {
272 + aes->K3R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 0));
273 + aes->K2R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 1));
274 + aes->K1R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 2));
275 + aes->K0R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 3));
276 + }
277 + else if (key_len == 192 / 8) {
278 + aes->K5R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 0));
279 + aes->K4R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 1));
280 + aes->K3R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 2));
281 + aes->K2R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 3));
282 + aes->K1R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 4));
283 + aes->K0R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 5));
284 + }
285 + else if (key_len == 256 / 8) {
286 + aes->K7R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 0));
287 + aes->K6R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 1));
288 + aes->K5R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 2));
289 + aes->K4R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 3));
290 + aes->K3R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 4));
291 + aes->K2R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 5));
292 + aes->K1R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 6));
293 + aes->K0R = DEU_ENDIAN_SWAP(*((u32 *)in_key + 7));
294 + }
295 + else {
296 + CRTCL_SECT_END;
297 + return; /* -EINVAL; */
298 + }
299 +
300 + /* let HW pre-process DEcryption key in any case (even if
301 + ENcryption is used). Key Valid (KV) bit is then only
302 + checked in decryption routine! */
303 + aes->ctrl.PNK = 1;
304 +
305 +#ifdef CONFIG_CRYPTO_DEV_DMA
306 + while (aes->ctrl.BUS) {
307 + /* this will not take long */
308 + }
309 + AES_DMA_MISC_CONFIG();
310 +#endif
311 +
312 + aes->ctrl.E_D = !encdec; /* encryption */
313 + aes->ctrl.O = mode; /* 0 ECB 1 CBC 2 OFB 3 CFB 4 CTR */
314 + aes->ctrl.SM = 1; /* start after writing input register */
315 + aes->ctrl.DAU = 0; /* Disable Automatic Update of init
316 + vector */
317 + aes->ctrl.ARS = 1; /* Autostart Select - write to IHR */
318 +
319 + /* aes->ctrl.F = 128; */ /* default; only for CFB and OFB modes;
320 + change only for
321 + customer-specific apps */
322 + if (mode > 0) {
323 + aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *)iv_arg);
324 + aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 1));
325 + aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 2));
326 + aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 3));
327 + };
328 +
329 +#ifndef CONFIG_CRYPTO_DEV_DMA
330 + i = 0;
331 + while (byte_cnt >= 16) {
332 + aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *)in_arg + (i * 4) + 0));
333 + aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *)in_arg + (i * 4) + 1));
334 + aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *)in_arg + (i * 4) + 2));
335 + /* start crypto */
336 + aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *)in_arg + (i * 4) + 3));
337 +
338 + while (aes->ctrl.BUS) {
339 + /* this will not take long */
340 + }
341 +
342 + *((volatile u32 *)out_arg + (i * 4) + 0) = aes->OD3R;
343 + *((volatile u32 *)out_arg + (i * 4) + 1) = aes->OD2R;
344 + *((volatile u32 *)out_arg + (i * 4) + 2) = aes->OD1R;
345 + *((volatile u32 *)out_arg + (i * 4) + 3) = aes->OD0R;
346 +
347 + i++;
348 + byte_cnt -= 16;
349 + }
350 +#else /* dma */
351 + /* Prepare Rx buf length used in dma pseudo interrupt */
352 + /* deu_priv->deu_rx_buf = out_arg; */
353 + /* deu_priv->deu_rx_len = nbytes; */
354 +
355 + /* memory alignment issue */
356 + dword_mem_aligned_in = (u32 *)DEU_DWORD_REORDERING(in_arg,
357 + aes_buff_in,
358 + BUFFER_IN, nbytes);
359 +
360 + dma->ctrl.ALGO = 1; /* AES */
361 + dma->ctrl.BS = 0;
362 + aes->ctrl.DAU = 0;
363 + dma->ctrl.EN = 1;
364 +
365 + while (aes->ctrl.BUS) {
366 + /* wait for AES to be ready */
367 + };
368 +
369 + wlen = dma_device_write(dma_device, (u8 *)dword_mem_aligned_in,
370 + nbytes, NULL);
371 + if (wlen != nbytes) {
372 + dma->ctrl.EN = 0;
373 + CRTCL_SECT_END;
374 + printk(KERN_ERR "[%s %s %d]: dma_device_write fail!\n",
375 + __FILE__, __func__, __LINE__);
376 + return; /* -EINVAL; */
377 + }
378 +
379 + WAIT_AES_DMA_READY();
380 +
381 +# ifdef CONFIG_CRYPTO_DEV_POLL_DMA
382 + outcopy = (u32 *)DEU_DWORD_REORDERING(out_arg, aes_buff_out,
383 + BUFFER_OUT, nbytes);
384 +
385 + /* polling DMA rx channel */
386 + while ((dma_device_read(dma_device, (u8 **)&out_dma, NULL)) == 0) {
387 + timeout++;
388 +
389 + if (timeout >= 333000) {
390 + dma->ctrl.EN = 0;
391 + CRTCL_SECT_END;
392 + printk (KERN_ERR "[%s %s %d]: timeout!!\n",
393 + __FILE__, __func__, __LINE__);
394 + return; /* -EINVAL; */
395 + }
396 + }
397 +
398 + WAIT_AES_DMA_READY();
399 +
400 + AES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes);
401 +
402 +# else /* not working at the moment.. */
403 + CRTCL_SECT_END;
404 +
405 + /* sleep and wait for Rx finished */
406 + DEU_WAIT_EVENT(deu_priv->deu_thread_wait, DEU_EVENT,
407 + deu_priv->deu_event_flags);
408 +
409 + CRTCL_SECT_START;
410 +# endif
411 +
412 +#endif /* dma */
413 +
414 + /* tc.chen : copy iv_arg back */
415 + if (mode > 0) {
416 + *((u32 *)iv_arg) = DEU_ENDIAN_SWAP(*((u32 *)iv_arg));
417 + *((u32 *)iv_arg + 1) = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 1));
418 + *((u32 *)iv_arg + 2) = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 2));
419 + *((u32 *)iv_arg + 3) = DEU_ENDIAN_SWAP(*((u32 *)iv_arg + 3));
420 + }
421 +
422 + CRTCL_SECT_END;
423 +}
424 +
425 +/** \fn int ctr_rfc3686_aes_set_key(struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len)
426 + * \ingroup LQ_AES_FUNCTIONS
427 + * \brief sets RFC3686 key
428 + * \param tfm linux crypto algo transform
429 + * \param in_key input key
430 + * \param key_len key lengths of 20, 28 and 36 bytes supported; the last 4 bytes are the nonce
431 + * \return 0 - SUCCESS
432 + * -EINVAL - bad key length
433 +*/
434 +static int ctr_rfc3686_aes_set_key(struct crypto_tfm *tfm,
435 + const uint8_t *in_key,
436 + unsigned int key_len)
437 +{
438 + struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
439 + u32 *flags = &tfm->crt_flags;
440 +
441 + memcpy(ctx->nonce, in_key + (key_len - CTR_RFC3686_NONCE_SIZE),
442 + CTR_RFC3686_NONCE_SIZE);
443 +
444 + key_len -= CTR_RFC3686_NONCE_SIZE; /* remove 4 bytes of nonce */
445 +
446 + if (key_len != 16 && key_len != 24 && key_len != 32) {
447 + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
448 + return -EINVAL;
449 + }
450 +
451 + ctx->key_length = key_len;
452 +
453 + memcpy((u8 *)(ctx->buf), in_key, key_len);
454 +
455 + return 0;
456 +}
457 +
458 +/** \fn void deu_aes(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
459 + * \ingroup LQ_AES_FUNCTIONS
460 + * \brief main interface with DEU hardware in DMA mode
461 + * \param ctx_arg crypto algo context
462 + * \param out_arg output bytestream
463 + * \param in_arg input bytestream
464 + * \param iv_arg initialization vector
465 + * \param nbytes length of bytestream
466 + * \param encdec 1 for encrypt; 0 for decrypt
467 + * \param mode operation mode such as ecb, cbc, ctr
468 +*/
469 +
470 +#ifdef CONFIG_CRYPTO_DEV_DMA
471 +static void deu_aes(void *ctx_arg,
472 + u8 *out_arg,
473 + const u8 *in_arg,
474 + u8 *iv_arg,
475 + u32 nbytes,
476 + int encdec,
477 + int mode)
478 +{
479 + u32 remain = nbytes;
480 + u32 inc;
481 +
482 + while (remain > 0) {
483 + if (remain >= DEU_MAX_PACKET_SIZE)
484 + inc = DEU_MAX_PACKET_SIZE;
485 + else
486 + inc = remain;
487 +
488 + remain -= inc;
489 +
490 + deu_aes_core(ctx_arg, out_arg, in_arg, iv_arg, inc, encdec,
491 + mode);
492 +
493 + out_arg += inc;
494 + in_arg += inc;
495 + }
496 +}
497 +#endif
498 +
499 +/* definitions from linux/include/crypto.h:
500 +#define CRYPTO_TFM_MODE_ECB 0x00000001
501 +#define CRYPTO_TFM_MODE_CBC 0x00000002
502 +#define CRYPTO_TFM_MODE_CFB 0x00000004
503 +#define CRYPTO_TFM_MODE_CTR 0x00000008
504 +#define CRYPTO_TFM_MODE_OFB 0x00000010
505 +but hardware definition: 0 ECB 1 CBC 2 OFB 3 CFB 4 CTR */
506 +
507 +/** \fn void deu_aes_ecb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
508 + * \ingroup LQ_AES_FUNCTIONS
509 + * \brief sets AES hardware to ECB mode
510 + * \param ctx crypto algo context
511 + * \param dst output bytestream
512 + * \param src input bytestream
513 + * \param iv initialization vector
514 + * \param nbytes length of bytestream
515 + * \param encdec 1 for encrypt; 0 for decrypt
516 + * \param inplace not used
517 +*/
518 +static void deu_aes_ecb(void *ctx,
519 + uint8_t *dst,
520 + const uint8_t *src,
521 + uint8_t *iv,
522 + size_t nbytes,
523 + int encdec,
524 + int inplace)
525 +{
526 + deu_aes(ctx, dst, src, NULL, nbytes, encdec, 0);
527 +}
528 +
529 +/** \fn void deu_aes_cbc(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
530 + * \ingroup LQ_AES_FUNCTIONS
531 + * \brief sets AES hardware to CBC mode
532 + * \param ctx crypto algo context
533 + * \param dst output bytestream
534 + * \param src input bytestream
535 + * \param iv initialization vector
536 + * \param nbytes length of bytestream
537 + * \param encdec 1 for encrypt; 0 for decrypt
538 + * \param inplace not used
539 +*/
540 +static void deu_aes_cbc(void *ctx,
541 + uint8_t *dst,
542 + const uint8_t *src,
543 + uint8_t *iv,
544 + size_t nbytes,
545 + int encdec,
546 + int inplace)
547 +{
548 + deu_aes(ctx, dst, src, iv, nbytes, encdec, 1);
549 +}
550 +
551 +#if 0
552 +/** \fn void deu_aes_ofb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
553 + * \ingroup LQ_AES_FUNCTIONS
554 + * \brief sets AES hardware to OFB mode
555 + * \param ctx crypto algo context
556 + * \param dst output bytestream
557 + * \param src input bytestream
558 + * \param iv initialization vector
559 + * \param nbytes length of bytestream
560 + * \param encdec 1 for encrypt; 0 for decrypt
561 + * \param inplace not used
562 +*/
563 +static void deu_aes_ofb(void *ctx,
564 + uint8_t *dst,
565 + const uint8_t *src,
566 + uint8_t *iv,
567 + size_t nbytes,
568 + int encdec,
569 + int inplace)
570 +{
571 + deu_aes(ctx, dst, src, iv, nbytes, encdec, 2);
572 +}
573 +
574 +/** \fn void deu_aes_cfb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
575 + * \ingroup LQ_AES_FUNCTIONS
576 + * \brief sets AES hardware to CFB mode
577 + * \param ctx crypto algo context
578 + * \param dst output bytestream
579 + * \param src input bytestream
580 + * \param iv initialization vector
581 + * \param nbytes length of bytestream
582 + * \param encdec 1 for encrypt; 0 for decrypt
583 + * \param inplace not used
584 +*/
585 +static void deu_aes_cfb(void *ctx,
586 + uint8_t *dst,
587 + const uint8_t *src,
588 + uint8_t *iv,
589 + size_t nbytes,
590 + int encdec,
591 + int inplace)
592 +{
593 + deu_aes(ctx, dst, src, iv, nbytes, encdec, 3);
594 +}
595 +#endif
596 +
597 +/** \fn void deu_aes_ctr(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
598 + * \ingroup LQ_AES_FUNCTIONS
599 + * \brief sets AES hardware to CTR mode
600 + * \param ctx crypto algo context
601 + * \param dst output bytestream
602 + * \param src input bytestream
603 + * \param iv initialization vector
604 + * \param nbytes length of bytestream
605 + * \param encdec 1 for encrypt; 0 for decrypt
606 + * \param inplace not used
607 +*/
608 +static void deu_aes_ctr(void *ctx,
609 + uint8_t *dst,
610 + const uint8_t *src,
611 + uint8_t *iv,
612 + size_t nbytes,
613 + int encdec,
614 + int inplace)
615 +{
616 + deu_aes(ctx, dst, src, iv, nbytes, encdec, 4);
617 +}
618 +
619 +/** \fn void aes_encrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
620 + * \ingroup LQ_AES_FUNCTIONS
621 + * \brief encrypt AES_BLOCK_SIZE of data
622 + * \param tfm linux crypto algo transform
623 + * \param out output bytestream
624 + * \param in input bytestream
625 +*/
626 +static void aes_encrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
627 +{
628 + struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
629 + deu_aes(ctx, out, in, NULL, AES_BLOCK_SIZE, CRYPTO_DIR_ENCRYPT, 0);
630 +}
631 +
632 +/** \fn void aes_decrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
633 + * \ingroup LQ_AES_FUNCTIONS
634 + * \brief decrypt AES_BLOCK_SIZE of data
635 + * \param tfm linux crypto algo transform
636 + * \param out output bytestream
637 + * \param in input bytestream
638 +*/
639 +static void aes_decrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
640 +{
641 + struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
642 + deu_aes(ctx, out, in, NULL, AES_BLOCK_SIZE, CRYPTO_DIR_DECRYPT, 0);
643 +}
644 +
645 +/*
646 + * \brief AES function mappings
647 +*/
648 +static struct crypto_alg aes_alg = {
649 + .cra_name = "aes",
650 + .cra_driver_name = "lq_deu-aes",
651 + .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
652 + .cra_blocksize = AES_BLOCK_SIZE,
653 + .cra_ctxsize = sizeof(struct aes_ctx),
654 + .cra_module = THIS_MODULE,
655 + .cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
656 + .cra_u = {
657 + .cipher = {
658 + .cia_min_keysize = AES_MIN_KEY_SIZE,
659 + .cia_max_keysize = AES_MAX_KEY_SIZE,
660 + .cia_setkey = aes_set_key,
661 + .cia_encrypt = aes_encrypt,
662 + .cia_decrypt = aes_decrypt,
663 + }
664 + }
665 +};
666 +
667 +/** \fn int ecb_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
668 + * \ingroup LQ_AES_FUNCTIONS
669 + * \brief ECB AES encrypt using linux crypto blkcipher
670 + * \param desc blkcipher descriptor
671 + * \param dst output scatterlist
672 + * \param src input scatterlist
673 + * \param nbytes data size in bytes
674 + * \return err
675 +*/
676 +static int ecb_aes_encrypt(struct blkcipher_desc *desc,
677 + struct scatterlist *dst,
678 + struct scatterlist *src,
679 + unsigned int nbytes)
680 +{
681 + struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
682 + struct blkcipher_walk walk;
683 + int err;
684 +
685 + blkcipher_walk_init(&walk, dst, src, nbytes);
686 + err = blkcipher_walk_virt(desc, &walk);
687 +
688 + while ((nbytes = walk.nbytes)) {
689 + nbytes -= (nbytes % AES_BLOCK_SIZE);
690 + deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
691 + NULL, nbytes, CRYPTO_DIR_ENCRYPT, 0);
692 + nbytes &= AES_BLOCK_SIZE - 1;
693 + err = blkcipher_walk_done(desc, &walk, nbytes);
694 + }
695 +
696 + return err;
697 +}
698 +
699 +/** \fn int ecb_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
700 + * \ingroup LQ_AES_FUNCTIONS
701 + * \brief ECB AES decrypt using linux crypto blkcipher
702 + * \param desc blkcipher descriptor
703 + * \param dst output scatterlist
704 + * \param src input scatterlist
705 + * \param nbytes data size in bytes
706 + * \return err
707 +*/
708 +static int ecb_aes_decrypt(struct blkcipher_desc *desc,
709 + struct scatterlist *dst,
710 + struct scatterlist *src,
711 + unsigned int nbytes)
712 +{
713 + struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
714 + struct blkcipher_walk walk;
715 + int err;
716 +
717 + blkcipher_walk_init(&walk, dst, src, nbytes);
718 + err = blkcipher_walk_virt(desc, &walk);
719 +
720 + while ((nbytes = walk.nbytes)) {
721 + nbytes -= (nbytes % AES_BLOCK_SIZE);
722 + deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
723 + NULL, nbytes, CRYPTO_DIR_DECRYPT, 0);
724 + nbytes &= AES_BLOCK_SIZE - 1;
725 + err = blkcipher_walk_done(desc, &walk, nbytes);
726 + }
727 +
728 + return err;
729 +}
730 +
731 +/*
732 + * \brief AES function mappings
733 +*/
734 +static struct crypto_alg ecb_aes_alg = {
735 + .cra_name = "ecb(aes)",
736 + .cra_driver_name = "lq_deu-ecb(aes)",
737 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
738 + .cra_blocksize = AES_BLOCK_SIZE,
739 + .cra_ctxsize = sizeof(struct aes_ctx),
740 + .cra_type = &crypto_blkcipher_type,
741 + .cra_module = THIS_MODULE,
742 + .cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
743 + .cra_u = {
744 + .blkcipher = {
745 + .min_keysize = AES_MIN_KEY_SIZE,
746 + .max_keysize = AES_MAX_KEY_SIZE,
747 + .setkey = aes_set_key,
748 + .encrypt = ecb_aes_encrypt,
749 + .decrypt = ecb_aes_decrypt,
750 + }
751 + }
752 +};
753 +
754 +/** \fn int cbc_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
755 + * \ingroup LQ_AES_FUNCTIONS
756 + * \brief CBC AES encrypt using linux crypto blkcipher
757 + * \param desc blkcipher descriptor
758 + * \param dst output scatterlist
759 + * \param src input scatterlist
760 + * \param nbytes data size in bytes
761 + * \return err
762 +*/
763 +static int cbc_aes_encrypt(struct blkcipher_desc *desc,
764 + struct scatterlist *dst,
765 + struct scatterlist *src,
766 + unsigned int nbytes)
767 +{
768 + struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
769 + struct blkcipher_walk walk;
770 + int err;
771 +
772 + blkcipher_walk_init(&walk, dst, src, nbytes);
773 + err = blkcipher_walk_virt(desc, &walk);
774 +
775 + while ((nbytes = walk.nbytes)) {
776 + u8 *iv = walk.iv;
777 + nbytes -= (nbytes % AES_BLOCK_SIZE);
778 + deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
779 + iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
780 + nbytes &= AES_BLOCK_SIZE - 1;
781 + err = blkcipher_walk_done(desc, &walk, nbytes);
782 + }
783 +
784 + return err;
785 +}
786 +
787 +/** \fn int cbc_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
788 + * \ingroup LQ_AES_FUNCTIONS
789 + * \brief CBC AES decrypt using linux crypto blkcipher
790 + * \param desc blkcipher descriptor
791 + * \param dst output scatterlist
792 + * \param src input scatterlist
793 + * \param nbytes data size in bytes
794 + * \return err
795 +*/
796 +static int cbc_aes_decrypt(struct blkcipher_desc *desc,
797 + struct scatterlist *dst,
798 + struct scatterlist *src,
799 + unsigned int nbytes)
800 +{
801 + struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
802 + struct blkcipher_walk walk;
803 + int err;
804 +
805 + blkcipher_walk_init(&walk, dst, src, nbytes);
806 + err = blkcipher_walk_virt(desc, &walk);
807 +
808 + while ((nbytes = walk.nbytes)) {
809 + u8 *iv = walk.iv;
810 + nbytes -= (nbytes % AES_BLOCK_SIZE);
811 + deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
812 + iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
813 + nbytes &= AES_BLOCK_SIZE - 1;
814 + err = blkcipher_walk_done(desc, &walk, nbytes);
815 + }
816 +
817 + return err;
818 +}
819 +
820 +/*
821 + * \brief AES function mappings
822 +*/
823 +static struct crypto_alg cbc_aes_alg = {
824 + .cra_name = "cbc(aes)",
825 + .cra_driver_name = "lq_deu-cbc(aes)",
826 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
827 + .cra_blocksize = AES_BLOCK_SIZE,
828 + .cra_ctxsize = sizeof(struct aes_ctx),
829 + .cra_type = &crypto_blkcipher_type,
830 + .cra_module = THIS_MODULE,
831 + .cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
832 + .cra_u = {
833 + .blkcipher = {
834 + .min_keysize = AES_MIN_KEY_SIZE,
835 + .max_keysize = AES_MAX_KEY_SIZE,
836 + .ivsize = AES_BLOCK_SIZE,
837 + .setkey = aes_set_key,
838 + .encrypt = cbc_aes_encrypt,
839 + .decrypt = cbc_aes_decrypt,
840 + }
841 + }
842 +};
843 +
844 +/** \fn int ctr_basic_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
845 + * \ingroup LQ_AES_FUNCTIONS
846 + * \brief Counter mode AES encrypt using linux crypto blkcipher
847 + * \param desc blkcipher descriptor
848 + * \param dst output scatterlist
849 + * \param src input scatterlist
850 + * \param nbytes data size in bytes
851 + * \return err
852 +*/
853 +static int ctr_basic_aes_encrypt(struct blkcipher_desc *desc,
854 + struct scatterlist *dst,
855 + struct scatterlist *src,
856 + unsigned int nbytes)
857 +{
858 + struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
859 + struct blkcipher_walk walk;
860 + int err;
861 +
862 + blkcipher_walk_init(&walk, dst, src, nbytes);
863 + err = blkcipher_walk_virt(desc, &walk);
864 +
865 + while ((nbytes = walk.nbytes)) {
866 + u8 *iv = walk.iv;
867 + nbytes -= (nbytes % AES_BLOCK_SIZE);
868 + deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
869 + iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
870 + nbytes &= AES_BLOCK_SIZE - 1;
871 + err = blkcipher_walk_done(desc, &walk, nbytes);
872 + }
873 +
874 + return err;
875 +}
876 +
877 +/** \fn int ctr_basic_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
878 + * \ingroup LQ_AES_FUNCTIONS
879 + * \brief Counter mode AES decrypt using linux crypto blkcipher
880 + * \param desc blkcipher descriptor
881 + * \param dst output scatterlist
882 + * \param src input scatterlist
883 + * \param nbytes data size in bytes
884 + * \return err
885 +*/
886 +static int ctr_basic_aes_decrypt(struct blkcipher_desc *desc,
887 + struct scatterlist *dst,
888 + struct scatterlist *src,
889 + unsigned int nbytes)
890 +{
891 + struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
892 + struct blkcipher_walk walk;
893 + int err;
894 +
895 + blkcipher_walk_init(&walk, dst, src, nbytes);
896 + err = blkcipher_walk_virt(desc, &walk);
897 +
898 + while ((nbytes = walk.nbytes)) {
899 + u8 *iv = walk.iv;
900 + nbytes -= (nbytes % AES_BLOCK_SIZE);
901 + deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
902 + iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
903 + nbytes &= AES_BLOCK_SIZE - 1;
904 + err = blkcipher_walk_done(desc, &walk, nbytes);
905 + }
906 +
907 + return err;
908 +}
909 +
910 +/*
911 + * \brief AES function mappings
912 +*/
913 +static struct crypto_alg ctr_basic_aes_alg = {
914 + .cra_name = "ctr(aes)",
915 + .cra_driver_name = "lq_deu-ctr(aes)",
916 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
917 + .cra_blocksize = AES_BLOCK_SIZE,
918 + .cra_ctxsize = sizeof(struct aes_ctx),
919 + .cra_type = &crypto_blkcipher_type,
920 + .cra_module = THIS_MODULE,
921 + .cra_list = LIST_HEAD_INIT(ctr_basic_aes_alg.cra_list),
922 + .cra_u = {
923 + .blkcipher = {
924 + .min_keysize = AES_MIN_KEY_SIZE,
925 + .max_keysize = AES_MAX_KEY_SIZE,
926 + .ivsize = AES_BLOCK_SIZE,
927 + .setkey = aes_set_key,
928 + .encrypt = ctr_basic_aes_encrypt,
929 + .decrypt = ctr_basic_aes_decrypt,
930 + }
931 + }
932 +};
933 +
934 +/** \fn int ctr_rfc3686_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
935 + * \ingroup LQ_AES_FUNCTIONS
936 + * \brief Counter mode AES (rfc3686) encrypt using linux crypto blkcipher
937 + * \param desc blkcipher descriptor
938 + * \param dst output scatterlist
939 + * \param src input scatterlist
940 + * \param nbytes data size in bytes
941 + * \return err
942 +*/
943 +static int ctr_rfc3686_aes_encrypt(struct blkcipher_desc *desc,
944 + struct scatterlist *dst,
945 + struct scatterlist *src,
946 + unsigned int nbytes)
947 +{
948 + struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
949 + struct blkcipher_walk walk;
950 + int err;
951 + u8 rfc3686_iv[16];
952 +
953 + blkcipher_walk_init(&walk, dst, src, nbytes);
954 + err = blkcipher_walk_virt(desc, &walk);
955 +
956 + /* set up counter block */
957 + memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
958 + memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, walk.iv,
959 + CTR_RFC3686_IV_SIZE);
960 +
961 + /* initialize counter portion of counter block */
962 + *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
963 + cpu_to_be32(1);
964 +
965 + while ((nbytes = walk.nbytes)) {
966 + nbytes -= (nbytes % AES_BLOCK_SIZE);
967 + deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
968 + rfc3686_iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
969 + nbytes &= AES_BLOCK_SIZE - 1;
970 + err = blkcipher_walk_done(desc, &walk, nbytes);
971 + }
972 +
973 + return err;
974 +}
975 +
976 +/** \fn int ctr_rfc3686_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
977 + * \ingroup LQ_AES_FUNCTIONS
978 + * \brief Counter mode AES (rfc3686) decrypt using linux crypto blkcipher
979 + * \param desc blkcipher descriptor
980 + * \param dst output scatterlist
981 + * \param src input scatterlist
982 + * \param nbytes data size in bytes
983 + * \return err
984 +*/
985 +static int ctr_rfc3686_aes_decrypt(struct blkcipher_desc *desc,
986 + struct scatterlist *dst,
987 + struct scatterlist *src,
988 + unsigned int nbytes)
989 +{
990 + struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
991 + struct blkcipher_walk walk;
992 + int err;
993 + u8 rfc3686_iv[16];
994 +
995 + blkcipher_walk_init(&walk, dst, src, nbytes);
996 + err = blkcipher_walk_virt(desc, &walk);
997 +
998 + /* set up counter block */
999 + memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
1000 + memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, walk.iv,
1001 + CTR_RFC3686_IV_SIZE);
1002 +
1003 + /* initialize counter portion of counter block */
1004 + *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
1005 + cpu_to_be32(1);
1006 +
1007 + while ((nbytes = walk.nbytes)) {
1008 + nbytes -= (nbytes % AES_BLOCK_SIZE);
1009 + deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1010 + rfc3686_iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
1011 + nbytes &= AES_BLOCK_SIZE - 1;
1012 + err = blkcipher_walk_done(desc, &walk, nbytes);
1013 + }
1014 +
1015 + return err;
1016 +}
1017 +
1018 +/*
1019 + * \brief AES function mappings
1020 +*/
1021 +static struct crypto_alg ctr_rfc3686_aes_alg = {
1022 + .cra_name = "rfc3686(ctr(aes))",
1023 + .cra_driver_name = "lq_deu-ctr-rfc3686(aes)",
1024 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1025 + .cra_blocksize = AES_BLOCK_SIZE,
1026 + .cra_ctxsize = sizeof(struct aes_ctx),
1027 + .cra_type = &crypto_blkcipher_type,
1028 + .cra_module = THIS_MODULE,
1029 + .cra_list = LIST_HEAD_INIT(ctr_rfc3686_aes_alg.cra_list),
1030 + .cra_u = {
1031 + .blkcipher = {
1032 + .min_keysize = AES_MIN_KEY_SIZE,
1033 + .max_keysize = CTR_RFC3686_MAX_KEY_SIZE,
1034 + .ivsize = CTR_RFC3686_IV_SIZE,
1035 + .setkey = ctr_rfc3686_aes_set_key,
1036 + .encrypt = ctr_rfc3686_aes_encrypt,
1037 + .decrypt = ctr_rfc3686_aes_decrypt,
1038 + }
1039 + }
1040 +};
1041 +
1042 +/** \fn int lq_deu_init_aes (void)
1043 + * \ingroup LQ_AES_FUNCTIONS
1044 + * \brief function to initialize AES driver
1045 + * \return ret
1046 +*/
1047 +int lq_deu_init_aes(void)
1048 +{
1049 + int ret;
1050 +
1051 + if ((ret = crypto_register_alg(&aes_alg)))
1052 + goto aes_err;
1053 +
1054 + if ((ret = crypto_register_alg(&ecb_aes_alg)))
1055 + goto ecb_aes_err;
1056 +
1057 + if ((ret = crypto_register_alg(&cbc_aes_alg)))
1058 + goto cbc_aes_err;
1059 +
1060 + if ((ret = crypto_register_alg(&ctr_basic_aes_alg)))
1061 + goto ctr_basic_aes_err;
1062 +
1063 + if ((ret = crypto_register_alg(&ctr_rfc3686_aes_alg)))
1064 + goto ctr_rfc3686_aes_err;
1065 +
1066 + deu_aes_chip_init();
1067 +
1068 + CRTCL_SECT_INIT;
1069 +
1070 +#ifdef CONFIG_CRYPTO_DEV_DMA
1071 + if (ALLOCATE_MEMORY(BUFFER_IN, AES_ALGO) < 0) {
1072 + printk(KERN_ERR "[%s %s %d]: malloc memory fail!\n",
1073 + __FILE__, __func__, __LINE__);
1074 + goto ctr_rfc3686_aes_err;
1075 + }
1076 + if (ALLOCATE_MEMORY(BUFFER_OUT, AES_ALGO) < 0) {
1077 + printk(KERN_ERR "[%s %s %d]: malloc memory fail!\n",
1078 + __FILE__, __func__, __LINE__);
1079 + goto ctr_rfc3686_aes_err;
1080 + }
1081 +#endif
1082 +
1083 + printk(KERN_NOTICE "Lantiq DEU AES initialized%s.\n",
1084 + disable_deudma ? "" : " (DMA)");
1085 + return ret;
1086 +
1087 +ctr_rfc3686_aes_err:
1088 + crypto_unregister_alg(&ctr_rfc3686_aes_alg);
1089 + printk(KERN_ERR "Lantiq ctr_rfc3686_aes initialization failed!\n");
1090 + return ret;
1091 +ctr_basic_aes_err:
1092 + crypto_unregister_alg(&ctr_basic_aes_alg);
1093 + printk(KERN_ERR "Lantiq ctr_basic_aes initialization failed!\n");
1094 + return ret;
1095 +cbc_aes_err:
1096 + crypto_unregister_alg(&cbc_aes_alg);
1097 + printk(KERN_ERR "Lantiq cbc_aes initialization failed!\n");
1098 + return ret;
1099 +ecb_aes_err:
1100 + crypto_unregister_alg(&ecb_aes_alg);
1101 + printk(KERN_ERR "Lantiq aes initialization failed!\n");
1102 + return ret;
1103 +aes_err:
1104 + printk(KERN_ERR "Lantiq DEU AES initialization failed!\n");
1105 + return ret;
1106 +}
1107 +
1108 +/** \fn void lq_deu_fini_aes(void)
1109 + * \ingroup LQ_AES_FUNCTIONS
1110 + * \brief unregister aes driver
1111 +*/
1112 +void lq_deu_fini_aes(void)
1113 +{
1114 + crypto_unregister_alg(&aes_alg);
1115 + crypto_unregister_alg(&ecb_aes_alg);
1116 + crypto_unregister_alg(&cbc_aes_alg);
1117 + crypto_unregister_alg(&ctr_basic_aes_alg);
1118 + crypto_unregister_alg(&ctr_rfc3686_aes_alg);
1119 +
1120 +#ifdef CONFIG_CRYPTO_DEV_DMA
1121 + FREE_MEMORY(aes_buff_in);
1122 + FREE_MEMORY(aes_buff_out);
1123 +#endif
1124 +}
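
As an aside (not part of the patch), a minimal sketch of how a kernel caller of this era could exercise the "cbc(aes)" transform registered above through the legacy blkcipher API. The key, IV and buffer are made-up test values, the function name is illustrative, and it is assumed that the crypto core selects this driver rather than the generic cbc(aes) template:

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int deu_cbc_aes_smoke_test(void)
{
	u8 key[16] = { 0 };	/* illustrative all-zero 128-bit key */
	u8 iv[16] = { 0 };	/* illustrative IV, one AES block */
	u8 buf[32] = { 0 };	/* two AES blocks of test data */
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int ret;

	/* the DEU driver registers itself under the generic "cbc(aes)" name */
	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_blkcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out;

	crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));

	desc.tfm = tfm;
	desc.flags = 0;
	sg_init_one(&sg, buf, sizeof(buf));

	/* encrypt in place; if this driver is selected, the call ends up
	   in cbc_aes_encrypt() and from there in deu_aes_cbc() */
	ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));
out:
	crypto_free_blkcipher(tfm);
	return ret;
}
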
1125 --- /dev/null
1126 +++ b/drivers/crypto/lantiq/arc4.c
1127 @@ -0,0 +1,397 @@
1128 +/*
1129 + * This program is free software; you can redistribute it and/or modify
1130 + * it under the terms of the GNU General Public License as published by
1131 + * the Free Software Foundation; either version 2 of the License, or
1132 + * (at your option) any later version.
1133 + *
1134 + * This program is distributed in the hope that it will be useful,
1135 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1136 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1137 + * GNU General Public License for more details.
1138 + *
1139 + * You should have received a copy of the GNU General Public License
1140 + * along with this program; if not, write to the Free Software
1141 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
1142 + *
1143 + * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
1144 + * Copyright (C) 2009 Mohammad Firdaus
1145 + */
1146 +
1147 +/**
1148 + \defgroup LQ_DEU LQ_DEU_DRIVERS
1149 + \ingroup API
1150 + \brief Lantiq DEU driver module
1151 +*/
1152 +
1153 +/**
1154 + \file arc4.c
1155 + \ingroup LQ_DEU
1156 + \brief ARC4 encryption DEU driver file
1157 +*/
1158 +
1159 +/**
1160 + \defgroup LQ_ARC4_FUNCTIONS LQ_ARC4_FUNCTIONS
1161 + \ingroup LQ_DEU
1162 + \brief Lantiq DEU driver functions
1163 +*/
1164 +
1165 +#include <linux/version.h>
1166 +#include <linux/module.h>
1167 +#include <linux/init.h>
1168 +#include <linux/types.h>
1169 +#include <linux/errno.h>
1170 +#include <linux/crypto.h>
1171 +#include <crypto/algapi.h>
1172 +#include <linux/interrupt.h>
1173 +#include <asm/byteorder.h>
1174 +#include <linux/delay.h>
1175 +
1176 +#ifdef CONFIG_SOL_LANTIQ_XWAY
1177 +
1178 +#include "deu.h"
1179 +
1180 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
1181 +
1182 +static spinlock_t cipher_lock;
1183 +
1184 + /* Preprocessor declarations */
1185 +#define ARC4_MIN_KEY_SIZE 1
1186 +/* #define ARC4_MAX_KEY_SIZE 256 */
1187 +#define ARC4_MAX_KEY_SIZE 16
1188 +#define ARC4_BLOCK_SIZE 1
1189 +
1190 +/*
1191 + * \brief arc4 private structure
1192 +*/
1193 +struct arc4_ctx {
1194 + int key_length;
1195 + u8 buf[120];
1196 +};
1197 +
1198 +/** \fn static void deu_arc4(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
1199 + \ingroup LQ_ARC4_FUNCTIONS
1200 + \brief main interface to ARC4 hardware
1201 + \param ctx_arg crypto algo context
1202 + \param out_arg output bytestream
1203 + \param in_arg input bytestream
1204 + \param iv_arg initialization vector
1205 + \param nbytes length of bytestream
1206 + \param encdec 1 for encrypt; 0 for decrypt
1207 + \param mode operation mode such as ecb, cbc, ctr
1208 +*/
1209 +static void deu_arc4(void *ctx_arg,
1210 + u8 *out_arg,
1211 + const u8 *in_arg,
1212 + u8 *iv_arg,
1213 + u32 nbytes,
1214 + int encdec,
1215 + int mode)
1216 +{
1217 + volatile struct deu_arc4 *arc4 = (struct deu_arc4 *) ARC4_START;
1218 + int i = 0;
1219 + ulong flag;
1220 +
1221 +#if 1 /* need to handle nbytes not multiple of 16 */
1222 + volatile u32 tmp_array32[4];
1223 + volatile u8 *tmp_ptr8;
1224 + int remaining_bytes, j;
1225 +#endif
1226 +
1227 + CRTCL_SECT_START;
1228 +
1229 + arc4->IDLEN = nbytes;
1230 +
1231 +#if 1
1232 + while (i < nbytes) {
1233 + arc4->ID3R = *((u32 *) in_arg + (i>>2) + 0);
1234 + arc4->ID2R = *((u32 *) in_arg + (i>>2) + 1);
1235 + arc4->ID1R = *((u32 *) in_arg + (i>>2) + 2);
1236 + arc4->ID0R = *((u32 *) in_arg + (i>>2) + 3);
1237 +
1238 + arc4->ctrl.GO = 1;
1239 +
1240 + while (arc4->ctrl.BUS) {
1241 + /* this will not take long */ }
1242 +
1243 +#if 1
1244 + /* need to handle nbytes not multiple of 16 */
1245 + tmp_array32[0] = arc4->OD3R;
1246 + tmp_array32[1] = arc4->OD2R;
1247 + tmp_array32[2] = arc4->OD1R;
1248 + tmp_array32[3] = arc4->OD0R;
1249 +
1250 + remaining_bytes = nbytes - i;
1251 + if (remaining_bytes > 16)
1252 + remaining_bytes = 16;
1253 +
1254 + tmp_ptr8 = (u8 *)&tmp_array32[0];
1255 + for (j = 0; j < remaining_bytes; j++)
1256 + *out_arg++ = *tmp_ptr8++;
1257 +#else
1258 + *((u32 *) out_arg + (i>>2) + 0) = arc4->OD3R;
1259 + *((u32 *) out_arg + (i>>2) + 1) = arc4->OD2R;
1260 + *((u32 *) out_arg + (i>>2) + 2) = arc4->OD1R;
1261 + *((u32 *) out_arg + (i>>2) + 3) = arc4->OD0R;
1262 +#endif
1263 +
1264 + i += 16;
1265 + }
1266 +#else /* dma */
1267 +
1268 +#endif /* dma */
1269 +
1270 + CRTCL_SECT_END;
1271 +}
1272 +
1273 +/** \fn arc4_chip_init(void)
1274 + \ingroup LQ_ARC4_FUNCTIONS
1275 + \brief initialize arc4 hardware
1276 +*/
1277 +static void arc4_chip_init(void)
1278 +{
1279 + /* do nothing */
1280 +}
1281 +
1282 +/** \fn static int arc4_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
1283 + \ingroup LQ_ARC4_FUNCTIONS
1284 + \brief sets ARC4 key
1285 + \param tfm linux crypto algo transform
1286 + \param in_key input key
1287 + \param key_len key lengths less than or equal to 16 bytes supported
1288 +*/
1289 +static int arc4_set_key(struct crypto_tfm *tfm,
1290 + const u8 *inkey,
1291 + unsigned int key_len)
1292 +{
1293 + /* struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); */
1294 + volatile struct deu_arc4 *arc4 = (struct deu_arc4 *) ARC4_START;
1295 +
1296 + u32 *in_key = (u32 *)inkey;
1297 +
1298 + /* all key bits must be programmed in one go */
1299 +#if 1
1300 + /* #ifndef CONFIG_CRYPTO_DEV_VR9_DMA */
1301 + *LQ_ARC4_CON = ( (1<<31) | ((key_len - 1)<<27) | (1<<26) | (3<<16) );
1302 + /* NDC=1,ENDI=1,GO=0,KSAE=1,SM=0 */
1303 +
1304 + arc4->K3R = *((u32 *) in_key + 0);
1305 + arc4->K2R = *((u32 *) in_key + 1);
1306 + arc4->K1R = *((u32 *) in_key + 2);
1307 + arc4->K0R = *((u32 *) in_key + 3);
1308 +#else /* dma */
1309 + *AMAZONS_ARC4_CON = ( (1<<31) | ((key_len - 1)<<27) | (1<<26) | (3<<16) | (1<<4) );
1310 + /* NDC=1,ENDI=1,GO=0,KSAE=1,SM=1 */
1311 +
1312 + arc4->K3R = *((u32 *) in_key + 0);
1313 + arc4->K2R = *((u32 *) in_key + 1);
1314 + arc4->K1R = *((u32 *) in_key + 2);
1315 + arc4->K0R = *((u32 *) in_key + 3);
1316 +
1317 +#if 0
1318 + arc4->K3R = deu_endian_swap(*((u32 *) in_key + 0));
1319 + arc4->K2R = deu_endian_swap(*((u32 *) in_key + 1));
1320 + arc4->K1R = deu_endian_swap(*((u32 *) in_key + 2));
1321 + arc4->K0R = deu_endian_swap(*((u32 *) in_key + 3));
1322 +#endif
1323 +
1324 +#endif
1325 +
1326 +#if 0 /* arc4 is an ugly state machine, KSAE can only be set once per session */
1327 + ctx->key_length = key_len;
1328 +
1329 + memcpy((u8 *)(ctx->buf), in_key, key_len);
1330 +#endif
1331 +
1332 + return 0;
1333 +}
1334 +
1335 +/** \fn static void deu_arc4_ecb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
1336 + \ingroup LQ_ARC4_FUNCTIONS
1337 + \brief sets ARC4 hardware to ECB mode
1338 + \param ctx crypto algo context
1339 + \param dst output bytestream
1340 + \param src input bytestream
1341 + \param iv initialization vector
1342 + \param nbytes length of bytestream
1343 + \param encdec 1 for encrypt; 0 for decrypt
1344 + \param inplace not used
1345 +*/
1346 +static void deu_arc4_ecb(void *ctx,
1347 + uint8_t *dst,
1348 + const uint8_t *src,
1349 + uint8_t *iv,
1350 + size_t nbytes,
1351 + int encdec,
1352 + int inplace)
1353 +{
1354 + deu_arc4(ctx, dst, src, NULL, nbytes, encdec, 0);
1355 +}
1356 +
1357 +/** \fn static void arc4_crypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
1358 + \ingroup LQ_ARC4_FUNCTIONS
1359 + \brief encrypt/decrypt ARC4_BLOCK_SIZE of data
1360 + \param tfm linux crypto algo transform
1361 + \param out output bytestream
1362 + \param in input bytestream
1363 +*/
1364 +static void arc4_crypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
1365 +{
1366 + struct arc4_ctx *ctx = crypto_tfm_ctx(tfm);
1367 +
1368 + deu_arc4(ctx, out, in, NULL, ARC4_BLOCK_SIZE,
1369 + CRYPTO_DIR_DECRYPT, CRYPTO_TFM_MODE_ECB);
1370 +}
1371 +
1372 +/*
1373 + * \brief ARC4 function mappings
1374 +*/
1375 +static struct crypto_alg arc4_alg = {
1376 + .cra_name = "arc4",
1377 + .cra_driver_name = "lq_deu-arc4",
1378 + .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
1379 + .cra_blocksize = ARC4_BLOCK_SIZE,
1380 + .cra_ctxsize = sizeof(struct arc4_ctx),
1381 + .cra_module = THIS_MODULE,
1382 + .cra_list = LIST_HEAD_INIT(arc4_alg.cra_list),
1383 + .cra_u = {
1384 + .cipher = {
1385 + .cia_min_keysize = ARC4_MIN_KEY_SIZE,
1386 + .cia_max_keysize = ARC4_MAX_KEY_SIZE,
1387 + .cia_setkey = arc4_set_key,
1388 + .cia_encrypt = arc4_crypt,
1389 + .cia_decrypt = arc4_crypt,
1390 + }
1391 + }
1392 +};
1393 +
1394 +/** \fn static int ecb_arc4_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
1395 + \ingroup LQ_ARC4_FUNCTIONS
1396 + \brief ECB ARC4 encrypt using linux crypto blkcipher
1397 + \param desc blkcipher descriptor
1398 + \param dst output scatterlist
1399 + \param src input scatterlist
1400 + \param nbytes data size in bytes
1401 +*/
1402 +static int ecb_arc4_encrypt(struct blkcipher_desc *desc,
1403 + struct scatterlist *dst,
1404 + struct scatterlist *src,
1405 + unsigned int nbytes)
1406 +{
1407 + struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
1408 + struct blkcipher_walk walk;
1409 + int err;
1410 +
1411 + DPRINTF(1, "\n");
1412 + blkcipher_walk_init(&walk, dst, src, nbytes);
1413 + err = blkcipher_walk_virt(desc, &walk);
1414 +
1415 + while ((nbytes = walk.nbytes)) {
1416 + deu_arc4_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1417 + NULL, nbytes, CRYPTO_DIR_ENCRYPT, 0);
1418 + nbytes &= ARC4_BLOCK_SIZE - 1;
1419 + err = blkcipher_walk_done(desc, &walk, nbytes);
1420 + }
1421 +
1422 + return err;
1423 +}
1424 +
1425 +/** \fn static int ecb_arc4_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
1426 + \ingroup LQ_ARC4_FUNCTIONS
1427 + \brief ECB ARC4 decrypt using linux crypto blkcipher
1428 + \param desc blkcipher descriptor
1429 + \param dst output scatterlist
1430 + \param src input scatterlist
1431 + \param nbytes data size in bytes
1432 +*/
1433 +static int ecb_arc4_decrypt(struct blkcipher_desc *desc,
1434 + struct scatterlist *dst,
1435 + struct scatterlist *src,
1436 + unsigned int nbytes)
1437 +{
1438 + struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
1439 + struct blkcipher_walk walk;
1440 + int err;
1441 +
1442 + DPRINTF(1, "\n");
1443 + blkcipher_walk_init(&walk, dst, src, nbytes);
1444 + err = blkcipher_walk_virt(desc, &walk);
1445 +
1446 + while ((nbytes = walk.nbytes)) {
1447 + deu_arc4_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
1448 + NULL, nbytes, CRYPTO_DIR_DECRYPT, 0);
1449 + nbytes &= ARC4_BLOCK_SIZE - 1;
1450 + err = blkcipher_walk_done(desc, &walk, nbytes);
1451 + }
1452 +
1453 + return err;
1454 +}
1455 +
1456 +/*
1457 + * \brief ARC4 function mappings
1458 +*/
1459 +static struct crypto_alg ecb_arc4_alg = {
1460 + .cra_name = "ecb(arc4)",
1461 + .cra_driver_name = "lq_deu-ecb(arc4)",
1462 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
1463 + .cra_blocksize = ARC4_BLOCK_SIZE,
1464 + .cra_ctxsize = sizeof(struct arc4_ctx),
1465 + .cra_type = &crypto_blkcipher_type,
1466 + .cra_module = THIS_MODULE,
1467 + .cra_list = LIST_HEAD_INIT(ecb_arc4_alg.cra_list),
1468 + .cra_u = {
1469 + .blkcipher = {
1470 + .min_keysize = ARC4_MIN_KEY_SIZE,
1471 + .max_keysize = ARC4_MAX_KEY_SIZE,
1472 + .setkey = arc4_set_key,
1473 + .encrypt = ecb_arc4_encrypt,
1474 + .decrypt = ecb_arc4_decrypt,
1475 + }
1476 + }
1477 +};
1478 +
1479 +/** \fn int lq_deu_init_arc4(void)
1480 + \ingroup LQ_ARC4_FUNCTIONS
1481 + \brief initialize arc4 driver
1482 +*/
1483 +int lq_deu_init_arc4(void)
1484 +{
1485 + int ret;
1486 +
1487 + if ((ret = crypto_register_alg(&arc4_alg)))
1488 + goto arc4_err;
1489 +
1490 + if ((ret = crypto_register_alg(&ecb_arc4_alg)))
1491 + goto ecb_arc4_err;
1492 +
1493 + arc4_chip_init();
1494 +
1495 + CRTCL_SECT_INIT;
1496 +
1497 + printk(KERN_NOTICE "Lantiq DEU ARC4 initialized%s.\n",
1498 + disable_deudma ? "" : " (DMA)");
1499 + return ret;
1500 +
1501 +arc4_err:
1502 + printk(KERN_ERR "Lantiq arc4 initialization failed!\n");
1503 + return ret;
1504 +
1505 +ecb_arc4_err:
1506 + crypto_unregister_alg(&arc4_alg);
1507 + printk(KERN_ERR "Lantiq ecb_arc4 initialization failed!\n");
1508 +
1509 + return ret;
1510 +}
1511 +
1512 +/** \fn void lq_deu_fini_arc4(void)
1513 + \ingroup LQ_ARC4_FUNCTIONS
1514 + \brief unregister arc4 driver
1515 +*/
1516 +void lq_deu_fini_arc4(void)
1517 +{
1518 + crypto_unregister_alg(&arc4_alg);
1519 + crypto_unregister_alg(&ecb_arc4_alg);
1520 +}
1521 +
1522 +#endif
1523 +
1524 +#endif
1525 --- /dev/null
1526 +++ b/drivers/crypto/lantiq/des.c
1527 @@ -0,0 +1,929 @@
1528 +/*
1529 + * This program is free software; you can redistribute it and/or modify
1530 + * it under the terms of the GNU General Public License as published by
1531 + * the Free Software Foundation; either version 2 of the License, or
1532 + * (at your option) any later version.
1533 + *
1534 + * This program is distributed in the hope that it will be useful,
1535 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
1536 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1537 + * GNU General Public License for more details.
1538 + *
1539 + * You should have received a copy of the GNU General Public License
1540 + * along with this program; if not, write to the Free Software
1541 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
1542 + *
1543 + * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
1544 + * Copyright (C) 2009 Mohammad Firdaus
1545 + */
1546 +
1547 +/**
1548 + \defgroup LQ_DEU LQ_DEU_DRIVERS
1549 + \ingroup API
1550 + \brief Lantiq DEU driver
1551 +*/
1552 +
1553 +/**
1554 + \file des.c
1555 + \ingroup LQ_DEU
1556 + \brief DES encryption DEU driver file
1557 +*/
1558 +
1559 +/**
1560 + \defgroup LQ_DES_FUNCTIONS LQ_DES_FUNCTIONS
1561 + \ingroup LQ_DEU
1562 + \brief Lantiq DES Encryption functions
1563 +*/
1564 +
1565 +#include <linux/version.h>
1566 +#include <linux/module.h>
1567 +#include <linux/init.h>
1568 +#include <linux/types.h>
1569 +#include <linux/errno.h>
1570 +#include <linux/crypto.h>
1571 +#include <linux/interrupt.h>
1572 +#include <linux/delay.h>
1573 +#include <asm/byteorder.h>
1574 +#include <crypto/algapi.h>
1575 +
1576 +#ifdef CONFIG_SOL_LANTIQ_XWAY
1577 +
1578 +#include "deu.h"
1579 +
1580 +#ifdef CONFIG_CRYPTO_DEV_DMA
1581 +# include "deu_dma.h"
1582 +#endif
1583 +
1584 +static spinlock_t cipher_lock;
1585 +
1586 +/* Preprocessor declarations */
1587 +#define DES_KEY_SIZE 8
1588 +#define DES_EXPKEY_WORDS 32
1589 +#define DES_BLOCK_SIZE 8
1590 +#define DES3_EDE_KEY_SIZE (3 * DES_KEY_SIZE)
1591 +#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS)
1592 +#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE
1593 +
1594 +struct des_ctx {
1595 + int controlr_M;
1596 + int key_length;
1597 + u8 iv[DES_BLOCK_SIZE];
1598 + u32 expkey[DES3_EDE_EXPKEY_WORDS];
1599 +};
1600 +
1601 +/** \fn int des_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
1602 + * \ingroup LQ_DES_FUNCTIONS
1603 + * \brief sets DES key
1604 + * \param tfm linux crypto algo transform
1605 + * \param key input key
1606 + * \param key_len key length
1607 +*/
1608 +static int des_setkey(struct crypto_tfm *tfm,
1609 + const u8 *key,
1610 + unsigned int key_len)
1611 +{
1612 + struct des_ctx *ctx = crypto_tfm_ctx(tfm);
1613 +
1614 + DPRINTF(0, "ctx @%p, key_len %d\n", ctx, key_len);
1615 +
1616 + ctx->controlr_M = 0; /* des */
1617 + ctx->key_length = key_len;
1618 +
1619 + memcpy((u8 *)(ctx->expkey), key, key_len);
1620 +
1621 + return 0;
1622 +}
1623 +
1624 +#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA
1625 +/** \fn void deu_des(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
1626 + * \ingroup LQ_DES_FUNCTIONS
1627 + * \brief main interface to DES hardware
1628 + * \param ctx_arg crypto algo context
1629 + * \param out_arg output bytestream
1630 + * \param in_arg input bytestream
1631 + * \param iv_arg initialization vector
1632 + * \param nbytes length of bytestream
1633 + * \param encdec 1 for encrypt; 0 for decrypt
1634 + * \param mode operation mode such as ecb, cbc
1635 +*/
1636 +
1637 +static void deu_des(void *ctx_arg,
1638 + u8 *out_arg,
1639 + const u8 *in_arg,
1640 + u8 *iv_arg,
1641 + u32 nbytes,
1642 + int encdec,
1643 + int mode)
1644 +#else
1645 +/** \fn void deu_des_core(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
1646 + * \ingroup LQ_DES_FUNCTIONS
1647 + * \brief main interface to DES hardware
1648 + * \param ctx_arg crypto algo context
1649 + * \param out_arg output bytestream
1650 + * \param in_arg input bytestream
1651 + * \param iv_arg initialization vector
1652 + * \param nbytes length of bytestream
1653 + * \param encdec 1 for encrypt; 0 for decrypt
1654 + * \param mode operation mode such as ebc, cbc
1655 +*/
1656 +static void deu_des_core(void *ctx_arg,
1657 + u8 *out_arg,
1658 + const u8 *in_arg,
1659 + u8 *iv_arg,
1660 + u32 nbytes,
1661 + int encdec,
1662 + int mode)
1663 +#endif
1664 +{
1665 + volatile struct deu_des *des = (struct deu_des *) DES_3DES_START;
1666 + struct des_ctx *dctx = ctx_arg;
1667 + u32 *key = dctx->expkey;
1668 + ulong flag;
1669 +
1670 +#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA
1671 + int i = 0;
1672 + int nblocks = 0;
1673 +#else
1674 + volatile struct deu_dma *dma = (struct deu_dma *) LQ_DEU_DMA_CON;
1675 + struct dma_device_info *dma_device = lq_deu[0].dma_device;
1676 + /* struct deu_drv_priv *deu_priv =
1677 + * (struct deu_drv_priv *)dma_device->priv; */
1678 + int wlen = 0;
1679 + u32 *outcopy = NULL;
1680 + u32 *dword_mem_aligned_in = NULL;
1681 +
1682 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_POLL_DMA
1683 + u32 timeout = 0;
1684 + u32 *out_dma = NULL;
1685 +#endif
1686 +
1687 +#endif
1688 +
1689 + DPRINTF(0, "ctx @%p, mode %d, encdec %d\n", dctx, mode, encdec);
1690 +
1691 + CRTCL_SECT_START;
1692 +
1693 + des->ctrl.E_D = !encdec; /* encryption */
1694 + des->ctrl.O = mode; /* 0 ECB, 1 CBC, 2 OFB, 3 CFB, 4 CTR */
1695 + des->ctrl.SM = 1; /* start after writing input register */
1696 + des->ctrl.DAU = 0; /* Disable Automatic Update of init vect */
1697 + des->ctrl.ARS = 1; /* Autostart Select - write to IHR */
1698 +
1699 + des->ctrl.M = dctx->controlr_M;
1700 + /* write keys */
1701 + if (dctx->controlr_M == 0) {
1702 + /* DES mode */
1703 + des->K1HR = DEU_ENDIAN_SWAP(*((u32 *) key + 0));
1704 + des->K1LR = DEU_ENDIAN_SWAP(*((u32 *) key + 1));
1705 +#ifdef CRYPTO_DEBUG
1706 + printk("key1: %x\n", (*((u32 *) key + 0)));
1707 + printk("key2: %x\n", (*((u32 *) key + 1)));
1708 +#endif
1709 + } else {
1710 + /* 3DES mode (EDE-x) */
1711 + switch (dctx->key_length) {
1712 + case 24:
1713 + des->K3HR = DEU_ENDIAN_SWAP(*((u32 *) key + 4));
1714 + des->K3LR = DEU_ENDIAN_SWAP(*((u32 *) key + 5));
1715 + /* no break; */
1716 + case 16:
1717 + des->K2HR = DEU_ENDIAN_SWAP(*((u32 *) key + 2));
1718 + des->K2LR = DEU_ENDIAN_SWAP(*((u32 *) key + 3));
1719 + /* no break; */
1720 + case 8:
1721 + des->K1HR = DEU_ENDIAN_SWAP(*((u32 *) key + 0));
1722 + des->K1LR = DEU_ENDIAN_SWAP(*((u32 *) key + 1));
1723 + break;
1724 + default:
1725 + CRTCL_SECT_END;
1726 + return;
1727 + }
1728 + }
1729 +
1730 + /* write init vector (not required for ECB mode) */
1731 + if (mode > 0) {
1732 + des->IVHR = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
1733 + des->IVLR = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
1734 + }
1735 +
1736 +#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA
1737 + nblocks = nbytes / 4;
1738 +
1739 + for (i = 0; i < nblocks; i += 2) {
1740 +		/* wait for busy bit to clear */
1741 +
1742 +		/* Workaround: do a dummy read of the busy flag, because it
1743 +		   is not raised early enough in CFB/OFB 3DES modes. */
1745 +#ifdef CRYPTO_DEBUG
1746 + printk("ihr: %x\n", (*((u32 *) in_arg + i)));
1747 + printk("ilr: %x\n", (*((u32 *) in_arg + 1 + i)));
1748 +#endif
1749 + des->IHR = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + i));
1750 + /* start crypto */
1751 + des->ILR = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + 1 + i));
1752 +
1753 + while (des->ctrl.BUS) {
1754 + /* this will not take long */
1755 + }
1756 +
1757 + *((u32 *) out_arg + 0 + i) = des->OHR;
1758 + *((u32 *) out_arg + 1 + i) = des->OLR;
1759 +
1760 +#ifdef CRYPTO_DEBUG
1761 + printk("ohr: %x\n", (*((u32 *) out_arg + i)));
1762 + printk("olr: %x\n", (*((u32 *) out_arg + 1 + i)));
1763 +#endif
1764 + }
1765 +
1766 +#else /* dma mode */
1767 +
1768 +	/* Prepare Rx buf length used in DMA pseudo interrupt */
1769 + /* deu_priv->deu_rx_buf = out_arg; */
1770 + /* deu_priv->deu_rx_len = nbytes; */
1771 +
1772 + /* memory alignment issue */
1773 + dword_mem_aligned_in = (u32 *) DEU_DWORD_REORDERING(in_arg, des_buff_in,
1774 + BUFFER_IN, nbytes);
1775 +
1776 + dma->ctrl.ALGO = 0; /* DES */
1777 + des->ctrl.DAU = 0;
1778 + dma->ctrl.BS = 0;
1779 + dma->ctrl.EN = 1;
1780 +
1781 +	while (des->ctrl.BUS) {
1782 +		/* wait for DES to be ready */
1783 +	}
1784 +
1785 + wlen = dma_device_write(dma_device, (u8 *) dword_mem_aligned_in, nbytes,
1786 + NULL);
1787 + if (wlen != nbytes) {
1788 + dma->ctrl.EN = 0;
1789 + CRTCL_SECT_END;
1790 + printk(KERN_ERR "[%s %s %d]: dma_device_write fail!\n",
1791 + __FILE__, __func__, __LINE__);
1792 + return; /* -EINVAL; */
1793 + }
1794 +
1795 + WAIT_DES_DMA_READY();
1796 +
1797 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_POLL_DMA
1798 + outcopy = (u32 *) DEU_DWORD_REORDERING(out_arg, des_buff_out,
1799 + BUFFER_OUT, nbytes);
1800 +
1801 + /* polling DMA rx channel */
1802 + while ((dma_device_read(dma_device, (u8 **) &out_dma, NULL)) == 0) {
1803 + timeout++;
1804 +
1805 + if (timeout >= 333000) {
1806 + dma->ctrl.EN = 0;
1807 + CRTCL_SECT_END;
1808 + printk(KERN_ERR "[%s %s %d]: timeout!!\n",
1809 + __FILE__, __func__, __LINE__);
1810 + return; /* -EINVAL; */
1811 + }
1812 + }
1813 +
1814 + WAIT_DES_DMA_READY();
1815 +
1816 + DES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes);
1817 +#else
1818 + CRTCL_SECT_END; /* Sleep and wait for Rx finished */
1819 + DEU_WAIT_EVENT(deu_priv->deu_thread_wait, DEU_EVENT,
1820 + deu_priv->deu_event_flags);
1821 + CRTCL_SECT_START;
1822 +#endif
1823 +
1824 +#endif /* dma mode */
1825 +
1826 + if (mode > 0) {
1827 + *(u32 *) iv_arg = DEU_ENDIAN_SWAP(des->IVHR);
1828 + *((u32 *) iv_arg + 1) = DEU_ENDIAN_SWAP(des->IVLR);
1829 +	}
1830 +
1831 + CRTCL_SECT_END;
1832 +}
1833 +
1834 +/* definitions from linux/include/crypto.h:
1835 +#define CRYPTO_TFM_MODE_ECB 0x00000001
1836 +#define CRYPTO_TFM_MODE_CBC 0x00000002
1837 +#define CRYPTO_TFM_MODE_CFB 0x00000004
1838 +#define CRYPTO_TFM_MODE_CTR 0x00000008
1839 +#define CRYPTO_TFM_MODE_OFB 0x00000010
1840 +but hardware definition: 0 ECB 1 CBC 2 OFB 3 CFB 4 CTR */
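+
+/*
+ * Illustrative sketch only (not part of the original driver): how the
+ * bitmask-style CRYPTO_TFM_MODE_* values quoted above could be mapped onto
+ * the hardware's 0..4 mode encoding expected by deu_des_core(). The helper
+ * name is hypothetical and nothing in this file calls it.
+ */
+#if 0
+static int deu_des_mode_from_tfm_flags(u32 tfm_mode)
+{
+	switch (tfm_mode) {
+	case 0x00000001: return 0;	/* ECB */
+	case 0x00000002: return 1;	/* CBC */
+	case 0x00000010: return 2;	/* OFB */
+	case 0x00000004: return 3;	/* CFB */
+	case 0x00000008: return 4;	/* CTR */
+	default:	 return -EINVAL;	/* unknown mode */
+	}
+}
+#endif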
1841 +
1842 +/** \fn void deu_des(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
1843 + * \ingroup LQ_DES_FUNCTIONS
1844 + * \brief main interface to DES hardware
1845 + * \param ctx_arg crypto algo context
1846 + * \param out_arg output bytestream
1847 + * \param in_arg input bytestream
1848 + * \param iv_arg initialization vector
1849 + * \param nbytes length of bytestream
1850 + * \param encdec 1 for encrypt; 0 for decrypt
1851 + * \param mode operation mode such as ECB, CBC
1852 +*/
1853 +
1854 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
1855 +static void deu_des(void *ctx_arg,
1856 + u8 *out_arg,
1857 + const u8 *in_arg,
1858 + u8 *iv_arg,
1859 + u32 nbytes,
1860 + int encdec,
1861 + int mode)
1862 +{
1863 + u32 remain = nbytes;
1864 + u32 inc;
1865 +
1866 + DPRINTF(0, "\n");
1867 +
1868 + while (remain > 0) {
1869 + if (remain >= DEU_MAX_PACKET_SIZE)
1870 + inc = DEU_MAX_PACKET_SIZE;
1871 + else
1872 + inc = remain;
1873 +
1874 + remain -= inc;
1875 +
1876 + deu_des_core(ctx_arg, out_arg, in_arg, iv_arg, inc, encdec,
1877 + mode);
1878 +
1879 + out_arg += inc;
1880 + in_arg += inc;
1881 + }
1882 +}
1883 +#endif
1884 +
1885 +/** \fn void deu_des_ecb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
1886 + * \ingroup LQ_DES_FUNCTIONS
1887 + * \brief sets DES hardware to ECB mode
1888 + * \param ctx crypto algo context
1889 + * \param dst output bytestream
1890 + * \param src input bytestream
1891 + * \param iv initialization vector
1892 + * \param nbytes length of bytestream
1893 + * \param encdec 1 for encrypt; 0 for decrypt
1894 + * \param inplace not used
1895 +*/
1896 +
1897 +static void deu_des_ecb(void *ctx,
1898 + uint8_t *dst,
1899 + const uint8_t *src,
1900 + uint8_t *iv,
1901 + size_t nbytes,
1902 + int encdec,
1903 + int inplace)
1904 +{
1905 + DPRINTF(0, "ctx @%p\n", ctx);
1906 + deu_des(ctx, dst, src, NULL, nbytes, encdec, 0);
1907 +}
1908 +
1909 +/** \fn void deu_des_cbc(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
1910 + * \ingroup LQ_DES_FUNCTIONS
1911 + * \brief sets DES hardware to CBC mode
1912 + * \param ctx crypto algo context
1913 + * \param dst output bytestream
1914 + * \param src input bytestream
1915 + * \param iv initialization vector
1916 + * \param nbytes length of bytestream
1917 + * \param encdec 1 for encrypt; 0 for decrypt
1918 + * \param inplace not used
1919 +*/
1920 +static void deu_des_cbc(void *ctx,
1921 + uint8_t *dst,
1922 + const uint8_t *src,
1923 + uint8_t *iv,
1924 + size_t nbytes,
1925 + int encdec,
1926 + int inplace)
1927 +{
1928 + DPRINTF(0, "ctx @%p\n", ctx);
1929 + deu_des(ctx, dst, src, iv, nbytes, encdec, 1);
1930 +}
1931 +
1932 +#if 0
1933 +/** \fn void deu_des_ofb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
1934 + * \ingroup LQ_DES_FUNCTIONS
1935 + * \brief sets DES hardware to OFB mode
1936 + * \param ctx crypto algo context
1937 + * \param dst output bytestream
1938 + * \param src input bytestream
1939 + * \param iv initialization vector
1940 + * \param nbytes length of bytestream
1941 + * \param encdec 1 for encrypt; 0 for decrypt
1942 + * \param inplace not used
1943 +*/
1944 +static void deu_des_ofb(void *ctx,
1945 + uint8_t *dst,
1946 + const uint8_t *src,
1947 + uint8_t *iv,
1948 + size_t nbytes,
1949 + int encdec,
1950 + int inplace)
1951 +{
1952 + DPRINTF(0, "ctx @%p\n", ctx);
1953 + deu_des(ctx, dst, src, iv, nbytes, encdec, 2);
1954 +}
1955 +
1956 +/** \fn void deu_des_cfb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
1957 + \ingroup LQ_DES_FUNCTIONS
1958 + \brief sets DES hardware to CFB mode
1959 + \param ctx crypto algo context
1960 + \param dst output bytestream
1961 + \param src input bytestream
1962 + \param iv initialization vector
1963 + \param nbytes length of bytestream
1964 + \param encdec 1 for encrypt; 0 for decrypt
1965 + \param inplace not used
1966 +*/
1967 +static void deu_des_cfb(void *ctx,
1968 + uint8_t *dst,
1969 + const uint8_t *src,
1970 + uint8_t *iv,
1971 + size_t nbytes,
1972 + int encdec,
1973 + int inplace)
1974 +{
1975 + DPRINTF(0, "ctx @%p\n", ctx);
1976 + deu_des(ctx, dst, src, iv, nbytes, encdec, 3);
1977 +}
1978 +
1979 +/** \fn void deu_des_ctr(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
1980 + * \ingroup LQ_DES_FUNCTIONS
1981 + * \brief sets DES hardware to CTR mode
1982 + * \param ctx crypto algo context
1983 + * \param dst output bytestream
1984 + * \param src input bytestream
1985 + * \param iv initialization vector
1986 + * \param nbytes length of bytestream
1987 + * \param encdec 1 for encrypt; 0 for decrypt
1988 + * \param inplace not used
1989 +*/
1990 +static void deu_des_ctr(void *ctx,
1991 + uint8_t *dst,
1992 + const uint8_t *src,
1993 + uint8_t *iv,
1994 + size_t nbytes,
1995 + int encdec,
1996 + int inplace)
1997 +{
1998 + DPRINTF(0, "ctx @%p\n", ctx);
1999 + deu_des(ctx, dst, src, iv, nbytes, encdec, 4);
2000 +}
2001 +#endif
2002 +
2003 +/** \fn void des_encrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
2004 + * \ingroup LQ_DES_FUNCTIONS
2005 + * \brief encrypt DES_BLOCK_SIZE of data
2006 + * \param tfm linux crypto algo transform
2007 + * \param out output bytestream
2008 + * \param in input bytestream
2009 +*/
2010 +static void des_encrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
2011 +{
2012 + struct des_ctx *ctx = crypto_tfm_ctx(tfm);
2013 + DPRINTF(0, "ctx @%p\n", ctx);
2014 + deu_des(ctx, out, in, NULL, DES_BLOCK_SIZE, CRYPTO_DIR_ENCRYPT, 0);
2015 +}
2016 +
2017 +/** \fn void des_decrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
2018 + * \ingroup LQ_DES_FUNCTIONS
2019 + * \brief decrypt DES_BLOCK_SIZE of data
2020 + * \param tfm linux crypto algo transform
2021 + * \param out output bytestream
2022 + * \param in input bytestream
2023 +*/
2024 +static void des_decrypt(struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
2025 +{
2026 + struct des_ctx *ctx = crypto_tfm_ctx(tfm);
2027 + DPRINTF(0, "ctx @%p\n", ctx);
2028 + deu_des(ctx, out, in, NULL, DES_BLOCK_SIZE, CRYPTO_DIR_DECRYPT, 0);
2029 +}
2030 +
2031 +/*
2032 + * \brief RFC2451:
2033 + *
2034 + * For DES-EDE3, there is no known need to reject weak or
2035 + * complementation keys. Any weakness is obviated by the use of
2036 + * multiple keys.
2037 + *
2038 + * However, if the first two or last two independent 64-bit keys are
2039 + * equal (k1 == k2 or k2 == k3), then the DES3 operation is simply the
2040 + * same as DES. Implementers MUST reject keys that exhibit this
2041 + * property.
2042 + *
2043 + */
2044 +
2045 +/** \fn int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
2046 + * \ingroup LQ_DES_FUNCTIONS
2047 + * \brief sets 3DES key
2048 + * \param tfm linux crypto algo transform
2049 + * \param key input key
2050 + * \param key_len key length
2051 +*/
2052 +static int des3_ede_setkey(struct crypto_tfm *tfm,
2053 + const u8 *key,
2054 + unsigned int key_len)
2055 +{
2056 + struct des_ctx *ctx = crypto_tfm_ctx(tfm);
2057 +
2058 + DPRINTF(0, "ctx @%p, key_len %d\n", ctx, key_len);
2059 +
2060 + ctx->controlr_M = key_len / 8 + 1; /* 3DES EDE1 / EDE2 / EDE3 Mode */
2061 + ctx->key_length = key_len;
2062 +
2063 + memcpy((u8 *)(ctx->expkey), key, key_len);
2064 +
2065 + return 0;
2066 +}
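+
+/*
+ * Illustrative sketch only (not part of the original driver): the RFC 2451
+ * requirement quoted above would be enforced by rejecting 24-byte keys whose
+ * first two or last two 64-bit halves are equal before copying them into the
+ * context. The helper name is hypothetical and the check is not wired into
+ * des3_ede_setkey() here.
+ */
+#if 0
+static int des3_ede_key_is_degenerate(const u8 *key, unsigned int key_len)
+{
+	return key_len == 24 &&
+	       (!memcmp(key, key + DES_KEY_SIZE, DES_KEY_SIZE) ||
+		!memcmp(key + DES_KEY_SIZE, key + 2 * DES_KEY_SIZE,
+			DES_KEY_SIZE));
+}
+#endif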
2067 +
2068 +/*
2069 + * \brief DES function mappings
2070 +*/
2071 +static struct crypto_alg des_alg = {
2072 + .cra_name = "des",
2073 + .cra_driver_name = "lq_deu-des",
2074 + .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
2075 + .cra_blocksize = DES_BLOCK_SIZE,
2076 + .cra_ctxsize = sizeof(struct des_ctx),
2077 + .cra_module = THIS_MODULE,
2078 + .cra_alignmask = 3,
2079 + .cra_list = LIST_HEAD_INIT(des_alg.cra_list),
2080 + .cra_u = {
2081 + .cipher = {
2082 + .cia_min_keysize = DES_KEY_SIZE,
2083 + .cia_max_keysize = DES_KEY_SIZE,
2084 + .cia_setkey = des_setkey,
2085 + .cia_encrypt = des_encrypt,
2086 + .cia_decrypt = des_decrypt
2087 + }
2088 + }
2089 +};
2090 +
2091 +/*
2092 + * \brief DES function mappings
2093 +*/
2094 +static struct crypto_alg des3_ede_alg = {
2095 + .cra_name = "des3_ede",
2096 + .cra_driver_name = "lq_deu-des3_ede",
2097 + .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
2098 + .cra_blocksize = DES_BLOCK_SIZE,
2099 + .cra_ctxsize = sizeof(struct des_ctx),
2100 + .cra_module = THIS_MODULE,
2101 + .cra_alignmask = 3,
2102 + .cra_list = LIST_HEAD_INIT(des3_ede_alg.cra_list),
2103 + .cra_u = {
2104 + .cipher = {
2105 +			.cia_min_keysize = DES3_EDE_KEY_SIZE,
2106 +			.cia_max_keysize = DES3_EDE_KEY_SIZE,
2107 + .cia_setkey = des3_ede_setkey,
2108 + .cia_encrypt = des_encrypt,
2109 + .cia_decrypt = des_decrypt
2110 + }
2111 + }
2112 +};
2113 +
2114 +/** \fn int ecb_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
2115 + * \ingroup LQ_DES_FUNCTIONS
2116 + * \brief ECB DES encrypt using linux crypto blkcipher
2117 + * \param desc blkcipher descriptor
2118 + * \param dst output scatterlist
2119 + * \param src input scatterlist
2120 + * \param nbytes data size in bytes
2121 +*/
2122 +static int ecb_des_encrypt(struct blkcipher_desc *desc,
2123 + struct scatterlist *dst,
2124 + struct scatterlist *src,
2125 + unsigned int nbytes)
2126 +{
2127 + struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
2128 + struct blkcipher_walk walk;
2129 + int err;
2130 +
2131 + DPRINTF(0, "ctx @%p\n", ctx);
2132 +
2133 + blkcipher_walk_init(&walk, dst, src, nbytes);
2134 + err = blkcipher_walk_virt(desc, &walk);
2135 +
2136 + while ((nbytes = walk.nbytes)) {
2137 + nbytes -= (nbytes % DES_BLOCK_SIZE);
2138 + deu_des_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
2139 + NULL, nbytes, CRYPTO_DIR_ENCRYPT, 0);
2140 + nbytes &= DES_BLOCK_SIZE - 1;
2141 + err = blkcipher_walk_done(desc, &walk, nbytes);
2142 + }
2143 +
2144 + return err;
2145 +}
2146 +
2147 +/** \fn int ecb_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
2148 + * \ingroup LQ_DES_FUNCTIONS
2149 + * \brief ECB DES decrypt using linux crypto blkcipher
2150 + * \param desc blkcipher descriptor
2151 + * \param dst output scatterlist
2152 + * \param src input scatterlist
2153 + * \param nbytes data size in bytes
2154 + * \return err
2155 +*/
2156 +static int ecb_des_decrypt(struct blkcipher_desc *desc,
2157 + struct scatterlist *dst,
2158 + struct scatterlist *src,
2159 + unsigned int nbytes)
2160 +{
2161 + struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
2162 + struct blkcipher_walk walk;
2163 + int err;
2164 +
2165 + DPRINTF(0, "ctx @%p\n", ctx);
2166 +
2167 + blkcipher_walk_init(&walk, dst, src, nbytes);
2168 + err = blkcipher_walk_virt(desc, &walk);
2169 +
2170 + while ((nbytes = walk.nbytes)) {
2171 + nbytes -= (nbytes % DES_BLOCK_SIZE);
2172 + deu_des_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
2173 + NULL, nbytes, CRYPTO_DIR_DECRYPT, 0);
2174 + nbytes &= DES_BLOCK_SIZE - 1;
2175 + err = blkcipher_walk_done(desc, &walk, nbytes);
2176 + }
2177 +
2178 + return err;
2179 +}
2180 +
2181 +/*
2182 + * \brief DES function mappings
2183 +*/
2184 +static struct crypto_alg ecb_des_alg = {
2185 + .cra_name = "ecb(des)",
2186 + .cra_driver_name = "lq_deu-ecb(des)",
2187 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
2188 + .cra_blocksize = DES_BLOCK_SIZE,
2189 + .cra_ctxsize = sizeof(struct des_ctx),
2190 + .cra_type = &crypto_blkcipher_type,
2191 + .cra_module = THIS_MODULE,
2192 + .cra_list = LIST_HEAD_INIT(ecb_des_alg.cra_list),
2193 + .cra_u = {
2194 + .blkcipher = {
2195 + .min_keysize = DES_KEY_SIZE,
2196 + .max_keysize = DES_KEY_SIZE,
2197 + .setkey = des_setkey,
2198 + .encrypt = ecb_des_encrypt,
2199 + .decrypt = ecb_des_decrypt,
2200 + }
2201 + }
2202 +};
2203 +
2204 +/*
2205 + * \brief DES function mappings
2206 +*/
2207 +static struct crypto_alg ecb_des3_ede_alg = {
2208 + .cra_name = "ecb(des3_ede)",
2209 + .cra_driver_name = "lq_deu-ecb(des3_ede)",
2210 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
2211 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2212 + .cra_ctxsize = sizeof(struct des_ctx),
2213 + .cra_type = &crypto_blkcipher_type,
2214 + .cra_module = THIS_MODULE,
2215 + .cra_list = LIST_HEAD_INIT(ecb_des3_ede_alg.cra_list),
2216 + .cra_u = {
2217 + .blkcipher = {
2218 + .min_keysize = DES3_EDE_KEY_SIZE,
2219 + .max_keysize = DES3_EDE_KEY_SIZE,
2220 + .setkey = des3_ede_setkey,
2221 + .encrypt = ecb_des_encrypt,
2222 + .decrypt = ecb_des_decrypt,
2223 + }
2224 + }
2225 +};
2226 +
2227 +/** \fn int cbc_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
2228 + * \ingroup LQ_DES_FUNCTIONS
2229 + * \brief CBC DES encrypt using linux crypto blkcipher
2230 + * \param desc blkcipher descriptor
2231 + * \param dst output scatterlist
2232 + * \param src input scatterlist
2233 + * \param nbytes data size in bytes
2234 + * \return err
2235 +*/
2236 +static int cbc_des_encrypt(struct blkcipher_desc *desc,
2237 + struct scatterlist *dst,
2238 + struct scatterlist *src,
2239 + unsigned int nbytes)
2240 +{
2241 + struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
2242 + struct blkcipher_walk walk;
2243 + int err;
2244 +
2245 + DPRINTF(0, "ctx @%p\n", ctx);
2246 +
2247 + blkcipher_walk_init(&walk, dst, src, nbytes);
2248 + err = blkcipher_walk_virt(desc, &walk);
2249 +
2250 + while ((nbytes = walk.nbytes)) {
2251 + u8 *iv = walk.iv;
2252 + /* printk("iv = %08x\n", *(u32 *)iv); */
2253 + nbytes -= (nbytes % DES_BLOCK_SIZE);
2254 + deu_des_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
2255 + iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
2256 + nbytes &= DES_BLOCK_SIZE - 1;
2257 + err = blkcipher_walk_done(desc, &walk, nbytes);
2258 + }
2259 +
2260 + return err;
2261 +}
2262 +
2263 +/** \fn int cbc_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
2264 + * \ingroup LQ_DES_FUNCTIONS
2265 + * \brief CBC DES decrypt using linux crypto blkcipher
2266 + * \param desc blkcipher descriptor
2267 + * \param dst output scatterlist
2268 + * \param src input scatterlist
2269 + * \param nbytes data size in bytes
2270 + * \return err
2271 +*/
2272 +static int cbc_des_decrypt(struct blkcipher_desc *desc,
2273 + struct scatterlist *dst,
2274 + struct scatterlist *src,
2275 + unsigned int nbytes)
2276 +{
2277 + struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
2278 + struct blkcipher_walk walk;
2279 + int err;
2280 +
2281 + DPRINTF(0, "ctx @%p\n", ctx);
2282 +
2283 + blkcipher_walk_init(&walk, dst, src, nbytes);
2284 + err = blkcipher_walk_virt(desc, &walk);
2285 +
2286 + while ((nbytes = walk.nbytes)) {
2287 + u8 *iv = walk.iv;
2288 + /* printk("iv = %08x\n", *(u32 *)iv); */
2289 + nbytes -= (nbytes % DES_BLOCK_SIZE);
2290 + deu_des_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
2291 + iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
2292 + nbytes &= DES_BLOCK_SIZE - 1;
2293 + err = blkcipher_walk_done(desc, &walk, nbytes);
2294 + }
2295 +
2296 + return err;
2297 +}
2298 +
2299 +/*
2300 + * \brief DES function mappings
2301 +*/
2302 +static struct crypto_alg cbc_des_alg = {
2303 + .cra_name = "cbc(des)",
2304 + .cra_driver_name = "lq_deu-cbc(des)",
2305 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
2306 + .cra_blocksize = DES_BLOCK_SIZE,
2307 + .cra_ctxsize = sizeof(struct des_ctx),
2308 + .cra_type = &crypto_blkcipher_type,
2309 + .cra_module = THIS_MODULE,
2310 + .cra_list = LIST_HEAD_INIT(cbc_des_alg.cra_list),
2311 + .cra_u = {
2312 + .blkcipher = {
2313 + .min_keysize = DES_KEY_SIZE,
2314 + .max_keysize = DES_KEY_SIZE,
2315 + .ivsize = DES_BLOCK_SIZE,
2316 + .setkey = des_setkey,
2317 + .encrypt = cbc_des_encrypt,
2318 + .decrypt = cbc_des_decrypt,
2319 + }
2320 + }
2321 +};
2322 +
2323 +/*
2324 + * \brief DES function mappings
2325 +*/
2326 +static struct crypto_alg cbc_des3_ede_alg = {
2327 + .cra_name = "cbc(des3_ede)",
2328 + .cra_driver_name = "lq_deu-cbc(des3_ede)",
2329 + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
2330 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2331 + .cra_ctxsize = sizeof(struct des_ctx),
2332 + .cra_type = &crypto_blkcipher_type,
2333 + .cra_module = THIS_MODULE,
2334 + .cra_list = LIST_HEAD_INIT(cbc_des3_ede_alg.cra_list),
2335 + .cra_u = {
2336 + .blkcipher = {
2337 + .min_keysize = DES3_EDE_KEY_SIZE,
2338 + .max_keysize = DES3_EDE_KEY_SIZE,
2339 + .ivsize = DES_BLOCK_SIZE,
2340 + .setkey = des3_ede_setkey,
2341 + .encrypt = cbc_des_encrypt,
2342 + .decrypt = cbc_des_decrypt,
2343 + }
2344 + }
2345 +};
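+
+/*
+ * Illustrative usage only (old blkcipher API matching this patch): once the
+ * algorithms above are registered, in-kernel users pick up the hardware
+ * implementation by name through the regular crypto API, e.g.:
+ *
+ *	struct crypto_blkcipher *tfm =
+ *		crypto_alloc_blkcipher("cbc(des3_ede)", 0, 0);
+ *	if (!IS_ERR(tfm)) {
+ *		crypto_blkcipher_setkey(tfm, key, DES3_EDE_KEY_SIZE);
+ *		...
+ *		crypto_free_blkcipher(tfm);
+ *	}
+ */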
2346 +
2347 +/** \fn int lq_deu_init_des(void)
2348 + * \ingroup LQ_DES_FUNCTIONS
2349 + * \brief initialize des driver
2350 +*/
2351 +int lq_deu_init_des(void)
2352 +{
2353 + int ret = 0;
2354 +
2355 + ret = crypto_register_alg(&des_alg);
2356 + if (ret < 0)
2357 + goto des_err;
2358 +
2359 + ret = crypto_register_alg(&ecb_des_alg);
2360 + if (ret < 0)
2361 + goto ecb_des_err;
2362 +
2363 + ret = crypto_register_alg(&cbc_des_alg);
2364 + if (ret < 0)
2365 + goto cbc_des_err;
2366 +
2367 + ret = crypto_register_alg(&des3_ede_alg);
2368 + if (ret < 0)
2369 + goto des3_ede_err;
2370 +
2371 + ret = crypto_register_alg(&ecb_des3_ede_alg);
2372 + if (ret < 0)
2373 + goto ecb_des3_ede_err;
2374 +
2375 + ret = crypto_register_alg(&cbc_des3_ede_alg);
2376 + if (ret < 0)
2377 + goto cbc_des3_ede_err;
2378 +
2379 + deu_des_chip_init();
2380 +
2381 + CRTCL_SECT_INIT;
2382 +
2383 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
2384 +	if (ALLOCATE_MEMORY(BUFFER_IN, DES_ALGO) < 0) {
2385 +		printk(KERN_ERR "[%s %s %d]: malloc memory fail!\n",
2386 +		       __FILE__, __func__, __LINE__);
2387 +		ret = -ENOMEM;
2388 +		goto deu_mem_err;
2389 +	}
2390 +	if (ALLOCATE_MEMORY(BUFFER_OUT, DES_ALGO) < 0) {
2391 +		printk(KERN_ERR "[%s %s %d]: malloc memory fail!\n",
2392 +		       __FILE__, __func__, __LINE__);
2393 +		ret = -ENOMEM;
2394 +		goto deu_mem_err;
2395 +	}
2396 +#endif
2397 +
2398 +	printk(KERN_NOTICE "Lantiq DEU DES initialized%s.\n",
2399 +	       disable_deudma ? "" : " (DMA)");
2400 +	return ret;
2401 +
2402 +	/* error path: unregister in the reverse order of registration */
2403 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
2404 +deu_mem_err:
2405 +	crypto_unregister_alg(&cbc_des3_ede_alg);
2406 +#endif
2407 +cbc_des3_ede_err:
2408 +	crypto_unregister_alg(&ecb_des3_ede_alg);
2409 +ecb_des3_ede_err:
2410 +	crypto_unregister_alg(&des3_ede_alg);
2411 +des3_ede_err:
2412 +	crypto_unregister_alg(&cbc_des_alg);
2413 +cbc_des_err:
2414 +	crypto_unregister_alg(&ecb_des_alg);
2415 +ecb_des_err:
2416 +	crypto_unregister_alg(&des_alg);
2417 +des_err:
2418 +	printk(KERN_ERR "Lantiq DEU DES initialization failed!\n");
2419 +
2420 +	return ret;
2435 +}
2436 +
2437 +/** \fn void lq_deu_fini_des(void)
2438 + * \ingroup LQ_DES_FUNCTIONS
2439 + * \brief unregister des driver
2440 +*/
2441 +void lq_deu_fini_des(void)
2442 +{
2443 + crypto_unregister_alg(&des_alg);
2444 + crypto_unregister_alg(&ecb_des_alg);
2445 + crypto_unregister_alg(&cbc_des_alg);
2446 + crypto_unregister_alg(&des3_ede_alg);
2447 + crypto_unregister_alg(&ecb_des3_ede_alg);
2448 + crypto_unregister_alg(&cbc_des3_ede_alg);
2449 +
2450 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
2451 + FREE_MEMORY(des_buff_in);
2452 + FREE_MEMORY(des_buff_out);
2453 +#endif /* CONFIG_CRYPTO_DEV_LANTIQ_DMA */
2454 +}
2455 +
2456 +#endif
2457 --- /dev/null
2458 +++ b/drivers/crypto/lantiq/deu.c
2459 @@ -0,0 +1,195 @@
2460 +/*
2461 + * This program is free software; you can redistribute it and/or modify
2462 + * it under the terms of the GNU General Public License as published by
2463 + * the Free Software Foundation; either version 2 of the License, or
2464 + * (at your option) any later version.
2465 + *
2466 + * This program is distributed in the hope that it will be useful,
2467 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2468 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2469 + * GNU General Public License for more details.
2470 + *
2471 + * You should have received a copy of the GNU General Public License
2472 + * along with this program; if not, write to the Free Software
2473 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
2474 + *
2475 + * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
2476 + * Copyright (C) 2009 Mohammad Firdaus
2477 + */
2478 +
2479 +/**
2480 + \defgroup LQ_DEU LQ_DEU_DRIVERS
2481 + \ingroup API
2482 + \brief Lantiq DEU driver module
2483 +*/
2484 +
2485 +/**
2486 + \file deu.c
2487 + \ingroup LQ_DEU
2488 + \brief main DEU driver file
2489 +*/
2490 +
2491 +/**
2492 + \defgroup LQ_DEU_FUNCTIONS LQ_DEU_FUNCTIONS
2493 + \ingroup LQ_DEU
2494 + \brief Lantiq DEU functions
2495 +*/
2496 +
2497 +#include <linux/version.h>
2498 +#if defined(CONFIG_MODVERSIONS)
2499 +#define MODVERSIONS
2500 +#include <linux/modversions.h>
2501 +#endif
2502 +#include <linux/module.h>
2503 +#include <linux/init.h>
2504 +#include <linux/types.h>
2505 +#include <linux/errno.h>
2506 +#include <linux/crypto.h>
2507 +#include <linux/proc_fs.h>
2508 +#include <linux/fs.h> /* Stuff about file systems that we need */
2509 +#include <asm/byteorder.h>
2510 +
2511 +#if 0
2512 +#ifdef CONFIG_SOC_LANTIQ_XWAY
2513 +# include <lq_pmu.h>
2514 +#endif
2515 +#endif
2516 +
2517 +#include "deu.h"
2518 +
2519 +struct lq_crypto_priv lq_crypto_ops;
2520 +
2521 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
2522 +int disable_deudma = 0;
2523 +#else
2524 +int disable_deudma = 1;
2525 +#endif /* CONFIG_CRYPTO_DEV_LANTIQ_DMA */
2526 +
2527 +#ifdef CRYPTO_DEBUG
2528 +char deu_debug_level = 3;
2529 +#endif
2530 +
2531 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_MODULE
2532 +# define STATIC static
2533 +#else
2534 +# define STATIC
2535 +#endif
2536 +
2537 +/** \fn int lq_deu_init(void)
2538 + * \ingroup LQ_DEU_FUNCTIONS
2539 + * \brief register all algorithm modules selected in the kernel config for Lantiq HW crypto support
2540 + * \return ret
2541 +*/
2542 +int lq_deu_init(void)
2543 +{
2544 + int ret = -ENOSYS;
2545 + u32 config;
2546 +
2547 + printk(KERN_INFO "Lantiq crypto hardware driver version %s\n",
2548 + LQ_DEU_DRV_VERSION);
2549 +
2550 + config = deu_chip_init();
2551 +
2552 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
2553 + deu_dma_init();
2554 +#endif
2555 +
2556 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_AES)
2557 + if(config & LQ_DEU_ID_AES) {
2558 + if ((ret = lq_deu_init_aes())) {
2559 + printk(KERN_ERR "Lantiq AES initialization failed!\n");
2560 + }
2561 + } else {
2562 + printk(KERN_ERR "Lantiq AES not supported!\n");
2563 + }
2564 +#endif
2565 +
2566 +#ifdef CONFIG_SOC_LANTIQ_XWAY
2567 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_DES)
2568 + if(config & LQ_DEU_ID_DES) {
2569 + if ((ret = lq_deu_init_des())) {
2570 + printk(KERN_ERR "Lantiq DES initialization failed!\n");
2571 + }
2572 + } else {
2573 + printk(KERN_ERR "Lantiq DES not supported!\n");
2574 + }
2575 +#endif
2576 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_ARC4) && defined(CONFIG_CRYPTO_DEV_LANTIQ_DMA)
2577 + if ((ret = lq_deu_init_arc4())) {
2578 + printk(KERN_ERR "Lantiq ARC4 initialization failed!\n");
2579 + }
2580 +#endif
2581 +#endif
2582 +
2583 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_SHA1)
2584 + if(config & LQ_DEU_ID_HASH) {
2585 + if ((ret = lq_deu_init_sha1())) {
2586 + printk(KERN_ERR "Lantiq SHA1 initialization failed!\n");
2587 + }
2588 + } else {
2589 + printk(KERN_ERR "Lantiq SHA1 not supported!\n");
2590 + }
2591 +#endif
2592 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_MD5)
2593 + if(config & LQ_DEU_ID_HASH) {
2594 + if ((ret = lq_deu_init_md5())) {
2595 + printk(KERN_ERR "Lantiq MD5 initialization failed!\n");
2596 + }
2597 + } else {
2598 + printk(KERN_ERR "Lantiq MD5 not supported!\n");
2599 + }
2600 +#endif
2601 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_SHA1_HMAC)
2602 + if ((ret = lq_deu_init_sha1_hmac())) {
2603 + printk(KERN_ERR "Lantiq SHA1_HMAC initialization failed!\n");
2604 + }
2605 +#endif
2606 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_MD5_HMAC)
2607 + if ((ret = lq_deu_init_md5_hmac())) {
2608 + printk(KERN_ERR "Lantiq MD5_HMAC initialization failed!\n");
2609 + }
2610 +#endif
2611 + return ret;
2612 +}
2613 +
2614 +/** \fn void lq_deu_exit(void)
2615 + * \ingroup LQ_DEU_FUNCTIONS
2616 + * \brief remove the loaded crypto algorithms
2617 +*/
2618 +void lq_deu_exit(void)
2619 +{
2620 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_AES)
2621 + lq_deu_fini_aes();
2622 +#endif
2623 +#ifdef CONFIG_SOC_LANTIQ_XWAY
2624 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_DES)
2625 + lq_deu_fini_des();
2626 +#endif
2627 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_ARC4) \
2628 + && defined(CONFIG_CRYPTO_DEV_LANTIQ_DMA)
2629 + lq_deu_fini_arc4();
2630 +#endif
2631 +#endif
2632 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_SHA1)
2633 + lq_deu_fini_sha1();
2634 +#endif
2635 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_MD5)
2636 + lq_deu_fini_md5();
2637 +#endif
2638 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_SHA1_HMAC)
2639 + lq_deu_fini_sha1_hmac();
2640 +#endif
2641 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_MD5_HMAC)
2642 + lq_deu_fini_md5_hmac();
2643 +#endif
2644 +
2645 +	printk(KERN_INFO "DEU has exited successfully\n");
2646 +
2647 +#if defined(CONFIG_CRYPTO_DEV_LANTIQ_DMA)
2648 +	deu_dma_exit();
2649 +	printk(KERN_INFO "DMA has deregistered successfully\n");
2650 +#endif
2651 +}
2652 +
2653 +EXPORT_SYMBOL(lq_deu_init);
2654 +EXPORT_SYMBOL(lq_deu_exit);
2655 --- /dev/null
2656 +++ b/drivers/crypto/lantiq/deu.h
2657 @@ -0,0 +1,248 @@
2658 +/*
2659 + * This program is free software; you can redistribute it and/or modify
2660 + * it under the terms of the GNU General Public License as published by
2661 + * the Free Software Foundation; either version 2 of the License, or
2662 + * (at your option) any later version.
2663 + *
2664 + * This program is distributed in the hope that it will be useful,
2665 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2666 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2667 + * GNU General Public License for more details.
2668 + *
2669 + * You should have received a copy of the GNU General Public License
2670 + * along with this program; if not, write to the Free Software
2671 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
2672 + *
2673 + * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
2674 + * Copyright (C) 2009 Mohammad Firdaus
2675 + */
2676 +
2677 +/**
2678 + \defgroup LQ_DEU LQ_DEU_DRIVERS
2679 + \ingroup API
2680 + \brief Lantiq DEU driver module
2681 +*/
2682 +
2683 +/**
2684 + \file deu.h
2685 + \brief Main DEU driver header file
2686 +*/
2687 +
2688 +/**
2689 + \defgroup LQ_DEU_DEFINITIONS LQ_DEU_DEFINITIONS
2690 + \ingroup LQ_DEU
2691 + \brief Lantiq DEU definitions
2692 +*/
2693 +
2694 +
2695 +#ifndef DEU_H
2696 +#define DEU_H
2697 +
2698 +#undef CRYPTO_DEBUG
2699 +
2700 +#define LQ_DEU_DRV_VERSION "1.0.1"
2701 +
2702 +#if defined(CONFIG_LANTIQ_DANUBE)
2703 +# include "deu_danube.h"
2704 +#elif defined(CONFIG_LANTIQ_AR9)
2705 +# include "deu_ar9.h"
2706 +#elif defined(CONFIG_SOC_LANTIQ_FALCON)
2707 +# include "deu_falcon.h"
2708 +#else
2709 +//# error "Unknown platform"
2710 +# include "deu_danube.h"
2711 +#endif
2712 +
2713 +struct lq_crypto_priv {
2714 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
2715 + u32 *des_buff_in;
2716 + u32 *des_buff_out;
2717 + u32 *aes_buff_in;
2718 + u32 *aes_buff_out;
2719 +
2720 + int (*dma_init)(void);
2721 + void (*dma_exit)(void);
2722 +	u32 *(*dma_align)(const u8 *, u32 *, int, int);
2723 + void (*aes_dma_memcpy)(u32 *, u32 *, u8 *, int);
2724 + void (*des_dma_memcpy)(u32 *, u32 *, u8 *, int);
2725 + int (*aes_dma_malloc)(int);
2726 + int (*des_dma_malloc)(int);
2727 + void (*dma_free)(u32 *);
2728 +#endif
2729 +
2730 + u32 (*endian_swap)(u32);
2731 + u32 (*input_swap)(u32);
2732 + void (*aes_chip_init)(void);
2733 + void (*des_chip_init)(void);
2734 + u32 (*chip_init)(void);
2735 +};
2736 +
2737 +extern struct lq_crypto_priv lq_crypto_ops;
2738 +
2739 +#define LQ_DEU_ALIGNMENT 16
2740 +
2741 +#define PFX "lq_deu: "
2742 +
2743 +#define LQ_DEU_CRA_PRIORITY 300
2744 +#define LQ_DEU_COMPOSITE_PRIORITY 400
2745 +
2746 +#define CRYPTO_DIR_ENCRYPT 1
2747 +#define CRYPTO_DIR_DECRYPT 0
2748 +
2749 +#define CRTCL_SECT_INIT spin_lock_init(&cipher_lock)
2750 +#define CRTCL_SECT_START spin_lock_irqsave(&cipher_lock, flag)
2751 +#define CRTCL_SECT_END spin_unlock_irqrestore(&cipher_lock, flag)
2752 +
2753 +#define LQ_DEU_ID_REV 0x00001F
2754 +#define LQ_DEU_ID_ID 0x00FF00
2755 +#define LQ_DEU_ID_DMA 0x010000
2756 +#define LQ_DEU_ID_HASH 0x020000
2757 +#define LQ_DEU_ID_AES 0x040000
2758 +#define LQ_DEU_ID_3DES 0x080000
2759 +#define LQ_DEU_ID_DES 0x100000
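+
+/*
+ * Illustrative only: lq_deu_init() tests the value returned by
+ * deu_chip_init() against these masks before registering each algorithm,
+ * along the lines of:
+ *
+ *	u32 id = deu_chip_init();
+ *	if (id & LQ_DEU_ID_AES)
+ *		lq_deu_init_aes();
+ *	printk(KERN_INFO "DEU id %u rev %u\n",
+ *	       (id & LQ_DEU_ID_ID) >> 8, id & LQ_DEU_ID_REV);
+ */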
2760 +
2761 +extern int disable_deudma;
2762 +
2763 +int lq_deu_init(void);
2764 +void lq_deu_exit(void);
2765 +
2766 +int lq_deu_init_des(void);
2767 +int lq_deu_init_aes(void);
2768 +int lq_deu_init_arc4(void);
2769 +int lq_deu_init_sha1(void);
2770 +int lq_deu_init_md5(void);
2771 +int lq_deu_init_sha1_hmac(void);
2772 +int lq_deu_init_md5_hmac(void);
2773 +
2774 +void lq_deu_fini_des(void);
2775 +void lq_deu_fini_aes(void);
2776 +void lq_deu_fini_arc4(void);
2777 +void lq_deu_fini_sha1(void);
2778 +void lq_deu_fini_md5(void);
2779 +void lq_deu_fini_sha1_hmac(void);
2780 +void lq_deu_fini_md5_hmac(void);
2781 +
2782 +/* board specific functions */
2783 +/* { */
2784 +static inline u32 deu_chip_init(void)
2785 +{
2786 + return lq_crypto_ops.chip_init();
2787 +}
2788 +
2789 +static inline void deu_des_chip_init(void)
2790 +{
2791 + lq_crypto_ops.des_chip_init();
2792 +}
2793 +
2794 +static inline void deu_aes_chip_init(void)
2795 +{
2796 + lq_crypto_ops.aes_chip_init();
2797 +}
2798 +
2799 +static inline u32 deu_input_swap(u32 input)
2800 +{
2801 + return lq_crypto_ops.input_swap(input);
2802 +}
2803 +
2804 +static inline u32 deu_endian_swap(u32 input)
2805 +{
2806 + return lq_crypto_ops.endian_swap(input);
2807 +}
2808 +
2809 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
2810 +static inline int deu_aes_dma_malloc(int value)
2811 +{
2812 + return lq_crypto_ops.aes_dma_malloc(value);
2813 +}
2814 +
2815 +static inline int deu_des_dma_malloc(int value)
2816 +{
2817 + return lq_crypto_ops.des_dma_malloc(value);
2818 +}
2819 +
2820 +static inline u32 *deu_dma_align(const u8 *arg,
2821 + u32 *buff_alloc,
2822 + int in_out,
2823 + int nbytes)
2824 +{
2825 + return lq_crypto_ops.dma_align(arg, buff_alloc, in_out, nbytes);
2826 +}
2827 +
2828 +static inline void deu_aes_dma_memcpy(u32 *outcopy,
2829 + u32 *out_dma,
2830 + u8 *out_arg,
2831 + int nbytes)
2832 +{
2833 + lq_crypto_ops.aes_dma_memcpy(outcopy, out_dma, out_arg, nbytes);
2834 +}
2835 +
2836 +static inline void deu_des_dma_memcpy(u32 *outcopy,
2837 + u32 *out_dma,
2838 + u8 *out_arg,
2839 + int nbytes)
2840 +{
2841 + lq_crypto_ops.des_dma_memcpy(outcopy, out_dma, out_arg, nbytes);
2842 +}
2843 +
2844 +static inline void deu_dma_free(u32 *addr)
2845 +{
2846 + lq_crypto_ops.dma_free(addr);
2847 +}
2848 +
2849 +static inline int deu_dma_init(void)
2850 +{
2851 +	return lq_crypto_ops.dma_init();
2852 +}
2853 +
2854 +static inline void deu_dma_exit(void)
2855 +{
2856 + lq_crypto_ops.dma_exit();
2857 +}
2858 +#endif
2859 +
2860 +/* } */
2861 +
2862 +#define DEU_WAKELIST_INIT(queue) \
2863 + init_waitqueue_head(&queue)
2864 +
2865 +#define DEU_WAIT_EVENT_TIMEOUT(queue, event, flags, timeout) \
2866 + do { \
2867 + wait_event_interruptible_timeout((queue), \
2868 + test_bit((event), \
2869 + &(flags)), (timeout)); \
2870 + clear_bit((event), &(flags)); \
2871 + }while (0)
2872 +
2873 +
2874 +#define DEU_WAKEUP_EVENT(queue, event, flags) \
2875 + do { \
2876 + set_bit((event), &(flags)); \
2877 + wake_up_interruptible(&(queue)); \
2878 + }while (0)
2879 +
2880 +#define DEU_WAIT_EVENT(queue, event, flags) \
2881 + do { \
2882 + wait_event_interruptible(queue, \
2883 + test_bit((event), &(flags))); \
2884 + clear_bit((event), &(flags)); \
2885 + }while (0)
2886 +
2887 +struct deu_drv_priv {
2888 + wait_queue_head_t deu_thread_wait;
2889 +#define DEU_EVENT 1
2890 + volatile long deu_event_flags;
2891 + u8 *deu_rx_buf;
2892 + u32 deu_rx_len;
2893 +};
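+
+/*
+ * Usage sketch (illustrative only, assuming a struct deu_drv_priv instance
+ * is reachable from dma_device->priv): the cipher path sleeps on the wait
+ * queue and the DMA rx pseudo interrupt handler wakes it once the result
+ * has been received.
+ *
+ *	// consumer, e.g. deu_des_core() in non-polling DMA mode
+ *	DEU_WAIT_EVENT(priv->deu_thread_wait, DEU_EVENT,
+ *		       priv->deu_event_flags);
+ *
+ *	// producer, DMA rx interrupt handler
+ *	priv->deu_rx_buf = buf;
+ *	priv->deu_rx_len = len;
+ *	DEU_WAKEUP_EVENT(priv->deu_thread_wait, DEU_EVENT,
+ *			 priv->deu_event_flags);
+ */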
2894 +
2895 +#ifdef CRYPTO_DEBUG
2896 +extern char deu_debug_level;
2897 +# define DPRINTF(level, format, args...) \
2898 + if (level < deu_debug_level) \
2899 + printk(KERN_INFO "[%s %s %d]: " format, \
2900 + __FILE__, __func__, __LINE__, ##args)
2901 +#else
2902 +# define DPRINTF(level, format, args...) do { } while(0)
2903 +#endif
2904 +
2905 +#endif /* DEU_H */
2906 --- /dev/null
2907 +++ b/drivers/crypto/lantiq/deu_ar9.c
2908 @@ -0,0 +1,327 @@
2909 +/*
2910 + * This program is free software; you can redistribute it and/or modify
2911 + * it under the terms of the GNU General Public License as published by
2912 + * the Free Software Foundation; either version 2 of the License, or
2913 + * (at your option) any later version.
2914 + *
2915 + * This program is distributed in the hope that it will be useful,
2916 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
2917 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2918 + * GNU General Public License for more details.
2919 + *
2920 + * You should have received a copy of the GNU General Public License
2921 + * along with this program; if not, write to the Free Software
2922 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
2923 + *
2924 + * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
2925 + * Copyright (C) 2009 Mohammad Firdaus
2926 + */
2927 +
2928 +#include <linux/module.h>
2929 +#include <linux/init.h>
2930 +#include <linux/types.h>
2931 +#include <linux/errno.h>
2932 +#include <asm/io.h> /* dma_cache_inv */
2933 +#include <linux/platform_device.h>
2934 +
2935 +#ifdef CONFIG_SOC_LANTIQ_XWAY
2936 +
2937 +#include "deu.h"
2938 +
2939 +/**
2940 + \defgroup LQ_DEU LQ_DEU_DRIVERS
2941 + \ingroup API
2942 + \brief Lantiq DEU driver module
2943 +*/
2944 +
2945 +/**
2946 + \file deu_ar9.c
2947 + \brief Lantiq DEU board specific driver file for ar9
2948 +*/
2949 +
2950 +/**
2951 + \defgroup BOARD_SPECIFIC_FUNCTIONS LQ_BOARD_SPECIFIC_FUNCTIONS
2952 + \ingroup LQ_DEU
2953 + \brief board specific functions
2954 +*/
2955 +
2956 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
2957 +struct lq_deu_device lq_deu[1];
2958 +
2959 +static u8 *g_dma_page_ptr = NULL;
2960 +static u8 *g_dma_block = NULL;
2961 +static u8 *g_dma_block2 = NULL;
2962 +
2963 +/** \fn int dma_init(void)
2964 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
2965 + * \brief Initialize DMA for DEU usage. DMA specific registers are
2966 + * \brief Initialize DMA for DEU usage. DMA-specific registers are
2967 + *        initialized here, including a pointer to the device, memory
2968 + * \return -1: fail, 0: SUCCESS
2969 +*/
2970 +static int dma_init(void)
2971 +{
2972 + volatile struct deu_dma *dma = (struct deu_dma *) LQ_DEU_DMA_CON;
2973 + struct dma_device_info *dma_device = NULL;
2974 + int i = 0;
2975 +
2976 + struct dma_device_info *deu_dma_device_ptr;
2977 +
2978 + /* get one free page and share between g_dma_block and g_dma_block2 */
2979 + printk("PAGE_SIZE = %ld\n", PAGE_SIZE);
2980 +	/* the DMA buffers must be 16-byte aligned; a page-aligned page
2981 +	   split in half satisfies this for both blocks */
2982 +	g_dma_page_ptr = (u8 *)__get_free_page(GFP_KERNEL);
2983 +	g_dma_block = g_dma_page_ptr;
2984 +	g_dma_block2 = (u8 *)(g_dma_page_ptr + (PAGE_SIZE >> 1));
2986 +
2987 + /* deu_dma_priv_init(); */
2988 +
2989 + deu_dma_device_ptr = dma_device_reserve("DEU");
2990 + if (!deu_dma_device_ptr) {
2991 + printk("DEU: reserve DMA fail!\n");
2992 + return -1;
2993 + }
2994 + lq_deu[0].dma_device = deu_dma_device_ptr;
2995 +
2996 + dma_device = deu_dma_device_ptr;
2997 + /* dma_device->priv = &deu_dma_priv; */
2998 + dma_device->buffer_alloc = &deu_dma_buffer_alloc;
2999 + dma_device->buffer_free = &deu_dma_buffer_free;
3000 + dma_device->intr_handler = &deu_dma_intr_handler;
3001 +
3002 + dma_device->tx_endianness_mode = LQ_DMA_ENDIAN_TYPE3;
3003 + dma_device->rx_endianness_mode = LQ_DMA_ENDIAN_TYPE3;
3004 + dma_device->port_num = 1;
3005 + dma_device->tx_burst_len = 2;
3006 + dma_device->rx_burst_len = 2;
3007 + dma_device->max_rx_chan_num = 1;
3008 + dma_device->max_tx_chan_num = 1;
3009 + dma_device->port_packet_drop_enable = 0;
3010 +
3011 + for (i = 0; i < dma_device->max_rx_chan_num; i++) {
3012 + dma_device->rx_chan[i]->packet_size = DEU_MAX_PACKET_SIZE;
3013 + dma_device->rx_chan[i]->desc_len = 1;
3014 + dma_device->rx_chan[i]->control = LQ_DMA_CH_ON;
3015 + dma_device->rx_chan[i]->byte_offset = 0;
3016 + dma_device->rx_chan[i]->chan_poll_enable = 1;
3017 + }
3018 +
3019 + for (i = 0; i < dma_device->max_tx_chan_num; i++) {
3020 + dma_device->tx_chan[i]->control = LQ_DMA_CH_ON;
3021 + dma_device->tx_chan[i]->desc_len = 1;
3022 + dma_device->tx_chan[i]->chan_poll_enable = 1;
3023 + }
3024 +
3025 + dma_device->current_tx_chan = 0;
3026 + dma_device->current_rx_chan = 0;
3027 +
3028 +	dma_device_register(dma_device);
3029 + for (i = 0; i < dma_device->max_rx_chan_num; i++) {
3030 + (dma_device->rx_chan[i])->open(dma_device->rx_chan[i]);
3031 + }
3032 +
3033 + dma->ctrl.BS = 0;
3034 + dma->ctrl.RXCLS = 0;
3035 + dma->ctrl.EN = 1;
3036 +
3037 + return 0;
3038 +}
3039 +
3040 +/** \fn u32 *dma_align(const u8 *arg, u32 *buffer_alloc, int in_buff, int nbytes)
3041 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3042 + * \brief Not used for AR9
3043 + * \param arg Pointer to the input / output memory address
3044 + * \param buffer_alloc A pointer to the buffer
3045 + * \param in_buff Input (if == 1) or Output (if == 0) buffer
3046 + * \param nbytes Number of bytes of data
3047 +*/
3048 +static u32 *dma_align(const u8 *arg, u32 *buffer_alloc, int in_buff, int nbytes)
3049 +{
3050 + return (u32 *) arg;
3051 +}
3052 +
3053 +/** \fn void aes_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
3054 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3055 + * \brief copy the DMA data to the memory address space for AES
3056 + * \param outcopy Not used
3057 + * \param out_dma A pointer to the memory address that stores the DMA data
3058 + * \param out_arg The pointer to the memory address that needs to be copied to
3059 + * \param nbytes Number of bytes of data
3060 +*/
3061 +static void aes_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
3062 +{
3063 + memcpy(out_arg, out_dma, nbytes);
3064 +}
3065 +
3066 +/** \fn void des_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
3067 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3068 + * \brief copy the DMA data to the memory address space for DES
3069 + * \param outcopy Not used
3070 + * \param out_dma A pointer to the memory address that stores the DMA data
3071 + * \param out_arg The pointer to the memory address that needs to be copied to
3072 + * \param nbytes Number of bytes of data
3073 +*/
3074 +static void des_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
3075 +{
3076 + memcpy(out_arg, out_dma, nbytes);
3077 +}
3078 +
3079 +/** \fn void dma_exit(void)
3080 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3081 + * \brief unregister dma devices after exit
3082 +*/
3083 +static void dma_exit(void)
3084 +{
3085 + if (g_dma_page_ptr)
3086 + free_page((u32) g_dma_page_ptr);
3087 + dma_device_release(lq_deu[0].dma_device);
3088 + dma_device_unregister(lq_deu[0].dma_device);
3089 +}
3090 +#endif /* CONFIG_CRYPTO_DEV_LANTIQ_DMA */
3091 +
3092 +/** \fn u32 endian_swap(u32 input)
3093 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3094 + * \brief Swap data given to the function
3095 + * \param input Data input to be swapped
3096 + * \return either the swapped data or the input data depending on whether it is in DMA mode or FPI mode
3097 +*/
3098 +static u32 endian_swap(u32 input)
3099 +{
3100 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
3101 + u8 *ptr = (u8 *)&input;
3102 + return ((ptr[3] << 24) | (ptr[2] << 16) | (ptr[1] << 8) | ptr[0]);
3103 +#else
3104 + return input;
3105 +#endif
3106 +}
3107 +
3108 +/** \fn u32 input_swap(u32 input)
3109 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3110 + * \brief Not used
3111 + * \return input
3112 +*/
3113 +static u32 input_swap(u32 input)
3114 +{
3115 + return input;
3116 +}
3117 +
3118 +/** \fn void aes_chip_init(void)
3119 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3120 + * \brief initialize AES hardware
3121 +*/
3122 +static void aes_chip_init(void)
3123 +{
3124 + volatile struct deu_aes *aes = (struct deu_aes *) AES_START;
3125 +
3126 + aes->ctrl.SM = 1;
3127 +#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA
3128 + aes->ctrl.ARS = 1;
3129 +#else
3130 + aes->ctrl.NDC = 1; /* to write to ENDI */
3131 + asm("sync");
3132 + aes->ctrl.ENDI = 0;
3133 + asm("sync");
3134 + aes->ctrl.ARS = 0; /* 0 for dma */
3135 + asm("sync");
3136 +#endif
3137 +}
3138 +
3139 +/** \fn void des_chip_init(void)
3140 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3141 + * \brief initialize DES hardware
3142 +*/
3143 +static void des_chip_init(void)
3144 +{
3145 + volatile struct deu_des *des = (struct deu_des *) DES_3DES_START;
3146 +
3147 +#ifndef CONFIG_CRYPTO_DEV_LANTIQ_DMA
3148 + /* start crypto engine with write to ILR */
3149 + des->ctrl.SM = 1;
3150 + asm("sync");
3151 + des->ctrl.ARS = 1;
3152 +#else
3153 + des->ctrl.SM = 1;
3154 + des->ctrl.NDC = 1;
3155 + asm("sync");
3156 + des->ctrl.ENDI = 0;
3157 + asm("sync");
3158 + des->ctrl.ARS = 0; /* 0 for dma */
3159 +
3160 +#endif
3161 +}
3162 +
3163 +static u32 chip_init(void)
3164 +{
3165 + volatile struct deu_clk_ctrl *clc = (struct deu_clk_ctrl *) LQ_DEU_CLK;
3166 +
3167 +#if 0
3168 + lq_pmu_enable(1<<20);
3169 +#endif
3170 +
3171 + clc->FSOE = 0;
3172 + clc->SBWE = 0;
3173 + clc->SPEN = 0;
3174 + clc->SBWE = 0;
3175 + clc->DISS = 0;
3176 + clc->DISR = 0;
3177 +
3178 + return *LQ_DEU_ID;
3179 +}
3180 +
3181 +static int lq_crypto_probe(struct platform_device *pdev)
3182 +{
3183 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
3184 + lq_crypto_ops.dma_init = dma_init;
3185 + lq_crypto_ops.dma_exit = dma_exit;
3186 + lq_crypto_ops.aes_dma_memcpy = aes_dma_memcpy;
3187 + lq_crypto_ops.des_dma_memcpy = des_dma_memcpy;
3188 + lq_crypto_ops.aes_dma_malloc = aes_dma_malloc;
3189 + lq_crypto_ops.des_dma_malloc = des_dma_malloc;
3190 + lq_crypto_ops.dma_align = dma_align;
3191 + lq_crypto_ops.dma_free = dma_free;
3192 +#endif
3193 +
3194 + lq_crypto_ops.endian_swap = endian_swap;
3195 + lq_crypto_ops.input_swap = input_swap;
3196 + lq_crypto_ops.aes_chip_init = aes_chip_init;
3197 + lq_crypto_ops.des_chip_init = des_chip_init;
3198 + lq_crypto_ops.chip_init = chip_init;
3199 +
3200 + printk("lq_ar9_deu: driver loaded!\n");
3201 +
3202 + lq_deu_init();
3203 +
3204 + return 0;
3205 +}
3206 +
3207 +static int lq_crypto_remove(struct platform_device *pdev)
3208 +{
3209 + lq_deu_exit();
3210 +
3211 + return 0;
3212 +}
3213 +
3214 +static struct platform_driver lq_crypto = {
3215 + .probe = lq_crypto_probe,
3216 + .remove = lq_crypto_remove,
3217 + .driver = {
3218 + .owner = THIS_MODULE,
3219 + .name = "lq_ar9_deu"
3220 + }
3221 +};
3222 +
3223 +static int __init lq_crypto_init(void)
3224 +{
3225 + return platform_driver_register(&lq_crypto);
3226 +}
3227 +module_init(lq_crypto_init);
3228 +
3229 +static void __exit lq_crypto_exit(void)
3230 +{
3231 + platform_driver_unregister(&lq_crypto);
3232 +}
3233 +module_exit(lq_crypto_exit);
3234 +
3235 +#endif
3236 --- /dev/null
3237 +++ b/drivers/crypto/lantiq/deu_ar9.h
3238 @@ -0,0 +1,291 @@
3239 +/*
3240 + * This program is free software; you can redistribute it and/or modify
3241 + * it under the terms of the GNU General Public License as published by
3242 + * the Free Software Foundation; either version 2 of the License, or
3243 + * (at your option) any later version.
3244 + *
3245 + * This program is distributed in the hope that it will be useful,
3246 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3247 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3248 + * GNU General Public License for more details.
3249 + *
3250 + * You should have received a copy of the GNU General Public License
3251 + * along with this program; if not, write to the Free Software
3252 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
3253 + *
3254 + * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
3255 + * Copyright (C) 2009 Mohammad Firdaus / Infineon Technologies
3256 + */
3257 +
3258 +/**
3259 + \defgroup LQ_DEU LQ_DEU_DRIVERS
3260 + \ingroup API
3261 + \brief DEU driver module
3262 +*/
3263 +
3264 +/**
3265 + \defgroup LQ_DEU_DEFINITIONS LQ_DEU_DEFINITIONS
3266 + \ingroup LQ_DEU
3267 + \brief Lantiq DEU definitions
3268 +*/
3269 +
3270 +/**
3271 + \file deu_ar9.h
3272 + \brief DEU driver header file
3273 +*/
3274 +
3275 +
3276 +#ifndef DEU_AR9_H
3277 +#define DEU_AR9_H
3278 +
3279 +#define LQ_DEU_BASE_ADDR (KSEG1 | 0x1E103100)
3280 +#define LQ_DEU_CLK ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0000))
3281 +#define LQ_DEU_ID ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0008))
3282 +#define LQ_DES_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0010))
3283 +#define LQ_AES_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0050))
3284 +#define LQ_HASH_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x00B0))
3285 +#define LQ_ARC4_CON ((volatile u32 *)(LQ_DEU_BASE_ADDR + 0x0100))
3286 +
3287 +#define ARC4_START LQ_ARC4_CON
3288 +#define DES_3DES_START LQ_DES_CON
3289 +#define HASH_START LQ_HASH_CON
3290 +#define AES_START LQ_AES_CON
3291 +
3292 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
3293 +# include "deu_dma.h"
3294 +# define DEU_DWORD_REORDERING(ptr, buffer, in_out, bytes) \
3295 + deu_dma_align(ptr, buffer, in_out, bytes)
3296 +# define AES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \
3297 + deu_aes_dma_memcpy(outcopy, out_dma, out_arg, nbytes)
3298 +# define DES_MEMORY_COPY(outcopy, out_dma, out_arg, nbytes) \
3299 + deu_des_dma_memcpy(outcopy, out_dma, out_arg, nbytes)
3300 +# define BUFFER_IN 1
3301 +# define BUFFER_OUT 0
3302 +# define AES_ALGO 1
3303 +# define DES_ALGO 0
3304 +# define ALLOCATE_MEMORY(val, type) 1
3305 +# define FREE_MEMORY(buff)
3306 +extern struct lq_deu_device lq_deu[1];
3307 +#endif /* CONFIG_CRYPTO_DEV_LANTIQ_DMA */
3308 +
3309 +/* SHA CONSTANTS */
3310 +#define HASH_CON_VALUE 0x0700002C
3311 +
3312 +#define INPUT_ENDIAN_SWAP(input) deu_input_swap(input)
3313 +#define DEU_ENDIAN_SWAP(input) deu_endian_swap(input)
3314 +#define DELAY_PERIOD 10
3315 +#define FIND_DEU_CHIP_VERSION chip_version()
3316 +
3317 +#define WAIT_AES_DMA_READY() \
3318 + do { \
3319 + int i; \
3320 + volatile struct deu_dma *dma = \
3321 + (struct deu_dma *) LQ_DEU_DMA_CON; \
3322 + volatile struct deu_aes *aes = \
3323 + (volatile struct deu_aes *) AES_START; \
3324 + for (i = 0; i < 10; i++) \
3325 + udelay(DELAY_PERIOD); \
3326 + while (dma->ctrl.BSY) {}; \
3327 + while (aes->ctrl.BUS) {}; \
3328 + } while (0)
3329 +
3330 +#define WAIT_DES_DMA_READY() \
3331 + do { \
3332 + int i; \
3333 + volatile struct deu_dma *dma = \
3334 + (struct deu_dma *) LQ_DEU_DMA_CON; \
3335 + volatile struct deu_des *des = \
3336 + (struct deu_des *) DES_3DES_START; \
3337 + for (i = 0; i < 10; i++) \
3338 + udelay(DELAY_PERIOD); \
3339 + while (dma->ctrl.BSY) {}; \
3340 + while (des->ctrl.BUS) {}; \
3341 + } while (0)
3342 +
3343 +#define AES_DMA_MISC_CONFIG() \
3344 + do { \
3345 + volatile struct deu_aes *aes = \
3346 + (volatile struct deu_aes *) AES_START; \
3347 + aes->ctrl.KRE = 1; \
3348 + aes->ctrl.GO = 1; \
3349 + } while(0)
3350 +
3351 +#define SHA_HASH_INIT \
3352 + do { \
3353 + volatile struct deu_hash *hash = \
3354 + (struct deu_hash *) HASH_START; \
3355 + hash->ctrl.SM = 1; \
3356 + hash->ctrl.ALGO = 0; \
3357 + hash->ctrl.INIT = 1; \
3358 + } while(0)
3359 +
3360 +/* DEU Common Structures for AR9*/
3361 +
3362 +struct deu_clk_ctrl {
3363 + u32 Res:26;
3364 + u32 FSOE:1;
3365 + u32 SBWE:1;
3366 + u32 EDIS:1;
3367 + u32 SPEN:1;
3368 + u32 DISS:1;
3369 + u32 DISR:1;
3370 +};
3371 +
3372 +struct deu_des {
3373 + struct deu_des_ctrl { /* 10h */
3374 + u32 KRE:1;
3375 + u32 reserved1:5;
3376 + u32 GO:1;
3377 + u32 STP:1;
3378 + u32 Res2:6;
3379 + u32 NDC:1;
3380 + u32 ENDI:1;
3381 + u32 Res3:2;
3382 + u32 F:3;
3383 + u32 O:3;
3384 + u32 BUS:1;
3385 + u32 DAU:1;
3386 + u32 ARS:1;
3387 + u32 SM:1;
3388 + u32 E_D:1;
3389 + u32 M:3;
3390 + } ctrl;
3391 +
3392 + u32 IHR; /* 14h */
3393 + u32 ILR; /* 18h */
3394 + u32 K1HR; /* 1c */
3395 + u32 K1LR;
3396 + u32 K2HR;
3397 + u32 K2LR;
3398 + u32 K3HR;
3399 + u32 K3LR; /* 30h */
3400 + u32 IVHR; /* 34h */
3401 + u32 IVLR; /* 38 */
3402 + u32 OHR; /* 3c */
3403 + u32 OLR; /* 40 */
3404 +};
3405 +
3406 +struct deu_aes {
3407 + struct deu_aes_ctrl {
3408 + u32 KRE:1;
3409 + u32 reserved1:4;
3410 + u32 PNK:1;
3411 + u32 GO:1;
3412 + u32 STP:1;
3413 + u32 reserved2:6;
3414 + u32 NDC:1;
3415 + u32 ENDI:1;
3416 + u32 reserved3:2;
3417 + u32 F:3; /* fbs */
3418 + u32 O:3; /* om */
3419 + u32 BUS:1; /* bsy */
3420 + u32 DAU:1;
3421 + u32 ARS:1;
3422 + u32 SM:1;
3423 + u32 E_D:1;
3424 + u32 KV:1;
3425 + u32 K:2; /* KL */
3426 + } ctrl;
3427 +
3428 + u32 ID3R; /* 80h */
3429 + u32 ID2R; /* 84h */
3430 + u32 ID1R; /* 88h */
3431 + u32 ID0R; /* 8Ch */
3432 + u32 K7R; /* 90h */
3433 + u32 K6R; /* 94h */
3434 + u32 K5R; /* 98h */
3435 + u32 K4R; /* 9Ch */
3436 + u32 K3R; /* A0h */
3437 + u32 K2R; /* A4h */
3438 + u32 K1R; /* A8h */
3439 + u32 K0R; /* ACh */
3440 + u32 IV3R; /* B0h */
3441 + u32 IV2R; /* B4h */
3442 + u32 IV1R; /* B8h */
3443 + u32 IV0R; /* BCh */
3444 + u32 OD3R; /* D4h */
3445 + u32 OD2R; /* D8h */
3446 + u32 OD1R; /* DCh */
3447 + u32 OD0R; /* E0h */
3448 +};
3449 +
3450 +struct deu_arc4 {
3451 + struct arc4_controlr {
3452 + u32 KRE:1;
3453 + u32 KLEN:4;
3454 + u32 KSAE:1;
3455 + u32 GO:1;
3456 + u32 STP:1;
3457 + u32 reserved1:6;
3458 + u32 NDC:1;
3459 + u32 ENDI:1;
3460 + u32 reserved2:8;
3461 + u32 BUS:1; /* bsy */
3462 + u32 reserved3:1;
3463 + u32 ARS:1;
3464 + u32 SM:1;
3465 + u32 reserved4:4;
3466 + } ctrl;
3467 +
3468 + u32 K3R; /* 104h */
3469 + u32 K2R; /* 108h */
3470 + u32 K1R; /* 10Ch */
3471 + u32 K0R; /* 110h */
3472 + u32 IDLEN; /* 114h */
3473 + u32 ID3R; /* 118h */
3474 + u32 ID2R; /* 11Ch */
3475 + u32 ID1R; /* 120h */
3476 + u32 ID0R; /* 124h */
3477 + u32 OD3R; /* 128h */
3478 + u32 OD2R; /* 12Ch */
3479 + u32 OD1R; /* 130h */
3480 + u32 OD0R; /* 134h */
3481 +};
3482 +
3483 +struct deu_hash {
3484 + struct deu_hash_ctrl {
3485 + u32 reserved1:5;
3486 + u32 KHS:1;
3487 + u32 GO:1;
3488 + u32 INIT:1;
3489 + u32 reserved2:6;
3490 + u32 NDC:1;
3491 + u32 ENDI:1;
3492 + u32 reserved3:7;
3493 + u32 DGRY:1;
3494 + u32 BSY:1;
3495 + u32 reserved4:1;
3496 + u32 IRCL:1;
3497 + u32 SM:1;
3498 + u32 KYUE:1;
3499 + u32 HMEN:1;
3500 + u32 SSEN:1;
3501 + u32 ALGO:1;
3502 + } ctrl;
3503 +
3504 + u32 MR; /* B4h */
3505 + u32 D1R; /* B8h */
3506 + u32 D2R; /* BCh */
3507 + u32 D3R; /* C0h */
3508 + u32 D4R; /* C4h */
3509 + u32 D5R; /* C8h */
3510 + u32 dummy; /* CCh */
3511 + u32 KIDX; /* D0h */
3512 + u32 KEY; /* D4h */
3513 + u32 DBN; /* D8h */
3514 +};
3515 +
3516 +struct deu_dma {
3517 + struct deu_dma_ctrl {
3518 + u32 reserved1:22;
3519 + u32 BS:2;
3520 + u32 BSY:1;
3521 + u32 reserved2:1;
3522 + u32 ALGO:2;
3523 + u32 RXCLS:2;
3524 + u32 reserved3:1;
3525 + u32 EN:1;
3526 + } ctrl;
3527 +};
3528 +
3529 +#endif /* DEU_AR9_H */
3530 --- /dev/null
3531 +++ b/drivers/crypto/lantiq/deu_danube.c
3532 @@ -0,0 +1,484 @@
3533 +/*
3534 + * This program is free software; you can redistribute it and/or modify
3535 + * it under the terms of the GNU General Public License as published by
3536 + * the Free Software Foundation; either version 2 of the License, or
3537 + * (at your option) any later version.
3538 + *
3539 + * This program is distributed in the hope that it will be useful,
3540 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
3541 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3542 + * GNU General Public License for more details.
3543 + *
3544 + * You should have received a copy of the GNU General Public License
3545 + * along with this program; if not, write to the Free Software
3546 + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
3547 + *
3548 + * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
3549 + * Copyright (C) 2009 Mohammad Firdaus
3550 + */
3551 +
3552 +#include <linux/module.h>
3553 +#include <linux/init.h>
3554 +#include <linux/types.h>
3555 +#include <linux/errno.h>
3556 +#include <asm/io.h> /* dma_cache_inv */
3557 +#include <linux/platform_device.h>
3558 +
3559 +#ifdef CONFIG_SOC_LANTIQ_XWAY
3560 +
3561 +#include "deu.h"
3562 +
3563 +/**
3564 + \defgroup LQ_DEU LQ_DEU_DRIVERS
3565 + \ingroup API
3566 + \brief DEU driver module
3567 +*/
3568 +
3569 +/**
3570 + \file deu_danube.c
3571 + \ingroup LQ_DEU
3572 + \brief board specific DEU driver file for danube
3573 +*/
3574 +
3575 +/**
3576 + \defgroup BOARD_SPECIFIC_FUNCTIONS LQ_BOARD_SPECIFIC_FUNCTIONS
3577 + \ingroup LQ_DEU
3578 + \brief board specific DEU functions
3579 +*/
3580 +
3581 +static int danube_pre_1_4;
3582 +
3583 +#ifdef CONFIG_CRYPTO_DEV_LANTIQ_DMA
3584 +u32 *des_buff_in = NULL;
3585 +u32 *des_buff_out = NULL;
3586 +u32 *aes_buff_in = NULL;
3587 +u32 *aes_buff_out = NULL;
3588 +
3589 +struct lq_deu_device lq_deu[1];
3590 +
3591 +static u8 *g_dma_page_ptr = NULL;
3592 +static u8 *g_dma_block = NULL;
3593 +static u8 *g_dma_block2 = NULL;
3594 +
3595 +/** \fn int dma_init(void)
3596 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3597 + * \brief Initialize DMA for DEU usage. DMA-specific registers are
3598 + *        initialized here, including a pointer to the device, memory
3599 + * space for the device and DEU-DMA descriptors
3600 + * \return -1 if fail, otherwise return 0
3601 +*/
3602 +static int dma_init(void)
3603 +{
3604 + struct dma_device_info *dma_device = NULL;
3605 + int i = 0;
3606 + volatile struct deu_dma *dma = (struct deu_dma *) LQ_DEU_DMA_CON;
3607 + struct dma_device_info *deu_dma_device_ptr;
3608 +
3609 + /* get one free page and share it between g_dma_block and g_dma_block2 */
3610 + printk("PAGE_SIZE = %ld\n", PAGE_SIZE);
3611 + /* the DEU needs 16-byte-aligned memory blocks */
3612 + g_dma_page_ptr = (u8 *)__get_free_page(GFP_KERNEL);
3613 + /* first half of the page, 16-byte aligned */
3614 + g_dma_block = g_dma_page_ptr;
3615 + /* second half of the page, 16-byte aligned */
3616 + g_dma_block2 = (u8 *)(g_dma_page_ptr + (PAGE_SIZE >> 1));
3617 +
3618 + deu_dma_device_ptr = dma_device_reserve("DEU");
3619 + if (!deu_dma_device_ptr) {
3620 + printk("DEU: reserve DMA fail!\n");
3621 + return -1;
3622 + }
3623 + lq_deu[0].dma_device = deu_dma_device_ptr;
3624 + dma_device = deu_dma_device_ptr;
3625 + /* dma_device->priv = &deu_dma_priv; */
3626 + dma_device->buffer_alloc = &deu_dma_buffer_alloc;
3627 + dma_device->buffer_free = &deu_dma_buffer_free;
3628 + dma_device->intr_handler = &deu_dma_intr_handler;
3629 + dma_device->tx_endianness_mode = LQ_DMA_ENDIAN_TYPE3;
3630 + dma_device->rx_endianness_mode = LQ_DMA_ENDIAN_TYPE3;
3631 + dma_device->port_num = 1;
3632 + dma_device->tx_burst_len = 4;
3633 + dma_device->max_rx_chan_num = 1;
3634 + dma_device->max_tx_chan_num = 1;
3635 + dma_device->port_packet_drop_enable = 0;
3636 +
3637 + for (i = 0; i < dma_device->max_rx_chan_num; i++) {
3638 + dma_device->rx_chan[i]->packet_size = DEU_MAX_PACKET_SIZE;
3639 + dma_device->rx_chan[i]->desc_len = 1;
3640 + dma_device->rx_chan[i]->control = LQ_DMA_CH_ON;
3641 + dma_device->rx_chan[i]->byte_offset = 0;
3642 + dma_device->rx_chan[i]->chan_poll_enable = 1;
3643 +
3644 + }
3645 +
3646 + for (i = 0; i < dma_device->max_tx_chan_num; i++) {
3647 + dma_device->tx_chan[i]->control = LQ_DMA_CH_ON;
3648 + dma_device->tx_chan[i]->desc_len = 1;
3649 + dma_device->tx_chan[i]->chan_poll_enable = 1;
3650 + }
3651 +
3652 + dma_device->current_tx_chan = 0;
3653 + dma_device->current_rx_chan = 0;
3654 +
3655 + dma_device_register(dma_device);
3656 + for (i = 0; i < dma_device->max_rx_chan_num; i++) {
3657 + (dma_device->rx_chan[i])->open(dma_device->rx_chan[i]);
3658 + }
3659 +
3660 + dma->ctrl.BS = 0;
3661 + dma->ctrl.RXCLS = 0;
3662 + dma->ctrl.EN = 1;
3663 +
3664 +
3665 + *LQ_DMA_PS = 1;
3666 +
3667 + /* DANUBE PRE 1.4 SOFTWARE FIX */
3668 + if (danube_pre_1_4)
3669 + *LQ_DMA_PCTRL = 0x14;
3670 + else
3671 + *LQ_DMA_PCTRL = 0xF14;
3672 +
3673 + return 0;
3674 +}
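Note that dma_init() does not check whether __get_free_page() succeeded. A minimal sketch of a failure-safe variant of that allocation step (deu_alloc_bounce_page is a hypothetical helper, not part of the patch):

static int deu_alloc_bounce_page(void)
{
	/* one page, split in half for the two 16-byte-aligned DMA blocks */
	g_dma_page_ptr = (u8 *)__get_free_page(GFP_KERNEL);
	if (!g_dma_page_ptr)
		return -1;

	g_dma_block = g_dma_page_ptr;
	g_dma_block2 = g_dma_page_ptr + (PAGE_SIZE >> 1);
	return 0;
}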
3675 +
3676 +/** \fn u32 *dma_align(const u8 *arg, u32 *buffer_alloc, int in_buff, int nbytes)
3677 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3678 + * \brief A fix to align misaligned addresses for Danube version 1.3 chips,
3679 + * which have memory alignment issues.
3680 + * \param arg Pointer to the input / output memory address
3681 + * \param buffer_alloc A pointer to the buffer
3682 + * \param in_buff Input (if == 1) or Output (if == 0) buffer
3683 + * \param nbytes Number of bytes of data
3684 + * \return arg if the address is already aligned, buffer_alloc otherwise
3685 +*/
3686 +static u32 *dma_align(const u8 *arg, u32 *buffer_alloc, int in_buff, int nbytes)
3687 +{
3688 + if (danube_pre_1_4) {
3689 + /* for input buffer */
3690 + if (in_buff) {
3691 + if (((u32) arg) & 0xF) {
3692 + memcpy(buffer_alloc, arg, nbytes);
3693 + return (u32 *) buffer_alloc;
3694 + } else {
3695 + return (u32 *) arg;
3696 + }
3697 + }
3698 + else {
3699 + /* for output buffer */
3700 + if (((u32) arg) & 0x3)
3701 + return buffer_alloc;
3702 + else
3703 + return (u32 *) arg;
3704 + }
3705 + }
3706 +
3707 + return (u32 *) arg;
3708 +}
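A hypothetical caller, for illustration only, showing how dma_align() is meant to be paired with the pre-allocated bounce buffers; 'in', 'out' and 'nbytes' are assumed to come from the surrounding cipher routine:

static void dma_align_usage_sketch(const u8 *in, u8 *out, int nbytes)
{
	u32 *in_dma, *out_dma;

	/* input: bounced into aes_buff_in if 'in' is not 16-byte aligned */
	in_dma = dma_align(in, aes_buff_in, 1, nbytes);

	/* output: redirected to aes_buff_out if 'out' is not word aligned */
	out_dma = dma_align(out, aes_buff_out, 0, nbytes);

	/* the DMA transfer would then run between in_dma and out_dma, and
	 * the result be copied back to 'out' via aes_dma_memcpy() below */
	(void)in_dma;
	(void)out_dma;	/* silence 'unused' warnings in this sketch */
}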
3709 +
3710 +/** \fn void aes_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
3711 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3712 + * \brief Copy the DMA data to the memory address space for AES. The swapping
3713 + * of the 32-bit words is done only for Danube version 1.3 (FIX). Otherwise,
3714 + * it is a direct memory copy to the out_arg pointer
3715 + * \param outcopy Pointer to the buffer that stores the swapped copy
3716 + * \param out_dma Pointer to the memory address that holds the DMA data
3717 + * \param out_arg Pointer to the destination memory address
3718 + * \param nbytes Number of bytes of data
3719 +*/
3720 +static void aes_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
3721 +{
3722 + int i = 0;
3723 + int x = 0;
3724 +
3725 + /* DANUBE PRE 1.4 SOFTWARE FIX */
3726 + if (danube_pre_1_4) {
3727 + for (i = 0; i < (nbytes / 4); i++) {
3728 + x = i ^ 0x3;
3729 + outcopy[i] = out_dma[x];
3730 +
3731 + }
3732 + if (((u32) out_arg) & 0x3) {
3733 + memcpy((u8 *)out_arg, outcopy, nbytes);
3734 + }
3735 + } else {
3736 + memcpy(out_arg, out_dma, nbytes);
3737 + }
3738 +}
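A worked example of the pre-1.4 re-ordering done above: for one 16-byte AES block the XOR with 0x3 simply reverses the four 32-bit words (the DES variant below uses i ^ 1, which swaps adjacent word pairs instead). Illustration only:

static void aes_word_swap_example(void)
{
	/* one 16-byte AES block as four 32-bit words, as seen by the DMA */
	u32 out_dma[4] = { 0x00112233, 0x44556677, 0x8899aabb, 0xccddeeff };
	u32 outcopy[4];
	int i;

	for (i = 0; i < 4; i++)
		outcopy[i] = out_dma[i ^ 0x3];

	/* outcopy now holds { 0xccddeeff, 0x8899aabb, 0x44556677, 0x00112233 } */
}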
3739 +
3740 +/** \fn void des_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
3741 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3742 + * \brief Copy the DMA data to the memory address space for DES. The swapping
3743 + * of the 32-bit words is done only for Danube version 1.3 (FIX). Otherwise,
3744 + * it is a direct memory copy to the out_arg pointer
3745 + * \param outcopy Pointer to the buffer that stores the swapped copy
3746 + * \param out_dma Pointer to the memory address that holds the DMA data
3747 + * \param out_arg Pointer to the destination memory address
3748 + * \param nbytes Number of bytes of data
3749 +*/
3750 +static void des_dma_memcpy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes)
3751 +{
3752 + int i = 0;
3753 + int x = 0;
3754 +
3755 + /* DANUBE PRE 1.4 SOFTWARE FIX */
3756 + if (danube_pre_1_4) {
3757 + for (i = 0; i < (nbytes / 4); i++) {
3758 + x = i ^ 1;
3759 + outcopy[i] = out_dma[x];
3760 +
3761 + }
3762 + if (((u32) out_arg) & 0x3) {
3763 + memcpy((u8 *)out_arg, outcopy, nbytes);
3764 + }
3765 + } else {
3766 + memcpy(out_arg, out_dma, nbytes);
3767 + }
3768 +}
3769 +
3770 +/** \fn int des_dma_malloc(int value)
3771 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3772 + * \brief Allocates the input/output buffer memory needed during the
3773 + * DES algorithm DMA transfer (a workaround for the memory
3774 + * alignment issues)
3775 + * \param value Determines whether the function is being called for an
3776 + * input buffer or an output buffer allocation
3777 +*/
3778 +static int des_dma_malloc(int value)
3779 +{
3780 + if (danube_pre_1_4) {
3781 + if (value == BUFFER_IN) {
3782 + des_buff_in = kmalloc(DEU_MAX_PACKET_SIZE, GFP_ATOMIC);
3783 + if (!des_buff_in)
3784 + return -1;
3785 + else
3786 + return 0;
3787 + }
3788 + else {
3789 + des_buff_out = kmalloc(DEU_MAX_PACKET_SIZE, GFP_ATOMIC);
3790 + if (!des_buff_out)
3791 + return -1;
3792 + else
3793 + return 0;
3794 + }
3795 + } else {
3796 + return 0;
3797 + }
3798 +}
3799 +
3800 +/** \fn int aes_dma_malloc(int value)
3801 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3802 + * \brief Allocates the input/output buffer memory needed during the
3803 + * AES algorithm DMA transfer (a workaround for the memory
3804 + * alignment issues)
3805 + * \param value Determines whether the function is being called for an
3806 + * input buffer or an output buffer allocation
3807 +*/
3808 +static int aes_dma_malloc(int value)
3809 +{
3810 + if (danube_pre_1_4) {
3811 + if (value == BUFFER_IN) {
3812 + aes_buff_in = kmalloc(DEU_MAX_PACKET_SIZE, GFP_ATOMIC);
3813 + if (!aes_buff_in)
3814 + return -1;
3815 + else
3816 + return 0;
3817 + }
3818 + else {
3819 + aes_buff_out = kmalloc(DEU_MAX_PACKET_SIZE, GFP_ATOMIC);
3820 + if (!aes_buff_out)
3821 + return -1;
3822 + else
3823 + return 0;
3824 + }
3825 + } else {
3826 + return 0;
3827 + }
3828 +}
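A hedged sketch of how the two pre-1.4 bounce buffers might be set up together, with cleanup on partial failure; BUFFER_OUT is assumed to be the counterpart of BUFFER_IN from deu.h, and aes_bounce_init is a hypothetical helper, not part of the patch:

static int aes_bounce_init(void)
{
	if (aes_dma_malloc(BUFFER_IN))
		return -1;

	if (aes_dma_malloc(BUFFER_OUT)) {
		kfree(aes_buff_in);	/* undo the first allocation */
		aes_buff_in = NULL;
		return -1;
	}

	return 0;
}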
3829 +
3830 +/** \fn void dma_free(u32 *addr)
3831 + * \ingroup BOARD_SPECIFIC_FUNCTIONS
3832 + * \brief Frees previously allocated memory
3833 + * \param addr Memory address of the buffer that needs to be freed
3834 +*/
3835 +static void dma_free(u32 *addr)
3836 +{
3837 + if (addr)