/******************************************************************************
**
** FILE NAME    : ifxmips_async_aes.c
** PROJECT      : IFX UEIP
** MODULES      : DEU Module
**
** DATE         : October 11, 2010
** AUTHOR       : Mohammad Firdaus
** DESCRIPTION  : Data Encryption Unit Driver for AES Algorithm
** COPYRIGHT    : Copyright (c) 2010
**                Infineon Technologies AG
**                Am Campeon 1-12, 85579 Neubiberg, Germany
**
**    This program is free software; you can redistribute it and/or modify
**    it under the terms of the GNU General Public License as published by
**    the Free Software Foundation; either version 2 of the License, or
**    (at your option) any later version.
**
** HISTORY
** $Date          $Author            $Comment
** 08 Sept 2009   Mohammad Firdaus   Initial UEIP release
** 11 Oct 2010    Mohammad Firdaus   Kernel port incl. async ablkcipher mode
** 21 March 2011  Mohammad Firdaus   Changes for kernel 2.6.32 and IPSec integration
*******************************************************************************/
/*!
  \defgroup IFX_DEU IFX_DEU_DRIVERS
  \ingroup API
  \brief ifx DEU driver module
*/

/*!
  \file ifxmips_async_aes.c
  \ingroup IFX_DEU
  \brief AES encryption driver main file
*/

/*!
  \defgroup IFX_AES_FUNCTIONS IFX_AES_FUNCTIONS
  \ingroup IFX_DEU
  \brief IFX AES driver functions
*/

#include <linux/version.h>      /* for LINUX_VERSION_CODE / KERNEL_VERSION below */
#include <linux/module.h>       /* for module_param / MODULE_PARM_DESC / THIS_MODULE */
#include <linux/wait.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <crypto/ctr.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>

#include <asm/ifx/ifx_regs.h>
#include <asm/ifx/ifx_types.h>
#include <asm/ifx/common_routines.h>
#include <asm/ifx/irq.h>
#include <asm/ifx/ifx_pmu.h>
#include <asm/ifx/ifx_gpio.h>
#include <asm/kmap_types.h>

#include "ifxmips_deu.h"

#if defined(CONFIG_DANUBE)
#include "ifxmips_deu_danube.h"
extern int ifx_danube_pre_1_4;
#elif defined(CONFIG_AR9)
#include "ifxmips_deu_ar9.h"
#elif defined(CONFIG_VR9) || defined(CONFIG_AR10)
#include "ifxmips_deu_vr9.h"
#else
#error "Unknown platform"
#endif

/* DMA related header and variables */

spinlock_t aes_lock;
#define CRTCL_SECT_INIT     spin_lock_init(&aes_lock)
#define CRTCL_SECT_START    spin_lock_irqsave(&aes_lock, flag)
#define CRTCL_SECT_END      spin_unlock_irqrestore(&aes_lock, flag)
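
/* Editor's note: the macros above expand to spin_lock_irqsave()/
 * spin_unlock_irqrestore() against a local variable named `flag`, so every
 * caller must declare it. A minimal usage sketch (illustrative only, not
 * part of the driver):
 */
#if 0
static void crtcl_sect_usage_example(void)
{
    unsigned long flag;     /* required by CRTCL_SECT_START/CRTCL_SECT_END */

    CRTCL_SECT_START;
    /* ... touch DEU registers exclusively here ... */
    CRTCL_SECT_END;
}
#endif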

/* Definition of constants */
//#define AES_START   IFX_AES_CON
#define AES_MIN_KEY_SIZE            16
#define AES_MAX_KEY_SIZE            32
#define AES_BLOCK_SIZE              16
#define CTR_RFC3686_NONCE_SIZE      4
#define CTR_RFC3686_IV_SIZE         8
#define CTR_RFC3686_MAX_KEY_SIZE    (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)

#ifdef CRYPTO_DEBUG
extern char debug_level;
#define DPRINTF(level, format, args...) if (level < debug_level) printk(KERN_INFO "[%s %s %d]: " format, __FILE__, __func__, __LINE__, ##args);
#else
#define DPRINTF(level, format, args...)
#endif /* CRYPTO_DEBUG */


static int disable_multiblock = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
module_param(disable_multiblock, int, 0);
#else
MODULE_PARM_DESC(disable_multiblock, "Disable encryption of whole multiblock buffers");
#endif

static int disable_deudma = 1;

/* Function declarations */
int aes_chip_init(void);
u32 endian_swap(u32 input);
u32 input_swap(u32 input);
u32 *memory_alignment(const u8 *arg, u32 *buff_alloc, int in_out, int nbytes);
void aes_dma_memory_copy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes);
int aes_memory_allocate(int value);
int des_memory_allocate(int value);
void memory_release(u32 *addr);

struct aes_ctx {
    int key_length;
    u32 buf[AES_MAX_KEY_SIZE];
    u8 nonce[CTR_RFC3686_NONCE_SIZE];
};

struct aes_container {
    u8 *iv;
    u8 *src_buf;
    u8 *dst_buf;

    int mode;
    int encdec;
    int complete;
    int flag;

    u32 bytes_processed;
    u32 nbytes;

    struct ablkcipher_request arequest;
};

aes_priv_t *aes_queue;
extern deu_drv_priv_t deu_dma_priv;

void hexdump(unsigned char *buf, unsigned int len)
{
    print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
                   16, 1,
                   buf, len, false);
}

/*! \fn int lq_deu_aes_core (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg,
 *                           size_t nbytes, int encdec, int mode)
 *  \ingroup IFX_AES_FUNCTIONS
 *  \brief main interface to the AES hardware
 *  \param ctx_arg crypto algo context
 *  \param out_arg output bytestream
 *  \param in_arg input bytestream
 *  \param iv_arg initialization vector
 *  \param nbytes length of bytestream
 *  \param encdec 1 for encrypt; 0 for decrypt
 *  \param mode operation mode such as ecb, cbc, ctr
 *
 */

static int lq_deu_aes_core (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
                            u8 *iv_arg, size_t nbytes, int encdec, int mode)
{
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
    struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
    u32 *in_key = ctx->buf;
    unsigned long flag;
    /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    int key_len = ctx->key_length;

    volatile struct deu_dma_t *dma = (struct deu_dma_t *) IFX_DEU_DMA_CON;
    struct dma_device_info *dma_device = ifx_deu[0].dma_device;
    deu_drv_priv_t *deu_priv = (deu_drv_priv_t *)dma_device->priv;
    int wlen = 0;
    //u32 *outcopy = NULL;
    u32 *dword_mem_aligned_in = NULL;

    CRTCL_SECT_START;

    /* 128, 192 or 256 bit key length */
    aes->controlr.K = key_len / 8 - 2;
    if (key_len == 128 / 8) {
        aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
        aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
        aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
        aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
    }
    else if (key_len == 192 / 8) {
        aes->K5R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
        aes->K4R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
        aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
        aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
        aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 4));
        aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 5));
    }
    else if (key_len == 256 / 8) {
        aes->K7R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 0));
        aes->K6R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 1));
        aes->K5R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 2));
        aes->K4R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 3));
        aes->K3R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 4));
        aes->K2R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 5));
        aes->K1R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 6));
        aes->K0R = DEU_ENDIAN_SWAP(*((u32 *) in_key + 7));
    }
    else {
        printk (KERN_ERR "[%s %s %d]: Invalid key_len: %d\n", __FILE__, __func__, __LINE__, key_len);
        CRTCL_SECT_END;
        return -EINVAL;
    }

    /* let HW pre-process DEcryption key in any case (even if
     * ENcryption is used). Key Valid (KV) bit is then only
     * checked in decryption routine! */
    aes->controlr.PNK = 1;

    while (aes->controlr.BUS) {
        /* this will not take long */
    }
    AES_DMA_MISC_CONFIG();

    aes->controlr.E_D = !encdec;    // 0 = encrypt, 1 = decrypt
    aes->controlr.O = mode;         // 0 ECB, 1 CBC, 2 OFB, 3 CFB, 4 CTR

    //aes->controlr.F = 128;        // default; only for CFB and OFB modes; change only for customer-specific apps
    if (mode > 0) {
        aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
        aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
        aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
        aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));
    }

    /* Prepare Rx buf length used in the DMA pseudo interrupt */
    deu_priv->deu_rx_buf = (u32 *)out_arg;
    deu_priv->deu_rx_len = nbytes;

    /* memory alignment issue */
    dword_mem_aligned_in = (u32 *) DEU_DWORD_REORDERING(in_arg, aes_buff_in, BUFFER_IN, nbytes);

    dma->controlr.ALGO = 1;     // AES
    dma->controlr.BS = 0;
    aes->controlr.DAU = 0;
    dma->controlr.EN = 1;

    while (aes->controlr.BUS) {
        /* wait for AES to be ready */
    }

    deu_priv->outcopy = (u32 *) DEU_DWORD_REORDERING(out_arg, aes_buff_out, BUFFER_OUT, nbytes);
    deu_priv->event_src = AES_ASYNC_EVENT;

    wlen = dma_device_write (dma_device, (u8 *)dword_mem_aligned_in, nbytes, NULL);
    if (wlen != nbytes) {
        dma->controlr.EN = 0;
        CRTCL_SECT_END;
        printk (KERN_ERR "[%s %s %d]: dma_device_write fail!\n", __FILE__, __func__, __LINE__);
        return -EINVAL;
    }

    // WAIT_AES_DMA_READY();

    CRTCL_SECT_END;

    if (mode > 0) {
        *((u32 *) iv_arg) = DEU_ENDIAN_SWAP(*((u32 *) iv_arg));
        *((u32 *) iv_arg + 1) = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
        *((u32 *) iv_arg + 2) = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
        *((u32 *) iv_arg + 3) = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));
    }

    return -EINPROGRESS;
}
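
/* Editor's note: a hedged single-block CBC call sketch for the core routine
 * above (illustrative only; the name deu_core_call_example is hypothetical).
 * Buffers must be DMA-safe and nbytes a multiple of AES_BLOCK_SIZE; mode
 * encoding follows the aes->controlr.O comment: 0 ECB, 1 CBC, 2 OFB,
 * 3 CFB, 4 CTR.
 */
#if 0
static int deu_core_call_example(struct aes_ctx *ctx,
                                 u8 dst[16], const u8 src[16], u8 iv[16])
{
    /* returns -EINPROGRESS on success; completion is signalled later
     * through the DMA pseudo interrupt and the AES thread */
    return lq_deu_aes_core(ctx, dst, src, iv, AES_BLOCK_SIZE,
                           CRYPTO_DIR_ENCRYPT, 1 /* CBC */);
}
#endif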

/* \fn static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Counts and returns the number of scatterlist entries covering total_bytes
 * \param *sl Pointer to the scatterlist
 * \param total_bytes The total number of bytes that need to be encrypted/decrypted
 * \return The number of scatterlist entries
 */

static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
{
    int i = 0;
    int remain = total_bytes;   /* signed, so a final entry longer than the
                                 * remainder cannot underflow the loop test */

    do {
        remain -= sl[i].length;
        i++;
    } while (remain > 0);

    return i;
}
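
/* Editor's note: a hedged usage sketch for count_sgs() with a two-entry
 * scatterlist (illustrative only; sg_init_table()/sg_set_buf() are the
 * standard kernel helpers for building such a list):
 */
#if 0
static int count_sgs_usage_example(u8 *a, u8 *b)
{
    struct scatterlist sl[2];

    sg_init_table(sl, 2);
    sg_set_buf(&sl[0], a, 64);
    sg_set_buf(&sl[1], b, 32);

    /* 96 bytes span both entries, so this returns 2 */
    return count_sgs(sl, 96);
}
#endif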

/* \fn static void lq_sg_init(struct aes_container *aes_con, struct scatterlist *src,
 *                            struct scatterlist *dst)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Fetches the virtual addresses of the source and destination scatterlist entries.
 * \param *aes_con Pointer to the AES container structure
 * \param *src Pointer to the source scatterlist
 * \param *dst Pointer to the destination scatterlist
 */

static void lq_sg_init(struct aes_container *aes_con, struct scatterlist *src,
                       struct scatterlist *dst)
{
    aes_con->src_buf = (char *) sg_virt(src);
    aes_con->dst_buf = (char *) sg_virt(dst);
}


/* \fn static void lq_sg_complete(struct aes_container *aes_con)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Frees the used-up memory after encrypt/decrypt.
 */

static void lq_sg_complete(struct aes_container *aes_con)
{
    unsigned long queue_flag;

    spin_lock_irqsave(&aes_queue->lock, queue_flag);
    kfree(aes_con);
    spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
}

/* \fn static inline struct aes_container *aes_container_cast (
 *                          struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Locates the enclosing aes_container of an ablkcipher_request.
 * \param *areq Pointer to the embedded ablkcipher_request
 * \return Pointer to the containing aes_container
 */
static inline struct aes_container *aes_container_cast (
                         struct ablkcipher_request *areq)
{
    return container_of(areq, struct aes_container, arequest);
}


/* \fn static int process_next_packet(struct aes_container *aes_con, struct ablkcipher_request *areq,
 *                                    int state)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Processes the next packet to be encrypted/decrypted
 * \param *aes_con Pointer to the AES container structure
 * \param *areq Pointer to the ablkcipher_request
 * \param state The state of the current packet (part of a scatterlist or a new packet)
 * \return -EINVAL: error, -EINPROGRESS: crypto still running, 1: no more scatterlist entries
 */

static int process_next_packet(struct aes_container *aes_con, struct ablkcipher_request *areq,
                               int state)
{
    u8 *iv;
    int mode, dir, err = -EINVAL;
    unsigned long queue_flag;
    u32 inc, nbytes, remain, chunk_size;
    struct scatterlist *src = NULL;
    struct scatterlist *dst = NULL;
    struct crypto_ablkcipher *cipher;
    struct aes_ctx *ctx;

    spin_lock_irqsave(&aes_queue->lock, queue_flag);

    dir = aes_con->encdec;
    mode = aes_con->mode;
    iv = aes_con->iv;

    if (state & PROCESS_SCATTER) {
        src = scatterwalk_sg_next(areq->src);
        dst = scatterwalk_sg_next(areq->dst);

        if (!src || !dst) {
            spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
            return 1;
        }
    }
    else if (state & PROCESS_NEW_PACKET) {
        src = areq->src;
        dst = areq->dst;
    }

    remain = aes_con->bytes_processed;
    chunk_size = src->length;

    if (remain > DEU_MAX_PACKET_SIZE)
        inc = DEU_MAX_PACKET_SIZE;
    else if (remain > chunk_size)
        inc = chunk_size;
    else
        inc = remain;

    remain -= inc;
    aes_con->nbytes = inc;

    if (state & PROCESS_SCATTER) {
        aes_con->src_buf += aes_con->nbytes;
        aes_con->dst_buf += aes_con->nbytes;
    }

    lq_sg_init(aes_con, src, dst);

    nbytes = aes_con->nbytes;

    //printk("debug - Line: %d, func: %s, reqsize: %d, scattersize: %d\n",
    //       __LINE__, __func__, nbytes, chunk_size);

    cipher = crypto_ablkcipher_reqtfm(areq);
    ctx = crypto_ablkcipher_ctx(cipher);

    if (aes_queue->hw_status == AES_IDLE)
        aes_queue->hw_status = AES_STARTED;

    aes_con->bytes_processed -= aes_con->nbytes;
    err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest);
    if (err == -EBUSY) {
        spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
        printk("Failed to enqueue request, ln: %d, err: %d\n",
               __LINE__, err);
        return -EINVAL;
    }

    spin_unlock_irqrestore(&aes_queue->lock, queue_flag);

    err = lq_deu_aes_core(ctx, aes_con->dst_buf, aes_con->src_buf, iv, nbytes, dir, mode);
    return err;
}

/* \fn static void process_queue (unsigned long data)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Tasklet that signals the dequeuing of the next packet to be processed
 * \param data Not used
 * \return void
 */

static void process_queue(unsigned long data)
{
    DEU_WAKEUP_EVENT(deu_dma_priv.deu_thread_wait, AES_ASYNC_EVENT,
                     deu_dma_priv.aes_event_flags);
}
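
/* Editor's note: process_queue() runs as a tasklet paired with the
 * DEU_WAIT_EVENT() call in aes_crypto_thread() below; aes_queue->aes_task is
 * scheduled from the thread's aes_done path. A hedged sketch of how the
 * tasklet would be bound (the actual binding lives in the DEU setup code,
 * not in this file):
 */
#if 0
static void aes_tasklet_bind_example(aes_priv_t *queue)
{
    /* third argument becomes the unused `data` parameter of process_queue() */
    tasklet_init(&queue->aes_task, process_queue, 0);
}
#endif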


/* \fn static int aes_crypto_thread (void *data)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief AES thread that handles crypto requests from the upper layer & DMA
 * \param *data Not used
 * \return -EINVAL: DEU failure, -EBUSY: DEU HW busy, 0: exit thread
 */
static int aes_crypto_thread (void *data)
{
    struct aes_container *aes_con = NULL;
    struct ablkcipher_request *areq = NULL;
    int err;
    unsigned long queue_flag;

    daemonize("lq_aes_thread");
    printk("AES Queue Manager Starting\n");

    while (1)
    {
        DEU_WAIT_EVENT(deu_dma_priv.deu_thread_wait, AES_ASYNC_EVENT,
                       deu_dma_priv.aes_event_flags);

        spin_lock_irqsave(&aes_queue->lock, queue_flag);

        /* wait to prevent starting a crypto session before
         * exiting the dma interrupt thread.
         */
        if (aes_queue->hw_status == AES_STARTED) {
            areq = ablkcipher_dequeue_request(&aes_queue->list);
            aes_con = aes_container_cast(areq);
            aes_queue->hw_status = AES_BUSY;
        }
        else if (aes_queue->hw_status == AES_IDLE) {
            areq = ablkcipher_dequeue_request(&aes_queue->list);
            aes_con = aes_container_cast(areq);
            aes_queue->hw_status = AES_STARTED;
        }
        else if (aes_queue->hw_status == AES_BUSY) {
            areq = ablkcipher_dequeue_request(&aes_queue->list);
            aes_con = aes_container_cast(areq);
        }
        else if (aes_queue->hw_status == AES_COMPLETED) {
            lq_sg_complete(aes_con);
            aes_queue->hw_status = AES_IDLE;
            areq->base.complete(&areq->base, 0);
            spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
            return 0;
        }
        //printk("debug ln: %d, bytes proc: %d\n", __LINE__, aes_con->bytes_processed);
        spin_unlock_irqrestore(&aes_queue->lock, queue_flag);

        if (!aes_con) {
            printk("AES_CON return null\n");
            goto aes_done;
        }

        if (aes_con->bytes_processed == 0) {
            goto aes_done;
        }

        /* Process a new packet or the next packet in a scatterlist */
        if (aes_con->flag & PROCESS_NEW_PACKET) {
            aes_con->flag = PROCESS_SCATTER;
            err = process_next_packet(aes_con, areq, PROCESS_NEW_PACKET);
        }
        else
            err = process_next_packet(aes_con, areq, PROCESS_SCATTER);

        if (err == -EINVAL) {
            areq->base.complete(&areq->base, err);
            lq_sg_complete(aes_con);
            printk("src/dst returned -EINVAL in func: %s\n", __func__);
        }
        else if (err > 0) {
            printk("src/dst returned zero in func: %s\n", __func__);
            goto aes_done;
        }

        continue;

aes_done:
        //printk("debug line - %d, func: %s, qlen: %d\n", __LINE__, __func__, aes_queue->list.qlen);
        areq->base.complete(&areq->base, 0);
        lq_sg_complete(aes_con);

        spin_lock_irqsave(&aes_queue->lock, queue_flag);
        if (aes_queue->list.qlen > 0) {
            spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
            tasklet_schedule(&aes_queue->aes_task);
        }
        else {
            aes_queue->hw_status = AES_IDLE;
            spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
        }
    } //while(1)

    return 0;
}

/* \fn static int lq_aes_queue_mgr(struct aes_ctx *ctx, struct ablkcipher_request *areq,
 *                                 u8 *iv, int dir, int mode)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Starts the process of queuing DEU requests
 * \param *ctx crypto algo context
 * \param *areq Pointer to the ablkcipher request
 * \param *iv Pointer to the input vector location
 * \param dir Encrypt/decrypt
 * \param mode The mode the AES algo is running in
 * \return 0 on success
 */

static int lq_aes_queue_mgr(struct aes_ctx *ctx, struct ablkcipher_request *areq,
                            u8 *iv, int dir, int mode)
{
    int err = -EINVAL;
    unsigned long queue_flag;
    struct scatterlist *src = areq->src;
    struct scatterlist *dst = areq->dst;
    struct aes_container *aes_con = NULL;
    u32 remain, inc, nbytes = areq->nbytes;
    u32 chunk_bytes = src->length;

    aes_con = kmalloc(sizeof(struct aes_container), GFP_KERNEL);
    if (!aes_con) {
        printk("Cannot allocate memory for AES container, fn %s, ln %d\n",
               __func__, __LINE__);
        return -ENOMEM;
    }

    /* plain AES encrypt/decrypt mode (single block) */
    if (mode == 5) {
        nbytes = AES_BLOCK_SIZE;
        chunk_bytes = AES_BLOCK_SIZE;
        mode = 0;
    }

    aes_con->bytes_processed = nbytes;
    aes_con->arequest = *(areq);
    remain = nbytes;

    //printk("debug - Line: %d, func: %s, reqsize: %d, scattersize: %d\n",
    //       __LINE__, __func__, nbytes, chunk_bytes);

    if (remain > DEU_MAX_PACKET_SIZE)
        inc = DEU_MAX_PACKET_SIZE;
    else if (remain > chunk_bytes)
        inc = chunk_bytes;
    else
        inc = remain;

    remain -= inc;
    lq_sg_init(aes_con, src, dst);

    if (remain <= 0)
        aes_con->complete = 1;
    else
        aes_con->complete = 0;

    aes_con->nbytes = inc;
    aes_con->iv = iv;
    aes_con->mode = mode;
    aes_con->encdec = dir;

    spin_lock_irqsave(&aes_queue->lock, queue_flag);

    if (aes_queue->hw_status == AES_STARTED || aes_queue->hw_status == AES_BUSY ||
        aes_queue->list.qlen > 0) {

        aes_con->flag = PROCESS_NEW_PACKET;
        err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest);

        /* max queue length reached */
        if (err == -EBUSY) {
            spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
            printk("Unable to enqueue request ln: %d, err: %d\n", __LINE__, err);
            kfree(aes_con);     /* never queued, avoid leaking the container */
            return err;
        }

        spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
        return -EINPROGRESS;
    }
    else if (aes_queue->hw_status == AES_IDLE)
        aes_queue->hw_status = AES_STARTED;

    aes_con->flag = PROCESS_SCATTER;
    aes_con->bytes_processed -= aes_con->nbytes;
    /* or enqueue the whole structure so as to get back the info
     * at the moment that it's queued. nbytes might be different */
    err = ablkcipher_enqueue_request(&aes_queue->list, &aes_con->arequest);

    if (err == -EBUSY) {
        spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
        printk("Unable to enqueue request ln: %d, err: %d\n", __LINE__, err);
        kfree(aes_con);         /* never queued, avoid leaking the container */
        return err;
    }

    spin_unlock_irqrestore(&aes_queue->lock, queue_flag);
    return lq_deu_aes_core(ctx, aes_con->dst_buf, aes_con->src_buf, iv, inc, dir, mode);
}

/* \fn static int aes_setkey(struct crypto_ablkcipher *tfm, const u8 *in_key,
 *                           unsigned int keylen)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Sets the AES key
 * \param *tfm Pointer to the ablkcipher transform
 * \param *in_key Pointer to the input key
 * \param keylen Length of the AES key
 * \return 0 on success, -EINVAL for a bad key length
 */

static int aes_setkey(struct crypto_ablkcipher *tfm, const u8 *in_key,
                      unsigned int keylen)
{
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
    unsigned long *flags = (unsigned long *) &tfm->base.crt_flags;

    DPRINTF(2, "set_key in %s\n", __FILE__);

    if (keylen != 16 && keylen != 24 && keylen != 32) {
        *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
        return -EINVAL;
    }

    ctx->key_length = keylen;
    DPRINTF(0, "ctx @%p, keylen %d, ctx->key_length %d\n", ctx, keylen, ctx->key_length);
    memcpy ((u8 *) (ctx->buf), in_key, keylen);

    return 0;
}

/* \fn static int aes_generic_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 *                                   unsigned int keylen)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Sets the AES key
 * \param *tfm Pointer to the ablkcipher transform
 * \param *key Pointer to the input key
 * \param keylen Length of the AES key
 * \return 0 on success, -EINVAL for a bad key length
 */

static int aes_generic_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                              unsigned int keylen)
{
    return aes_setkey(tfm, key, keylen);
}

/* \fn static int rfc3686_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *in_key,
 *                                   unsigned int keylen)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Sets the AES key and extracts the RFC 3686 nonce
 * \param *tfm Pointer to the ablkcipher transform
 * \param *in_key Pointer to the key material (AES key followed by 4-byte nonce)
 * \param keylen Length of the key material
 * \return 0 on success, -EINVAL for a bad key length
 */

static int rfc3686_aes_setkey(struct crypto_ablkcipher *tfm,
                              const u8 *in_key, unsigned int keylen)
{
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
    unsigned long *flags = (unsigned long *)&tfm->base.crt_flags;

    DPRINTF(2, "ctr_rfc3686_aes_set_key in %s\n", __FILE__);

    memcpy(ctx->nonce, in_key + (keylen - CTR_RFC3686_NONCE_SIZE),
           CTR_RFC3686_NONCE_SIZE);

    keylen -= CTR_RFC3686_NONCE_SIZE; // remove 4 bytes of nonce

    if (keylen != 16 && keylen != 24 && keylen != 32) {
        *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
        return -EINVAL;
    }

    ctx->key_length = keylen;

    memcpy ((u8 *) (ctx->buf), in_key, keylen);

    return 0;
}
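
/* Editor's note: for rfc3686(ctr(aes)) the key blob handed to setkey is the
 * AES key immediately followed by the 4-byte nonce (RFC 3686, section 5.1),
 * which is exactly what the memcpy calls above pull apart. A hedged sketch
 * with an AES-128 key, so keylen is 16 + 4 = 20 bytes (all byte values
 * illustrative):
 */
#if 0
static const u8 rfc3686_key_blob_example[20] = {
    /* 16-byte AES-128 key, copied into ctx->buf */
    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
    0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
    /* 4-byte nonce, stored into ctx->nonce by rfc3686_aes_setkey() */
    0xde, 0xad, 0xbe, 0xef,
};
#endif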

/* \fn static int aes_encrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Encrypt function for the AES algo
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if encrypting, -EINVAL on failure
 */

static int aes_encrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, NULL, CRYPTO_DIR_ENCRYPT, 5);
}

/* \fn static int aes_decrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Decrypt function for the AES algo
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if decrypting, -EINVAL on failure
 */
static int aes_decrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, NULL, CRYPTO_DIR_DECRYPT, 5);
}

/* \fn static int ecb_aes_encrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Encrypt function for the AES algo in ECB mode
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if encrypting, -EINVAL on failure
 */

static int ecb_aes_encrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 0);
}

/* \fn static int ecb_aes_decrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Decrypt function for the AES algo in ECB mode
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if decrypting, -EINVAL on failure
 */
static int ecb_aes_decrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 0);
}

/* \fn static int cbc_aes_encrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Encrypt function for the AES algo in CBC mode
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if encrypting, -EINVAL on failure
 */

static int cbc_aes_encrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 1);
}

/* \fn static int cbc_aes_decrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Decrypt function for the AES algo in CBC mode
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if decrypting, -EINVAL on failure
 */

static int cbc_aes_decrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 1);
}
#if 0
static int ofb_aes_encrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 2);
}

static int ofb_aes_decrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 2);
}

static int cfb_aes_encrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 3);
}

static int cfb_aes_decrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 3);
}
#endif

/* \fn static int ctr_aes_encrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Encrypt function for the AES algo in CTR mode
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if encrypting, -EINVAL on failure
 */

static int ctr_aes_encrypt (struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 4);
}

/* \fn static int ctr_aes_decrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Decrypt function for the AES algo in CTR mode
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if decrypting, -EINVAL on failure
 */

static int ctr_aes_decrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);

    return lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 4);
}

/* \fn static int rfc3686_aes_encrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Encrypt function for the AES algo in RFC 3686 CTR mode
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if encrypting, -EINVAL on failure
 */

static int rfc3686_aes_encrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
    int ret;
    u8 *info = areq->info;
    u8 rfc3686_iv[16];

    /* set up counter block */
    memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
    memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

    /* initialize counter portion of counter block */
    *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
        cpu_to_be32(1);

    areq->info = rfc3686_iv;
    ret = lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_ENCRYPT, 4);
    areq->info = info;
    return ret;
}

/* \fn static int rfc3686_aes_decrypt(struct ablkcipher_request *areq)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Decrypt function for the AES algo in RFC 3686 CTR mode
 * \param *areq Pointer to the ablkcipher request in memory
 * \return 0 on success, -EINPROGRESS if decrypting, -EINVAL on failure
 */

static int rfc3686_aes_decrypt(struct ablkcipher_request *areq)
{
    struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
    struct aes_ctx *ctx = crypto_ablkcipher_ctx(cipher);
    int ret;
    u8 *info = areq->info;
    u8 rfc3686_iv[16];

    /* set up counter block */
    memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
    memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

    /* initialize counter portion of counter block */
    *(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
        cpu_to_be32(1);

    areq->info = rfc3686_iv;
    ret = lq_aes_queue_mgr(ctx, areq, areq->info, CRYPTO_DIR_DECRYPT, 4);
    areq->info = info;
    return ret;
}
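
/* Editor's note: the two functions above assemble the 16-byte RFC 3686
 * counter block. A hedged, offset-by-offset restatement of the same
 * construction (illustrative only; the helper name is hypothetical):
 */
#if 0
static void rfc3686_ctrblk_example(const struct aes_ctx *ctx, const u8 *iv8,
                                   u8 ctrblk[16])
{
    memcpy(ctrblk, ctx->nonce, 4);      /* bytes  0..3  : nonce from setkey  */
    memcpy(ctrblk + 4, iv8, 8);         /* bytes  4..11 : per-request IV     */
    ctrblk[12] = 0;                     /* bytes 12..15 : block counter,     */
    ctrblk[13] = 0;                     /*                big-endian,        */
    ctrblk[14] = 0;                     /*                starting at 1      */
    ctrblk[15] = 1;
}
#endif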

struct lq_aes_alg {
    struct crypto_alg alg;
};

/* AES supported algo array */
static struct lq_aes_alg aes_drivers_alg[] = {
    {
        .alg = {
            .cra_name = "aes",
            .cra_driver_name = "ifxdeu-aes",
            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct aes_ctx),
            .cra_type = &crypto_ablkcipher_type,
            .cra_priority = 300,
            .cra_module = THIS_MODULE,
            .cra_ablkcipher = {
                .setkey = aes_setkey,
                .encrypt = aes_encrypt,
                .decrypt = aes_decrypt,
                .geniv = "eseqiv",
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
            }
        }
    }, {
        .alg = {
            .cra_name = "ecb(aes)",
            .cra_driver_name = "ifxdeu-ecb(aes)",
            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct aes_ctx),
            .cra_type = &crypto_ablkcipher_type,
            .cra_priority = 300,
            .cra_module = THIS_MODULE,
            .cra_ablkcipher = {
                .setkey = aes_generic_setkey,
                .encrypt = ecb_aes_encrypt,
                .decrypt = ecb_aes_decrypt,
                .geniv = "eseqiv",
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
            }
        }
    }, {
        .alg = {
            .cra_name = "cbc(aes)",
            .cra_driver_name = "ifxdeu-cbc(aes)",
            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct aes_ctx),
            .cra_type = &crypto_ablkcipher_type,
            .cra_priority = 300,
            .cra_module = THIS_MODULE,
            .cra_ablkcipher = {
                .setkey = aes_generic_setkey,
                .encrypt = cbc_aes_encrypt,
                .decrypt = cbc_aes_decrypt,
                .geniv = "eseqiv",
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
            }
        }
    }, {
        .alg = {
            .cra_name = "ctr(aes)",
            .cra_driver_name = "ifxdeu-ctr(aes)",
            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct aes_ctx),
            .cra_type = &crypto_ablkcipher_type,
            .cra_priority = 300,
            .cra_module = THIS_MODULE,
            .cra_ablkcipher = {
                .setkey = aes_generic_setkey,
                .encrypt = ctr_aes_encrypt,
                .decrypt = ctr_aes_decrypt,
                .geniv = "eseqiv",
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
                .ivsize = AES_BLOCK_SIZE,
            }
        }
    }, {
        .alg = {
            .cra_name = "rfc3686(ctr(aes))",
            .cra_driver_name = "ifxdeu-rfc3686(ctr(aes))",
            .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
            .cra_blocksize = AES_BLOCK_SIZE,
            .cra_ctxsize = sizeof(struct aes_ctx),
            .cra_type = &crypto_ablkcipher_type,
            .cra_priority = 300,
            .cra_module = THIS_MODULE,
            .cra_ablkcipher = {
                .setkey = rfc3686_aes_setkey,
                .encrypt = rfc3686_aes_encrypt,
                .decrypt = rfc3686_aes_decrypt,
                .geniv = "eseqiv",
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = CTR_RFC3686_MAX_KEY_SIZE,
                //.max_keysize = AES_MAX_KEY_SIZE,
                //.ivsize = CTR_RFC3686_IV_SIZE,
                .ivsize = AES_BLOCK_SIZE, // else cannot reg
            }
        }
    }
};
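
/* Editor's note: once registered, these algorithms are reachable through the
 * ablkcipher API of this kernel generation. A hedged usage sketch (error
 * handling trimmed; whether "cbc(aes)" resolves to ifxdeu-cbc(aes) depends
 * on the cra_priority of the other registered implementations):
 */
#if 0
static int lqdeu_aes_api_example(const u8 key[16])
{
    struct crypto_ablkcipher *tfm;

    tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    crypto_ablkcipher_setkey(tfm, key, 16);
    /* build an ablkcipher_request, set the src/dst scatterlists and the IV,
     * then call crypto_ablkcipher_encrypt() and wait for completion */
    crypto_free_ablkcipher(tfm);
    return 0;
}
#endif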

/* \fn int __init lqdeu_async_aes_init (void)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Initializes the async AES driver
 * \return 0 on success, a negative error code on failure
 */

int __init lqdeu_async_aes_init (void)
{
    int i, j, ret = -EINVAL;

#define IFX_DEU_DRV_VERSION "2.0.0"
    printk(KERN_INFO "Lantiq Technologies DEU Driver version %s\n", IFX_DEU_DRV_VERSION);

    for (i = 0; i < ARRAY_SIZE(aes_drivers_alg); i++) {
        ret = crypto_register_alg(&aes_drivers_alg[i].alg);
        printk("driver: %s\n", aes_drivers_alg[i].alg.cra_name);
        if (ret)
            goto aes_err;
    }

    aes_chip_init();

    CRTCL_SECT_INIT;

    printk (KERN_NOTICE "Lantiq DEU AES initialized%s%s.\n",
            disable_multiblock ? "" : " (multiblock)", disable_deudma ? "" : " (DMA)");

    return ret;

aes_err:
    for (j = 0; j < i; j++)
        crypto_unregister_alg(&aes_drivers_alg[j].alg);

    printk(KERN_ERR "Lantiq %s driver initialization failed!\n", (char *)&aes_drivers_alg[i].alg.cra_driver_name);
    return ret;
}

/*! \fn void __exit lqdeu_fini_async_aes (void)
 * \ingroup IFX_AES_FUNCTIONS
 * \brief Unregisters the AES driver
 */
void __exit lqdeu_fini_async_aes (void)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(aes_drivers_alg); i++)
        crypto_unregister_alg(&aes_drivers_alg[i].alg);

    aes_queue->hw_status = AES_COMPLETED;

    DEU_WAKEUP_EVENT(deu_dma_priv.deu_thread_wait, AES_ASYNC_EVENT,
                     deu_dma_priv.aes_event_flags);

    kfree(aes_queue);
}