sunxi: initial 3.14 patchset
[openwrt/svn-archive/archive.git] / target / linux / sunxi / patches-3.14 / 271-crypto-add-ss.patch
1 diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
2 index 03ccdb0..a2acda4 100644
3 --- a/drivers/crypto/Kconfig
4 +++ b/drivers/crypto/Kconfig
5 @@ -418,4 +418,21 @@ config CRYPTO_DEV_MXS_DCP
6 To compile this driver as a module, choose M here: the module
7 will be called mxs-dcp.
8
9 +config CRYPTO_DEV_SUNXI_SS
10 + tristate "Support for Allwinner Security System cryptographic accelerator"
11 + depends on ARCH_SUNXI
12 + select CRYPTO_MD5
13 + select CRYPTO_SHA1
14 + select CRYPTO_AES
15 + select CRYPTO_DES
16 + select CRYPTO_BLKCIPHER
17 + help
18 +	  Some Allwinner SoCs have a crypto accelerator named the
19 +	  Security System. Select this if you want to use it.
20 +	  The Security System handles the AES/DES/3DES ciphers in CBC mode
21 +	  and the SHA1 and MD5 hash algorithms.
22 +
23 + To compile this driver as a module, choose M here: the module
24 + will be called sunxi-ss.
25 +
26 endif # CRYPTO_HW
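
For context, here is a minimal, hypothetical sketch (not part of the patch) of how another kernel module on a 3.14-era kernel could reach the "cbc(aes)" implementation registered by this driver, through the generic ablkcipher API. The function, buffer and key names are illustrative only; the buffer must be kmalloc'd memory whose length is a multiple of the AES block size.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Encrypt 'len' bytes of 'buf' in place with AES-128-CBC (illustrative). */
static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	int ret;

	/* The crypto core may pick cbc-aes-sunxi-ss, depending on priority */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, 16);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);
	/* sunxi_ss_aes_poll() works by polling, so this returns when done */
	ret = crypto_ablkcipher_encrypt(req);

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}
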
27 diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
28 index 482f090..855292a 100644
29 --- a/drivers/crypto/Makefile
30 +++ b/drivers/crypto/Makefile
31 @@ -23,3 +23,4 @@ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
32 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
33 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
34 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
35 +obj-$(CONFIG_CRYPTO_DEV_SUNXI_SS) += sunxi-ss/
36 diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile
37 new file mode 100644
38 index 0000000..8bb287d
39 --- /dev/null
40 +++ b/drivers/crypto/sunxi-ss/Makefile
41 @@ -0,0 +1,2 @@
42 +obj-$(CONFIG_CRYPTO_DEV_SUNXI_SS) += sunxi-ss.o
43 +sunxi-ss-y += sunxi-ss-core.o sunxi-ss-hash.o sunxi-ss-cipher.o
44 diff --git a/drivers/crypto/sunxi-ss/sunxi-ss-cipher.c b/drivers/crypto/sunxi-ss/sunxi-ss-cipher.c
45 new file mode 100644
46 index 0000000..c2422f7
47 --- /dev/null
48 +++ b/drivers/crypto/sunxi-ss/sunxi-ss-cipher.c
49 @@ -0,0 +1,461 @@
50 +/*
51 + * sunxi-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
52 + *
53 + * Copyright (C) 2013-2014 Corentin LABBE <clabbe.montjoie@gmail.com>
54 + *
55 + * This file adds support for the AES cipher with 128-, 192- and
56 + * 256-bit keys, in CBC mode.
57 + *
58 + * You can find the datasheet at
59 + * http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf
60 + *
61 + * This program is free software; you can redistribute it and/or modify
62 + * it under the terms of the GNU General Public License as published by
63 + * the Free Software Foundation; either version 2 of the License, or
64 + * (at your option) any later version.
65 + */
66 +#include "sunxi-ss.h"
67 +
68 +extern struct sunxi_ss_ctx *ss;
69 +
70 +static int sunxi_ss_cipher(struct ablkcipher_request *areq, u32 mode)
71 +{
72 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
73 + struct sunxi_req_ctx *op = crypto_ablkcipher_ctx(tfm);
74 + const char *cipher_type;
75 +
76 + cipher_type = crypto_tfm_alg_name(crypto_ablkcipher_tfm(tfm));
77 +
78 + if (areq->nbytes == 0) {
79 + mutex_unlock(&ss->lock);
80 + return 0;
81 + }
82 +
83 + if (areq->info == NULL) {
84 + dev_err(ss->dev, "ERROR: Empty IV\n");
85 + mutex_unlock(&ss->lock);
86 + return -EINVAL;
87 + }
88 +
89 + if (areq->src == NULL || areq->dst == NULL) {
90 + dev_err(ss->dev, "ERROR: Some SGs are NULL\n");
91 + mutex_unlock(&ss->lock);
92 + return -EINVAL;
93 + }
94 +
95 + if (strcmp("cbc(aes)", cipher_type) == 0) {
96 + op->mode |= SS_OP_AES | SS_CBC | SS_ENABLED | mode;
97 + return sunxi_ss_aes_poll(areq);
98 + }
99 + if (strcmp("cbc(des)", cipher_type) == 0) {
100 + op->mode = SS_OP_DES | SS_CBC | SS_ENABLED | mode;
101 + return sunxi_ss_des_poll(areq);
102 + }
103 + if (strcmp("cbc(des3_ede)", cipher_type) == 0) {
104 + op->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | mode;
105 + return sunxi_ss_des_poll(areq);
106 + }
107 + dev_err(ss->dev, "ERROR: Cipher %s not handled\n", cipher_type);
108 + mutex_unlock(&ss->lock);
109 + return -EINVAL;
110 +}
111 +
112 +int sunxi_ss_cipher_encrypt(struct ablkcipher_request *areq)
113 +{
114 + return sunxi_ss_cipher(areq, SS_ENCRYPTION);
115 +}
116 +
117 +int sunxi_ss_cipher_decrypt(struct ablkcipher_request *areq)
118 +{
119 + return sunxi_ss_cipher(areq, SS_DECRYPTION);
120 +}
121 +
122 +int sunxi_ss_cipher_init(struct crypto_tfm *tfm)
123 +{
124 + struct sunxi_req_ctx *op = crypto_tfm_ctx(tfm);
125 +
126 + mutex_lock(&ss->lock);
127 +
128 + memset(op, 0, sizeof(struct sunxi_req_ctx));
129 + return 0;
130 +}
131 +
132 +int sunxi_ss_aes_poll(struct ablkcipher_request *areq)
133 +{
134 + u32 spaces;
135 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
136 + struct sunxi_req_ctx *op = crypto_ablkcipher_ctx(tfm);
137 + unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
138 + /* when activating SS, the default FIFO space is 32 */
139 + u32 rx_cnt = 32;
140 + u32 tx_cnt = 0;
141 + u32 v;
142 + int i;
143 + struct scatterlist *in_sg;
144 + struct scatterlist *out_sg;
145 + void *src_addr;
146 + void *dst_addr;
147 + unsigned int ileft = areq->nbytes;
148 + unsigned int oleft = areq->nbytes;
149 + unsigned int sgileft = areq->src->length;
150 + unsigned int sgoleft = areq->dst->length;
151 + unsigned int todo;
152 + u32 *src32;
153 + u32 *dst32;
154 +
155 + in_sg = areq->src;
156 + out_sg = areq->dst;
157 + for (i = 0; i < op->keylen; i += 4)
158 + writel(*(op->key + i/4), ss->base + SS_KEY0 + i);
159 + if (areq->info != NULL) {
160 + for (i = 0; i < 4 && i < ivsize / 4; i++) {
161 + v = *(u32 *)(areq->info + i * 4);
162 + writel(v, ss->base + SS_IV0 + i * 4);
163 + }
164 + }
165 + writel(op->mode, ss->base + SS_CTL);
166 +
167 + /* If we have only one SG, we can use kmap_atomic */
168 + if (sg_next(in_sg) == NULL && sg_next(out_sg) == NULL) {
169 + src_addr = kmap_atomic(sg_page(in_sg)) + in_sg->offset;
170 + if (src_addr == NULL) {
171 + dev_err(ss->dev, "kmap_atomic error for src SG\n");
172 + writel(0, ss->base + SS_CTL);
173 + mutex_unlock(&ss->lock);
174 + return -EINVAL;
175 + }
176 + dst_addr = kmap_atomic(sg_page(out_sg)) + out_sg->offset;
177 + if (dst_addr == NULL) {
178 + dev_err(ss->dev, "kmap_atomic error for dst SG\n");
179 + writel(0, ss->base + SS_CTL);
180 + kunmap_atomic(src_addr);
181 + mutex_unlock(&ss->lock);
182 + return -EINVAL;
183 + }
184 + src32 = (u32 *)src_addr;
185 + dst32 = (u32 *)dst_addr;
186 + ileft = areq->nbytes / 4;
187 + oleft = areq->nbytes / 4;
188 + i = 0;
189 + do {
190 + if (ileft > 0 && rx_cnt > 0) {
191 + todo = min(rx_cnt, ileft);
192 + ileft -= todo;
193 + do {
194 + writel_relaxed(*src32++,
195 + ss->base +
196 + SS_RXFIFO);
197 + todo--;
198 + } while (todo > 0);
199 + }
200 + if (tx_cnt > 0) {
201 + todo = min(tx_cnt, oleft);
202 + oleft -= todo;
203 + do {
204 + *dst32++ = readl_relaxed(ss->base +
205 + SS_TXFIFO);
206 + todo--;
207 + } while (todo > 0);
208 + }
209 + spaces = readl_relaxed(ss->base + SS_FCSR);
210 + rx_cnt = SS_RXFIFO_SPACES(spaces);
211 + tx_cnt = SS_TXFIFO_SPACES(spaces);
212 + } while (oleft > 0);
213 + writel(0, ss->base + SS_CTL);
214 + kunmap_atomic(src_addr);
215 + kunmap_atomic(dst_addr);
216 + mutex_unlock(&ss->lock);
217 + return 0;
218 + }
219 +
220 +	/* If we have more than one SG, we cannot use kmap_atomic since
221 +	 * we would hold the mapping for too long
222 +	 */
223 + src_addr = kmap(sg_page(in_sg)) + in_sg->offset;
224 + if (src_addr == NULL) {
225 + dev_err(ss->dev, "KMAP error for src SG\n");
226 + mutex_unlock(&ss->lock);
227 + return -EINVAL;
228 + }
229 + dst_addr = kmap(sg_page(out_sg)) + out_sg->offset;
230 + if (dst_addr == NULL) {
231 + kunmap(sg_page(in_sg));
232 + dev_err(ss->dev, "KMAP error for dst SG\n");
233 + mutex_unlock(&ss->lock);
234 + return -EINVAL;
235 + }
236 + src32 = (u32 *)src_addr;
237 + dst32 = (u32 *)dst_addr;
238 + ileft = areq->nbytes / 4;
239 + oleft = areq->nbytes / 4;
240 + sgileft = in_sg->length / 4;
241 + sgoleft = out_sg->length / 4;
242 + do {
243 + spaces = readl_relaxed(ss->base + SS_FCSR);
244 + rx_cnt = SS_RXFIFO_SPACES(spaces);
245 + tx_cnt = SS_TXFIFO_SPACES(spaces);
246 + todo = min3(rx_cnt, ileft, sgileft);
247 + if (todo > 0) {
248 + ileft -= todo;
249 + sgileft -= todo;
250 + }
251 + while (todo > 0) {
252 + writel_relaxed(*src32++, ss->base + SS_RXFIFO);
253 + todo--;
254 + }
255 + if (in_sg != NULL && sgileft == 0 && ileft > 0) {
256 + kunmap(sg_page(in_sg));
257 + in_sg = sg_next(in_sg);
258 + while (in_sg != NULL && in_sg->length == 0)
259 + in_sg = sg_next(in_sg);
260 + if (in_sg != NULL && ileft > 0) {
261 + src_addr = kmap(sg_page(in_sg)) + in_sg->offset;
262 + if (src_addr == NULL) {
263 + dev_err(ss->dev, "ERROR: KMAP for src SG\n");
264 + mutex_unlock(&ss->lock);
265 + return -EINVAL;
266 + }
267 + src32 = src_addr;
268 + sgileft = in_sg->length / 4;
269 + }
270 + }
271 +		/* no need to test oleft here: when oleft reaches 0 we are done */
272 + todo = min3(tx_cnt, oleft, sgoleft);
273 + if (todo > 0) {
274 + oleft -= todo;
275 + sgoleft -= todo;
276 + }
277 + while (todo > 0) {
278 + *dst32++ = readl_relaxed(ss->base + SS_TXFIFO);
279 + todo--;
280 + }
281 + if (out_sg != NULL && sgoleft == 0 && oleft >= 0) {
282 + kunmap(sg_page(out_sg));
283 + out_sg = sg_next(out_sg);
284 + while (out_sg != NULL && out_sg->length == 0)
285 + out_sg = sg_next(out_sg);
286 + if (out_sg != NULL && oleft > 0) {
287 + dst_addr = kmap(sg_page(out_sg)) +
288 + out_sg->offset;
289 + if (dst_addr == NULL) {
290 + dev_err(ss->dev, "KMAP error\n");
291 + mutex_unlock(&ss->lock);
292 + return -EINVAL;
293 + }
294 + dst32 = dst_addr;
295 + sgoleft = out_sg->length / 4;
296 + }
297 + }
298 + } while (oleft > 0);
299 +
300 + writel(0, ss->base + SS_CTL);
301 + mutex_unlock(&ss->lock);
302 + return 0;
303 +}
304 +
305 +/* Pure CPU way of doing DES/3DES with the SS.
306 + * Since DES and 3DES SGs can be smaller than 4 bytes, sg_copy_to_buffer()
307 + * is used to linearize them.
308 + * The drawback is that 2 x areq->nbytes is allocated in total for buf_in/buf_out.
309 + * TODO: change this scheme.
310 + * SGsrc -> buf_in -> SS -> buf_out -> SGdst */
311 +int sunxi_ss_des_poll(struct ablkcipher_request *areq)
312 +{
313 + u32 value, spaces;
314 + size_t nb_in_sg_tx, nb_in_sg_rx;
315 + size_t ir, it;
316 + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
317 + struct sunxi_req_ctx *op = crypto_ablkcipher_ctx(tfm);
318 + unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
319 + u32 tx_cnt = 0;
320 + u32 rx_cnt = 0;
321 + u32 v;
322 + int i;
323 + int no_chunk = 1;
324 +
325 +	/* if every SG has a length that is a multiple of 4,
326 +	 * we can reuse the SS AES path (sunxi_ss_aes_poll) */
327 + struct scatterlist *in_sg;
328 + struct scatterlist *out_sg;
329 +
330 + in_sg = areq->src;
331 + out_sg = areq->dst;
332 +
333 + while (in_sg != NULL && no_chunk == 1) {
334 + if ((in_sg->length % 4) != 0)
335 + no_chunk = 0;
336 + in_sg = sg_next(in_sg);
337 + }
338 + while (out_sg != NULL && no_chunk == 1) {
339 + if ((out_sg->length % 4) != 0)
340 + no_chunk = 0;
341 + out_sg = sg_next(out_sg);
342 + }
343 +
344 + if (no_chunk == 1)
345 + return sunxi_ss_aes_poll(areq);
346 + in_sg = areq->src;
347 + out_sg = areq->dst;
348 +
349 + nb_in_sg_rx = sg_nents(in_sg);
350 + nb_in_sg_tx = sg_nents(out_sg);
351 +
352 + mutex_lock(&ss->bufin_lock);
353 + if (ss->buf_in == NULL) {
354 + ss->buf_in = kmalloc(areq->nbytes, GFP_KERNEL);
355 + ss->buf_in_size = areq->nbytes;
356 + } else {
357 + if (areq->nbytes > ss->buf_in_size) {
358 + kfree(ss->buf_in);
359 + ss->buf_in = kmalloc(areq->nbytes, GFP_KERNEL);
360 + ss->buf_in_size = areq->nbytes;
361 + }
362 + }
363 + if (ss->buf_in == NULL) {
364 + ss->buf_in_size = 0;
365 + mutex_unlock(&ss->bufin_lock);
366 + dev_err(ss->dev, "Unable to allocate pages.\n");
367 + return -ENOMEM;
368 + }
369 + if (ss->buf_out == NULL) {
370 + mutex_lock(&ss->bufout_lock);
371 + ss->buf_out = kmalloc(areq->nbytes, GFP_KERNEL);
372 + if (ss->buf_out == NULL) {
373 + ss->buf_out_size = 0;
374 + mutex_unlock(&ss->bufout_lock);
375 + dev_err(ss->dev, "Unable to allocate pages.\n");
376 + return -ENOMEM;
377 + }
378 + ss->buf_out_size = areq->nbytes;
379 + mutex_unlock(&ss->bufout_lock);
380 + } else {
381 + if (areq->nbytes > ss->buf_out_size) {
382 + mutex_lock(&ss->bufout_lock);
383 + kfree(ss->buf_out);
384 + ss->buf_out = kmalloc(areq->nbytes, GFP_KERNEL);
385 + if (ss->buf_out == NULL) {
386 + ss->buf_out_size = 0;
387 + mutex_unlock(&ss->bufout_lock);
388 + dev_err(ss->dev, "Unable to allocate pages.\n");
389 + return -ENOMEM;
390 + }
391 + ss->buf_out_size = areq->nbytes;
392 + mutex_unlock(&ss->bufout_lock);
393 + }
394 + }
395 +
396 + sg_copy_to_buffer(areq->src, nb_in_sg_rx, ss->buf_in, areq->nbytes);
397 +
398 + ir = 0;
399 + it = 0;
400 +
401 + for (i = 0; i < op->keylen; i += 4)
402 + writel(*(op->key + i/4), ss->base + SS_KEY0 + i);
403 + if (areq->info != NULL) {
404 + for (i = 0; i < 4 && i < ivsize / 4; i++) {
405 + v = *(u32 *)(areq->info + i * 4);
406 + writel(v, ss->base + SS_IV0 + i * 4);
407 + }
408 + }
409 + writel(op->mode, ss->base + SS_CTL);
410 +
411 + do {
412 + if (rx_cnt == 0 || tx_cnt == 0) {
413 + spaces = readl(ss->base + SS_FCSR);
414 + rx_cnt = SS_RXFIFO_SPACES(spaces);
415 + tx_cnt = SS_TXFIFO_SPACES(spaces);
416 + }
417 + if (rx_cnt > 0 && ir < areq->nbytes) {
418 + do {
419 + value = *(u32 *)(ss->buf_in + ir);
420 + writel(value, ss->base + SS_RXFIFO);
421 + ir += 4;
422 + rx_cnt--;
423 + } while (rx_cnt > 0 && ir < areq->nbytes);
424 + }
425 + if (tx_cnt > 0 && it < areq->nbytes) {
426 + do {
427 + value = readl(ss->base + SS_TXFIFO);
428 + *(u32 *)(ss->buf_out + it) = value;
429 + it += 4;
430 + tx_cnt--;
431 + } while (tx_cnt > 0 && it < areq->nbytes);
432 + }
433 + if (ir == areq->nbytes) {
434 + mutex_unlock(&ss->bufin_lock);
435 + ir++;
436 + }
437 + } while (it < areq->nbytes);
438 +
439 + writel(0, ss->base + SS_CTL);
440 + mutex_unlock(&ss->lock);
441 +
442 +	/* a simple optimization: since we don't need the hardware for this copy,
443 +	 * we release the lock before doing it. This gains 5-10% in performance */
444 + mutex_lock(&ss->bufout_lock);
445 + sg_copy_from_buffer(areq->dst, nb_in_sg_tx, ss->buf_out, areq->nbytes);
446 +
447 + mutex_unlock(&ss->bufout_lock);
448 + return 0;
449 +}
450 +
451 +/* check and set the AES key, prepare the mode to be used */
452 +int sunxi_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
453 + unsigned int keylen)
454 +{
455 + struct sunxi_req_ctx *op = crypto_ablkcipher_ctx(tfm);
456 +
457 + switch (keylen) {
458 + case 128 / 8:
459 + op->mode = SS_AES_128BITS;
460 + break;
461 + case 192 / 8:
462 + op->mode = SS_AES_192BITS;
463 + break;
464 + case 256 / 8:
465 + op->mode = SS_AES_256BITS;
466 + break;
467 + default:
468 + dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
469 + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
470 + mutex_unlock(&ss->lock);
471 + return -EINVAL;
472 + }
473 + op->keylen = keylen;
474 + memcpy(op->key, key, keylen);
475 + return 0;
476 +}
477 +
478 +/* check and set the DES key, prepare the mode to be used */
479 +int sunxi_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
480 + unsigned int keylen)
481 +{
482 + struct sunxi_req_ctx *op = crypto_ablkcipher_ctx(tfm);
483 +
484 + if (keylen != DES_KEY_SIZE) {
485 + dev_err(ss->dev, "Invalid keylen %u\n", keylen);
486 + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
487 + mutex_unlock(&ss->lock);
488 + return -EINVAL;
489 + }
490 + op->keylen = keylen;
491 + memcpy(op->key, key, keylen);
492 + return 0;
493 +}
494 +
495 +/* check and set the 3DES key, prepare the mode to be used */
496 +int sunxi_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
497 + unsigned int keylen)
498 +{
499 + struct sunxi_req_ctx *op = crypto_ablkcipher_ctx(tfm);
500 +
501 + if (keylen != 3 * DES_KEY_SIZE) {
502 + dev_err(ss->dev, "Invalid keylen %u\n", keylen);
503 + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
504 + mutex_unlock(&ss->lock);
505 + return -EINVAL;
506 + }
507 + op->keylen = keylen;
508 + memcpy(op->key, key, keylen);
509 + return 0;
510 +}
511 diff --git a/drivers/crypto/sunxi-ss/sunxi-ss-core.c b/drivers/crypto/sunxi-ss/sunxi-ss-core.c
512 new file mode 100644
513 index 0000000..c76016e
514 --- /dev/null
515 +++ b/drivers/crypto/sunxi-ss/sunxi-ss-core.c
516 @@ -0,0 +1,308 @@
517 +/*
518 + * sunxi-ss-core.c - hardware cryptographic accelerator for Allwinner A20 SoC
519 + *
520 + * Copyright (C) 2013-2014 Corentin LABBE <clabbe.montjoie@gmail.com>
521 + *
522 + * Core file which registers crypto algorithms supported by the SS.
523 + *
524 + * You can find the datasheet at
525 + * http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf
526 + *
527 + *
528 + * This program is free software; you can redistribute it and/or modify
529 + * it under the terms of the GNU General Public License as published by
530 + * the Free Software Foundation; either version 2 of the License, or
531 + * (at your option) any later version.
532 + */
533 +#include <linux/clk.h>
534 +#include <linux/crypto.h>
535 +#include <linux/io.h>
536 +#include <linux/module.h>
537 +#include <linux/of.h>
538 +#include <linux/platform_device.h>
539 +#include <crypto/scatterwalk.h>
540 +#include <linux/scatterlist.h>
541 +#include <linux/interrupt.h>
542 +#include <linux/delay.h>
543 +
544 +#include "sunxi-ss.h"
545 +
546 +struct sunxi_ss_ctx *ss;
547 +
548 +/* General notes:
549 + * A key/IV cache cannot be used because each time one of them changes,
550 + * everything needs to be rewritten (SS_KEYX and SS_IVX);
551 + * with dm-crypt, for example, the IV changes on each request.
552 + *
553 + * After each request the device must be disabled by writing 0 to SS_CTL.
554 + *
555 + * For performance reasons, we use writel_relaxed/readl_relaxed for all
556 + * operations on the RX and TX FIFOs and also on SS_FCSR.
557 + * For all other registers, we use writel/readl.
558 + * See http://permalink.gmane.org/gmane.linux.ports.arm.kernel/117644
559 + * and http://permalink.gmane.org/gmane.linux.ports.arm.kernel/117640
560 + */
561 +
562 +static struct ahash_alg sunxi_md5_alg = {
563 + .init = sunxi_hash_init,
564 + .update = sunxi_hash_update,
565 + .final = sunxi_hash_final,
566 + .finup = sunxi_hash_finup,
567 + .digest = sunxi_hash_digest,
568 + .halg = {
569 + .digestsize = MD5_DIGEST_SIZE,
570 + .base = {
571 + .cra_name = "md5",
572 + .cra_driver_name = "md5-sunxi-ss",
573 + .cra_priority = 300,
574 + .cra_alignmask = 3,
575 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
576 + .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
577 + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
578 + .cra_module = THIS_MODULE,
579 + .cra_type = &crypto_ahash_type
580 + }
581 + }
582 +};
583 +static struct ahash_alg sunxi_sha1_alg = {
584 + .init = sunxi_hash_init,
585 + .update = sunxi_hash_update,
586 + .final = sunxi_hash_final,
587 + .finup = sunxi_hash_finup,
588 + .digest = sunxi_hash_digest,
589 + .halg = {
590 + .digestsize = SHA1_DIGEST_SIZE,
591 + .base = {
592 + .cra_name = "sha1",
593 + .cra_driver_name = "sha1-sunxi-ss",
594 + .cra_priority = 300,
595 + .cra_alignmask = 3,
596 + .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
597 + .cra_blocksize = SHA1_BLOCK_SIZE,
598 + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
599 + .cra_module = THIS_MODULE,
600 + .cra_type = &crypto_ahash_type
601 + }
602 + }
603 +};
604 +
605 +static struct crypto_alg sunxi_cipher_algs[] = {
606 +{
607 + .cra_name = "cbc(aes)",
608 + .cra_driver_name = "cbc-aes-sunxi-ss",
609 + .cra_priority = 300,
610 + .cra_blocksize = AES_BLOCK_SIZE,
611 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
612 + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
613 + .cra_module = THIS_MODULE,
614 + .cra_alignmask = 3,
615 + .cra_type = &crypto_ablkcipher_type,
616 + .cra_init = sunxi_ss_cipher_init,
617 + .cra_u = {
618 + .ablkcipher = {
619 + .min_keysize = AES_MIN_KEY_SIZE,
620 + .max_keysize = AES_MAX_KEY_SIZE,
621 + .ivsize = AES_BLOCK_SIZE,
622 + .setkey = sunxi_ss_aes_setkey,
623 + .encrypt = sunxi_ss_cipher_encrypt,
624 + .decrypt = sunxi_ss_cipher_decrypt,
625 + }
626 + }
627 +}, {
628 + .cra_name = "cbc(des)",
629 + .cra_driver_name = "cbc-des-sunxi-ss",
630 + .cra_priority = 300,
631 + .cra_blocksize = DES_BLOCK_SIZE,
632 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
633 + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
634 + .cra_module = THIS_MODULE,
635 + .cra_alignmask = 3,
636 + .cra_type = &crypto_ablkcipher_type,
637 + .cra_init = sunxi_ss_cipher_init,
638 + .cra_u.ablkcipher = {
639 + .min_keysize = DES_KEY_SIZE,
640 + .max_keysize = DES_KEY_SIZE,
641 + .ivsize = DES_BLOCK_SIZE,
642 + .setkey = sunxi_ss_des_setkey,
643 + .encrypt = sunxi_ss_cipher_encrypt,
644 + .decrypt = sunxi_ss_cipher_decrypt,
645 + }
646 +}, {
647 + .cra_name = "cbc(des3_ede)",
648 + .cra_driver_name = "cbc-des3-sunxi-ss",
649 + .cra_priority = 300,
650 + .cra_blocksize = DES3_EDE_BLOCK_SIZE,
651 + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
652 + .cra_ctxsize = sizeof(struct sunxi_req_ctx),
653 + .cra_module = THIS_MODULE,
654 + .cra_alignmask = 3,
655 + .cra_type = &crypto_ablkcipher_type,
656 + .cra_init = sunxi_ss_cipher_init,
657 + .cra_u.ablkcipher = {
658 + .min_keysize = DES3_EDE_KEY_SIZE,
659 + .max_keysize = DES3_EDE_KEY_SIZE,
660 + .ivsize = DES3_EDE_BLOCK_SIZE,
661 + .setkey = sunxi_ss_des3_setkey,
662 + .encrypt = sunxi_ss_cipher_encrypt,
663 + .decrypt = sunxi_ss_cipher_decrypt,
664 + }
665 +}
666 +};
667 +
668 +static int sunxi_ss_probe(struct platform_device *pdev)
669 +{
670 + struct resource *res;
671 + u32 v;
672 + int err;
673 + unsigned long cr;
674 + const unsigned long cr_ahb = 24 * 1000 * 1000;
675 + const unsigned long cr_mod = 150 * 1000 * 1000;
676 +
677 + if (!pdev->dev.of_node)
678 + return -ENODEV;
679 +
680 + ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
681 + if (ss == NULL)
682 + return -ENOMEM;
683 +
684 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
685 + ss->base = devm_ioremap_resource(&pdev->dev, res);
686 + if (IS_ERR(ss->base)) {
687 + dev_err(&pdev->dev, "Cannot request MMIO\n");
688 + return PTR_ERR(ss->base);
689 + }
690 +
691 + ss->ssclk = devm_clk_get(&pdev->dev, "mod");
692 + if (IS_ERR(ss->ssclk)) {
693 + err = PTR_ERR(ss->ssclk);
694 + dev_err(&pdev->dev, "Cannot get SS clock err=%d\n", err);
695 + return err;
696 + }
697 + dev_dbg(&pdev->dev, "clock ss acquired\n");
698 +
699 + ss->busclk = devm_clk_get(&pdev->dev, "ahb");
700 + if (IS_ERR(ss->busclk)) {
701 + err = PTR_ERR(ss->busclk);
702 + dev_err(&pdev->dev, "Cannot get AHB SS clock err=%d\n", err);
703 + return err;
704 + }
705 + dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
706 +
707 + /* Enable the clocks */
708 + err = clk_prepare_enable(ss->busclk);
709 + if (err != 0) {
710 + dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
711 + return err;
712 + }
713 + err = clk_prepare_enable(ss->ssclk);
714 + if (err != 0) {
715 + dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n");
716 + clk_disable_unprepare(ss->busclk);
717 + return err;
718 + }
719 +
720 +	/* Check that the clocks have the correct rates given in the datasheet */
721 + /* Try to set the clock to the maximum allowed */
722 + err = clk_set_rate(ss->ssclk, cr_mod);
723 + if (err != 0) {
724 + dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n");
725 + clk_disable_unprepare(ss->ssclk);
726 + clk_disable_unprepare(ss->busclk);
727 + return err;
728 + }
729 + cr = clk_get_rate(ss->busclk);
730 + if (cr >= cr_ahb)
731 + dev_dbg(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
732 + cr, cr / 1000000, cr_ahb);
733 + else
734 + dev_warn(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
735 + cr, cr / 1000000, cr_ahb);
736 + cr = clk_get_rate(ss->ssclk);
737 + if (cr == cr_mod)
738 + dev_dbg(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
739 + cr, cr / 1000000, cr_mod);
740 + else {
741 + dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n",
742 + cr, cr / 1000000, cr_mod);
743 + }
744 +
745 +	/* TODO: could this information be useful? */
746 + writel(SS_ENABLED, ss->base + SS_CTL);
747 + v = readl(ss->base + SS_CTL);
748 + v >>= 16;
749 + v &= 0x07;
750 + dev_info(&pdev->dev, "Die ID %d\n", v);
751 + writel(0, ss->base + SS_CTL);
752 +
753 + ss->dev = &pdev->dev;
754 +
755 + mutex_init(&ss->lock);
756 + mutex_init(&ss->bufin_lock);
757 + mutex_init(&ss->bufout_lock);
758 +
759 + err = crypto_register_ahash(&sunxi_md5_alg);
760 + if (err)
761 + goto error_md5;
762 + err = crypto_register_ahash(&sunxi_sha1_alg);
763 + if (err)
764 + goto error_sha1;
765 + err = crypto_register_algs(sunxi_cipher_algs,
766 + ARRAY_SIZE(sunxi_cipher_algs));
767 + if (err)
768 + goto error_ciphers;
769 +
770 + return 0;
771 +error_ciphers:
772 + crypto_unregister_ahash(&sunxi_sha1_alg);
773 +error_sha1:
774 + crypto_unregister_ahash(&sunxi_md5_alg);
775 +error_md5:
776 + clk_disable_unprepare(ss->ssclk);
777 + clk_disable_unprepare(ss->busclk);
778 + return err;
779 +}
780 +
781 +static int __exit sunxi_ss_remove(struct platform_device *pdev)
782 +{
783 + if (!pdev->dev.of_node)
784 + return 0;
785 +
786 + crypto_unregister_ahash(&sunxi_md5_alg);
787 + crypto_unregister_ahash(&sunxi_sha1_alg);
788 + crypto_unregister_algs(sunxi_cipher_algs,
789 + ARRAY_SIZE(sunxi_cipher_algs));
790 +
791 + if (ss->buf_in != NULL)
792 + kfree(ss->buf_in);
793 + if (ss->buf_out != NULL)
794 + kfree(ss->buf_out);
795 +
796 + writel(0, ss->base + SS_CTL);
797 + clk_disable_unprepare(ss->busclk);
798 + clk_disable_unprepare(ss->ssclk);
799 + return 0;
800 +}
801 +
802 +/*============================================================================*/
803 +/*============================================================================*/
804 +static const struct of_device_id a20ss_crypto_of_match_table[] = {
805 + { .compatible = "allwinner,sun7i-a20-crypto" },
806 + {}
807 +};
808 +MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
809 +
810 +static struct platform_driver sunxi_ss_driver = {
811 + .probe = sunxi_ss_probe,
812 + .remove = __exit_p(sunxi_ss_remove),
813 + .driver = {
814 + .owner = THIS_MODULE,
815 + .name = "sunxi-ss",
816 + .of_match_table = a20ss_crypto_of_match_table,
817 + },
818 +};
819 +
820 +module_platform_driver(sunxi_ss_driver);
821 +
822 +MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
823 +MODULE_LICENSE("GPL");
824 +MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
825 diff --git a/drivers/crypto/sunxi-ss/sunxi-ss-hash.c b/drivers/crypto/sunxi-ss/sunxi-ss-hash.c
826 new file mode 100644
827 index 0000000..6412bfb
828 --- /dev/null
829 +++ b/drivers/crypto/sunxi-ss/sunxi-ss-hash.c
830 @@ -0,0 +1,241 @@
831 +/*
832 + * sunxi-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC
833 + *
834 + * Copyright (C) 2013-2014 Corentin LABBE <clabbe.montjoie@gmail.com>
835 + *
836 + * This file adds support for MD5 and SHA1.
837 + *
838 + * You can find the datasheet at
839 + * http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf
840 + *
841 + * This program is free software; you can redistribute it and/or modify
842 + * it under the terms of the GNU General Public License as published by
843 + * the Free Software Foundation; either version 2 of the License, or
844 + * (at your option) any later version.
845 + */
846 +#include "sunxi-ss.h"
847 +
848 +extern struct sunxi_ss_ctx *ss;
849 +
850 +/* sunxi_hash_init: initialize request context
851 + * Activate the SS, and configure it for MD5 or SHA1
852 + */
853 +int sunxi_hash_init(struct ahash_request *areq)
854 +{
855 + const char *hash_type;
856 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
857 + struct sunxi_req_ctx *op = crypto_ahash_ctx(tfm);
858 +
859 + mutex_lock(&ss->lock);
860 +
861 + hash_type = crypto_tfm_alg_name(areq->base.tfm);
862 +
863 + op->byte_count = 0;
864 + op->nbwait = 0;
865 + op->waitbuf = 0;
866 +
867 + /* Enable and configure SS for MD5 or SHA1 */
868 + if (strcmp(hash_type, "sha1") == 0)
869 + op->mode = SS_OP_SHA1;
870 + else
871 + op->mode = SS_OP_MD5;
872 +
873 + writel(op->mode | SS_ENABLED, ss->base + SS_CTL);
874 + return 0;
875 +}
876 +
877 +/*
878 + * sunxi_hash_update: update hash engine
879 + *
880 + * Can be used for both SHA1 and MD5.
881 + * Data is written to the SS in 32-bit steps; the remaining bytes
882 + * (op->nbwait of them) are stored in op->waitbuf.
883 + * As an optimisation, we do not check RXFIFO_SPACES, since the SS drains
884 + * the FIFO faster than we can fill it.
885 + */
886 +int sunxi_hash_update(struct ahash_request *areq)
887 +{
888 + u32 v;
889 + unsigned int i = 0;/* bytes read, to be compared to areq->nbytes */
890 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
891 + struct sunxi_req_ctx *op = crypto_ahash_ctx(tfm);
892 + struct scatterlist *in_sg;
893 + unsigned int in_i = 0;/* advancement in the current SG */
894 + void *src_addr;
895 +
896 + u8 *waitbuf = (u8 *)(&op->waitbuf);
897 +
898 + if (areq->nbytes == 0)
899 + return 0;
900 +
901 + in_sg = areq->src;
902 + do {
903 + src_addr = kmap(sg_page(in_sg)) + in_sg->offset;
904 +		/* step 1: if some bytes remain from the last SG,
905 +		 * try to complete them to 4 and send them */
906 + if (op->nbwait > 0) {
907 + while (op->nbwait < 4 && i < areq->nbytes &&
908 + in_i < in_sg->length) {
909 + waitbuf[op->nbwait] = *(u8 *)(src_addr + in_i);
910 + i++;
911 + in_i++;
912 + op->nbwait++;
913 + }
914 + if (op->nbwait == 4) {
915 + writel(op->waitbuf, ss->base + SS_RXFIFO);
916 + op->byte_count += 4;
917 + op->nbwait = 0;
918 + op->waitbuf = 0;
919 + }
920 + }
921 +		/* step 2: main loop, read data 4 bytes at a time */
922 + while (i < areq->nbytes && areq->nbytes - i >= 4 &&
923 + in_i < in_sg->length &&
924 + in_sg->length - in_i >= 4) {
925 + v = *(u32 *)(src_addr + in_i);
926 + writel_relaxed(v, ss->base + SS_RXFIFO);
927 + i += 4;
928 + op->byte_count += 4;
929 + in_i += 4;
930 + }
931 +		/* step 3: if fewer than 4 bytes are left, copy them into waitbuf;
932 +		 * no need to check for op->nbwait < 4 since we cannot have
933 +		 * more than 4 bytes remaining */
934 + if (in_i < in_sg->length && in_sg->length - in_i < 4 &&
935 + i < areq->nbytes) {
936 + do {
937 + waitbuf[op->nbwait] = *(u8 *)(src_addr + in_i);
938 + op->nbwait++;
939 + in_i++;
940 + i++;
941 + } while (in_i < in_sg->length && i < areq->nbytes);
942 + }
943 +		/* we have finished the current SG, try the next one */
944 + kunmap(sg_page(in_sg));
945 + in_sg = sg_next(in_sg);
946 + in_i = 0;
947 + } while (in_sg != NULL && i < areq->nbytes);
948 + return 0;
949 +}
950 +
951 +/*
952 + * sunxi_hash_final: finalize hashing operation
953 + *
954 + * If some bytes remain, send them,
955 + * then ask the SS to finalize the hash.
956 + */
957 +int sunxi_hash_final(struct ahash_request *areq)
958 +{
959 + u32 v;
960 + unsigned int i;
961 + int zeros;
962 + unsigned int index, padlen;
963 + __be64 bits;
964 + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
965 + struct sunxi_req_ctx *op = crypto_ahash_ctx(tfm);
966 +
967 + if (op->nbwait > 0) {
968 + op->waitbuf |= ((1 << 7) << (op->nbwait * 8));
969 + writel(op->waitbuf, ss->base + SS_RXFIFO);
970 + } else {
971 + writel((1 << 7), ss->base + SS_RXFIFO);
972 + }
973 +
974 +	/* Number of 32-bit zero words needed to pad the message to a multiple
975 +	 * of 64 bytes, minus 8 bytes for the length and minus the 4 bytes
976 +	 * (holding the final '1' bit) already sent above.
977 +	 * For example: byte_count=0 needs 13 zero words, byte_count=56 needs 15. */
978 +
979 +	/* we have already sent 4 more bytes, op->nbwait of which were data */
980 + if (op->mode == SS_OP_MD5) {
981 + index = (op->byte_count + 4) & 0x3f;
982 + op->byte_count += op->nbwait;
983 + if (index > 56)
984 + zeros = (120 - index) / 4;
985 + else
986 + zeros = (56 - index) / 4;
987 + } else {
988 + op->byte_count += op->nbwait;
989 + index = op->byte_count & 0x3f;
990 + padlen = (index < 56) ? (56 - index) : ((64+56) - index);
991 + zeros = (padlen - 1) / 4;
992 + }
993 + for (i = 0; i < zeros; i++)
994 + writel(0, ss->base + SS_RXFIFO);
995 +
996 +	/* write the length, in bits */
997 + if (op->mode == SS_OP_SHA1) {
998 + bits = cpu_to_be64(op->byte_count << 3);
999 + writel(bits & 0xffffffff, ss->base + SS_RXFIFO);
1000 + writel((bits >> 32) & 0xffffffff, ss->base + SS_RXFIFO);
1001 + } else {
1002 + writel((op->byte_count << 3) & 0xffffffff,
1003 + ss->base + SS_RXFIFO);
1004 + writel((op->byte_count >> 29) & 0xffffffff,
1005 + ss->base + SS_RXFIFO);
1006 + }
1007 +
1008 + /* stop the hashing */
1009 + v = readl(ss->base + SS_CTL);
1010 + v |= SS_DATA_END;
1011 + writel(v, ss->base + SS_CTL);
1012 +
1013 +	/* wait for the end of the hash */
1014 +	/* the timeout can only happen in case of bad overclocking */
1015 +#define SS_TIMEOUT 100
1016 + i = 0;
1017 + do {
1018 + v = readl(ss->base + SS_CTL);
1019 + i++;
1020 + } while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
1021 + if (i >= SS_TIMEOUT) {
1022 + dev_err(ss->dev, "ERROR: hash end timeout %d>%d\n",
1023 + i, SS_TIMEOUT);
1024 + writel(0, ss->base + SS_CTL);
1025 + mutex_unlock(&ss->lock);
1026 + return -1;
1027 + }
1028 +
1029 + if (op->mode == SS_OP_SHA1) {
1030 + for (i = 0; i < 5; i++) {
1031 + v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
1032 + memcpy(areq->result + i * 4, &v, 4);
1033 + }
1034 + } else {
1035 + for (i = 0; i < 4; i++) {
1036 + v = readl(ss->base + SS_MD0 + i * 4);
1037 + memcpy(areq->result + i * 4, &v, 4);
1038 + }
1039 + }
1040 + writel(0, ss->base + SS_CTL);
1041 + mutex_unlock(&ss->lock);
1042 + return 0;
1043 +}
1044 +
1045 +/* sunxi_hash_finup: finalize hashing operation after an update */
1046 +int sunxi_hash_finup(struct ahash_request *areq)
1047 +{
1048 + int err;
1049 +
1050 + err = sunxi_hash_update(areq);
1051 + if (err != 0)
1052 + return err;
1053 +
1054 + return sunxi_hash_final(areq);
1055 +}
1056 +
1057 +/* combo of init/update/final functions */
1058 +int sunxi_hash_digest(struct ahash_request *areq)
1059 +{
1060 + int err;
1061 +
1062 + err = sunxi_hash_init(areq);
1063 + if (err != 0)
1064 + return err;
1065 +
1066 + err = sunxi_hash_update(areq);
1067 + if (err != 0)
1068 + return err;
1069 +
1070 + return sunxi_hash_final(areq);
1071 +}
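
As above, a hedged usage sketch (not part of the patch): how another kernel module on a 3.14-era kernel could compute a SHA1 digest through the ahash interface registered by this file. example_sha1() and its parameters are hypothetical; 'data' must be page-backed (e.g. kmalloc'd) memory and 'out' must hold SHA1_DIGEST_SIZE (20) bytes.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_sha1(u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	/* The crypto core may pick sha1-sunxi-ss, depending on priority */
	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}
	ahash_request_set_callback(req, 0, NULL, NULL);

	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);
	/* runs sunxi_hash_init/update/final; this driver polls, so it
	 * returns only once the digest has been copied to 'out' */
	ret = crypto_ahash_digest(req);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}
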
1072 diff --git a/drivers/crypto/sunxi-ss/sunxi-ss.h b/drivers/crypto/sunxi-ss/sunxi-ss.h
1073 new file mode 100644
1074 index 0000000..94aca20
1075 --- /dev/null
1076 +++ b/drivers/crypto/sunxi-ss/sunxi-ss.h
1077 @@ -0,0 +1,183 @@
1078 +/*
1079 + * sunxi-ss.h - hardware cryptographic accelerator for Allwinner A20 SoC
1080 + *
1081 + * Copyright (C) 2013-2014 Corentin LABBE <clabbe.montjoie@gmail.com>
1082 + *
1083 + * Supports the AES cipher with 128-, 192- and 256-bit keys.
1084 + * Supports the MD5 and SHA1 hash algorithms.
1085 + * Supports DES and 3DES.
1086 + * Supports the PRNG.
1087 + *
1088 + * You can find the datasheet at
1089 + * http://dl.linux-sunxi.org/A20/A20%20User%20Manual%202013-03-22.pdf
1090 + *
1091 + *
1092 + * Licensed under the GPL-2.
1093 + */
1094 +
1095 +#include <linux/clk.h>
1096 +#include <linux/crypto.h>
1097 +#include <linux/io.h>
1098 +#include <linux/module.h>
1099 +#include <linux/of.h>
1100 +#include <linux/platform_device.h>
1101 +#include <crypto/scatterwalk.h>
1102 +#include <linux/scatterlist.h>
1103 +#include <linux/interrupt.h>
1104 +#include <linux/delay.h>
1105 +#include <crypto/md5.h>
1106 +#include <crypto/sha.h>
1107 +#include <crypto/hash.h>
1108 +#include <crypto/internal/hash.h>
1109 +#include <crypto/aes.h>
1110 +#include <crypto/des.h>
1111 +#include <crypto/internal/rng.h>
1112 +
1113 +#define SS_CTL 0x00
1114 +#define SS_KEY0 0x04
1115 +#define SS_KEY1 0x08
1116 +#define SS_KEY2 0x0C
1117 +#define SS_KEY3 0x10
1118 +#define SS_KEY4 0x14
1119 +#define SS_KEY5 0x18
1120 +#define SS_KEY6 0x1C
1121 +#define SS_KEY7 0x20
1122 +
1123 +#define SS_IV0 0x24
1124 +#define SS_IV1 0x28
1125 +#define SS_IV2 0x2C
1126 +#define SS_IV3 0x30
1127 +
1128 +#define SS_CNT0 0x34
1129 +#define SS_CNT1 0x38
1130 +#define SS_CNT2 0x3C
1131 +#define SS_CNT3 0x40
1132 +
1133 +#define SS_FCSR 0x44
1134 +#define SS_ICSR 0x48
1135 +
1136 +#define SS_MD0 0x4C
1137 +#define SS_MD1 0x50
1138 +#define SS_MD2 0x54
1139 +#define SS_MD3 0x58
1140 +#define SS_MD4 0x5C
1141 +
1142 +#define SS_RXFIFO 0x200
1143 +#define SS_TXFIFO 0x204
1144 +
1145 +/* SS_CTL configuration values */
1146 +
1147 +/* PRNG generator mode - bit 15 */
1148 +#define SS_PRNG_ONESHOT (0 << 15)
1149 +#define SS_PRNG_CONTINUE (1 << 15)
1150 +
1151 +/* SS operation mode - bits 12-13 */
1152 +#define SS_ECB (0 << 12)
1153 +#define SS_CBC (1 << 12)
1154 +#define SS_CNT (2 << 12)
1155 +
1156 +/* Counter width for CNT mode - bits 10-11 */
1157 +#define SS_CNT_16BITS (0 << 10)
1158 +#define SS_CNT_32BITS (1 << 10)
1159 +#define SS_CNT_64BITS (2 << 10)
1160 +
1161 +/* Key size for AES - bits 8-9 */
1162 +#define SS_AES_128BITS (0 << 8)
1163 +#define SS_AES_192BITS (1 << 8)
1164 +#define SS_AES_256BITS (2 << 8)
1165 +
1166 +/* Operation direction - bit 7 */
1167 +#define SS_ENCRYPTION (0 << 7)
1168 +#define SS_DECRYPTION (1 << 7)
1169 +
1170 +/* SS Method - bits 4-6 */
1171 +#define SS_OP_AES (0 << 4)
1172 +#define SS_OP_DES (1 << 4)
1173 +#define SS_OP_3DES (2 << 4)
1174 +#define SS_OP_SHA1 (3 << 4)
1175 +#define SS_OP_MD5 (4 << 4)
1176 +#define SS_OP_PRNG (5 << 4)
1177 +
1178 +/* Data end bit - bit 2 */
1179 +#define SS_DATA_END (1 << 2)
1180 +
1181 +/* PRNG start bit - bit 1 */
1182 +#define SS_PRNG_START (1 << 1)
1183 +
1184 +/* SS Enable bit - bit 0 */
1185 +#define SS_DISABLED (0 << 0)
1186 +#define SS_ENABLED (1 << 0)
1187 +
1188 +/* SS_FCSR configuration values */
1189 +/* RX FIFO status - bit 30 */
1190 +#define SS_RXFIFO_FREE (1 << 30)
1191 +
1192 +/* RX FIFO empty spaces - bits 24-29 */
1193 +#define SS_RXFIFO_SPACES(val) (((val) >> 24) & 0x3f)
1194 +
1195 +/* TX FIFO status - bit 22 */
1196 +#define SS_TXFIFO_AVAILABLE (1 << 22)
1197 +
1198 +/* TX FIFO available spaces - bits 16-21 */
1199 +#define SS_TXFIFO_SPACES(val) (((val) >> 16) & 0x3f)
1200 +
1201 +#define SS_RXFIFO_EMP_INT_PENDING (1 << 10)
1202 +#define SS_TXFIFO_AVA_INT_PENDING (1 << 8)
1203 +#define SS_RXFIFO_EMP_INT_ENABLE (1 << 2)
1204 +#define SS_TXFIFO_AVA_INT_ENABLE (1 << 0)
1205 +
1206 +/* SS_ICSR configuration values */
1207 +#define SS_ICS_DRQ_ENABLE (1 << 4)
1208 +
1209 +struct sunxi_ss_ctx {
1210 + void __iomem *base;
1211 + int irq;
1212 + struct clk *busclk;
1213 + struct clk *ssclk;
1214 + struct device *dev;
1215 + struct resource *res;
1216 + void *buf_in; /* pointer to data to be uploaded to the device */
1217 + size_t buf_in_size; /* size of buf_in */
1218 + void *buf_out;
1219 + size_t buf_out_size;
1220 + struct mutex lock; /* control the use of the device */
1221 +	struct mutex bufout_lock; /* control the use of buf_out */
1222 +	struct mutex bufin_lock; /* control the use of buf_in */
1223 +};
1224 +
1225 +struct sunxi_req_ctx {
1226 + u32 key[AES_MAX_KEY_SIZE / 4];/* divided by sizeof(u32) */
1227 + u32 keylen;
1228 + u32 mode;
1229 + u64 byte_count; /* number of bytes "uploaded" to the device */
1230 + u32 waitbuf; /* a partial word waiting to be completed and
1231 + uploaded to the device */
1232 + /* number of bytes to be uploaded in the waitbuf word */
1233 + unsigned int nbwait;
1234 +};
1235 +
1236 +#define SS_SEED_LEN (192/8)
1237 +#define SS_DATA_LEN (160/8)
1238 +
1239 +struct prng_context {
1240 + u32 seed[SS_SEED_LEN/4];
1241 + unsigned int slen;
1242 +};
1243 +
1244 +int sunxi_hash_init(struct ahash_request *areq);
1245 +int sunxi_hash_update(struct ahash_request *areq);
1246 +int sunxi_hash_final(struct ahash_request *areq);
1247 +int sunxi_hash_finup(struct ahash_request *areq);
1248 +int sunxi_hash_digest(struct ahash_request *areq);
1249 +
1250 +int sunxi_ss_aes_poll(struct ablkcipher_request *areq);
1251 +int sunxi_ss_des_poll(struct ablkcipher_request *areq);
1252 +int sunxi_ss_cipher_init(struct crypto_tfm *tfm);
1253 +int sunxi_ss_cipher_encrypt(struct ablkcipher_request *areq);
1254 +int sunxi_ss_cipher_decrypt(struct ablkcipher_request *areq);
1255 +int sunxi_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1256 + unsigned int keylen);
1257 +int sunxi_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1258 + unsigned int keylen);
1259 +int sunxi_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
1260 + unsigned int keylen);
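
As a small illustration (not part of the patch), the value written to SS_CTL for an AES-128 CBC encryption is assembled from the defines above: sunxi_ss_aes_setkey() stores the key-size bits in op->mode, sunxi_ss_cipher() ORs in the method, chaining mode, direction and enable bits, and sunxi_ss_aes_poll() finally writes the result to the register.

/* Illustrative only: the SS_CTL value used for an AES-128 CBC encryption */
static inline u32 example_aes128_cbc_encrypt_ctl(void)
{
	return SS_OP_AES | SS_CBC | SS_AES_128BITS | SS_ENCRYPTION | SS_ENABLED;
}
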
1261 --
1262 1.8.5.5
1263
1264